max_stars_repo_path (string) | max_stars_repo_name (string) | max_stars_count (int64) | id (string) | content (string) | score (float64) | int_score (int64) |
|---|---|---|---|---|---|---|
findmax.py | banuti/pyGA | 0 | 12760051 | #!/usr/bin/python
#GA main program
from pyGA import *
# set world constants
population = 100
generations = 1000
# create world with given number of generations
# and number of entities per generation
world1=World(population,generations)
# run optimization
world1.runworld()
# show final results
world1.showreport()
| 2.828125 | 3 |
OrderSystem/routing/Budgets.py | mattjt/OrderSystem | 1 | 12760052 | from decimal import Decimal
from flask import render_template, url_for, abort
from flask_classy import FlaskView, route
from flask_login import login_required
from sqlalchemy import and_
from werkzeug.utils import redirect
from OrderSystem import db, sentry
from OrderSystem import forms
from OrderSystem.routing.CRUDBase import CRUDBase
from OrderSystem.sql.ORM import Budget, Subteam, Order
from OrderSystem.utilities.Helpers import flash_errors
from OrderSystem.utilities.Permissions import update_order_status_access_required
from OrderSystem.utilities.ServerLogger import log_event
class Budgets(FlaskView, CRUDBase):
"""
The Budgets system provides team members with a way to view how much money their subteam still has available,
as well as to drill down into individual subteams and specific orders.
"""
route_base = ""
BUDGET_FULL_THRESH = 0.75 # 75%
BUDGET_MEDIUM_THRESH = 0.50 # 50%
BUDGET_LOW_THRESH = 0.25 # 25%
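# The three constants above split the remaining-budget ratio (dollars_left / starting budget)
# into the CSS classes "budget-full", "budget-low", "budget-verylow", "budget-critical" and
# "budget-empty" consumed by the budgets index template (see index() below).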
def create(self):
"""
No implementation
"""
pass
@route('/<int:fiscal_year>')
@login_required
def index(self, fiscal_year):
"""
Shows the user an overview of the budgets for subteams this year
@return: List of subteams color-coded with their amount of money remaining
"""
subteams = db.session.query(Subteam).all()
ids = []
names = []
css_classes = []
cash_left = []
started_with = []
for subteam in subteams:
try:
budget = db.session.query(Budget).filter(
and_(Budget.fiscal_year == fiscal_year, Budget.subteam_id == subteam.id)).first()
curr_orders = db.session.query(Order).filter(
and_(Order.fiscal_year == fiscal_year, Order.part_for_subteam == subteam.id,
Order.pending_approval == False))
dollars_left = Decimal(budget.dollar_amount)
for order in curr_orders:
dollars_left -= Decimal(order.total)
# Decide what class to use
if (dollars_left / budget.dollar_amount) > self.BUDGET_FULL_THRESH:
css_class = "budget-full"
elif self.BUDGET_MEDIUM_THRESH < (dollars_left / budget.dollar_amount) <= self.BUDGET_FULL_THRESH:
css_class = "budget-low"
elif self.BUDGET_LOW_THRESH < (dollars_left / budget.dollar_amount) <= self.BUDGET_MEDIUM_THRESH:
css_class = "budget-verylow"
elif 0 < (dollars_left / budget.dollar_amount) <= self.BUDGET_LOW_THRESH:
css_class = "budget-critical"
else:
css_class = "budget-empty"
ids.append(subteam.id)
names.append(subteam.name)
css_classes.append(css_class)
cash_left.append('{0:.2f}'.format(dollars_left))
started_with.append('{0:.2f}'.format(budget.dollar_amount))
except Exception:  # no budget set for this subteam/year (or zero budget); fall back to zeros
ids.append(subteam.id)
names.append(subteam.name)
css_classes.append("")
cash_left.append(0)
started_with.append(0)
return render_template('settings/budgets/index.html', subteams=names, cash_left=cash_left,
started_with=started_with, css_classes=css_classes, fiscal_year=fiscal_year,
ids=ids, page="budgets",
thresholds=[self.BUDGET_FULL_THRESH, self.BUDGET_MEDIUM_THRESH, self.BUDGET_LOW_THRESH])
@route('/<int:fiscal_year>/<int:subteam_id>/set', methods=['GET', 'POST'])
@update_order_status_access_required
def update(self, fiscal_year, subteam_id):
"""
Changes the amount of money that a subteam is marked as having available
@param subteam_id: The database-given ID of the subteam to update the budget of
@param fiscal_year: The current FRC season
@return: Redirect to Budgets index
"""
try:
form = forms.SetBudgetForm()
existing_budget = db.session.query(Budget).filter(
and_(Budget.subteam_id == subteam_id, Budget.fiscal_year == fiscal_year)).first()
if form.validate_on_submit():
if existing_budget is None:
# Subteam didn't have a budget previously set
db.session.add(Budget(subteam_id, form.amount.data, fiscal_year))
else:
# Subteam had an existing budget; update the previous one instead of creating a new DB row
existing_budget.dollar_amount = form.amount.data
db.session.commit()
return redirect(url_for('Budgets:index', fiscal_year=fiscal_year))
else:
flash_errors(form)
return render_template('settings/budgets/set.html', form=form, page="budgets")
except Exception as e:
log_event("ERROR", e)
sentry.captureException()
abort(500)
def delete(self):
"""
No implementation
"""
pass
@route('/<int:fiscal_year>/<int:subteam_id>')
@login_required
def view_orders_by_subteam(self, fiscal_year, subteam_id):
"""
Shows a list of orders for the given subteam
@param subteam_id: The database-given ID of the subteam to update the budget of
@param fiscal_year: The current FRC season
@return: List of all orders for the given subteam, along with the member who ordered the part, and other info
"""
orders_by_subteam = db.session.query(Order).filter(
and_(Order.fiscal_year == fiscal_year, Order.part_for_subteam == subteam_id,
Order.pending_approval == False))
subteam = db.session.query(Subteam).filter(Subteam.id == subteam_id).first()
subtotal = 0
credit = 0
shipping = 0
total = 0
for order in orders_by_subteam:
subtotal += order.part_total_price
credit += order.credit
shipping += order.part_shipping_cost
total += order.total
return render_template('settings/budgets/orders-by-subteam.html', orders_by_subteam=orders_by_subteam,
total=total, credit=credit, shipping=shipping, subtotal=subtotal, subteam=subteam,
fiscal_year=fiscal_year, page="budgets")
| 2.25 | 2 |
test_edgeless.py | siweiwang24/graph-coloring | 1 | 12760053 | """
Test colorings for edgeless graphs.
Copyright 2020. <NAME>.
"""
from pytest import mark
from common import create_edgeless, parameters, len_iter, check_surjective
@mark.parametrize('vertices,colors', parameters(7, 8))
def test_edgeless(vertices: int, colors: int):
"""Test edgeless graph colorings."""
graph = create_edgeless(vertices)
colorings = graph.colorings(colors)
num_colorings = len_iter(colorings)
assert num_colorings == colors ** vertices
assert check_surjective(graph, colors, num_colorings)
| 2.75 | 3 |
Tutorials/10 Days of Statistics/Day 1 - Standard Deviation/solution.py | abhinavgunwant/hackerrank-solutions | 1 | 12760054 | <filename>Tutorials/10 Days of Statistics/Day 1 - Standard Deviation/solution.py
import math
# Get n
n = int(input())
# Get X
X = list(map(int, input().split()))
mean = sum(X) / n
#---- Calculate squared difference from mean ----
sDiff = []
for i in X:
diff = i - mean # difference
diff *= diff # squared
sDiff.append(diff)
# Calculate Standard Deviation and round off to
# nearest 1 decimal place
stdDeviation = round( math.sqrt(sum(sDiff) / n) , 1)
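# Worked example (illustrative values, not taken from the actual input):
# n = 5, X = [10, 40, 30, 50, 20] -> mean = 30, squared diffs = [400, 100, 0, 400, 100],
# variance = 1000 / 5 = 200, sqrt(200) ~= 14.142..., rounded to 14.1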
print(stdDeviation) | 3.859375 | 4 |
eval_ricord1a_timm-regnetx_002_GridDistortion.py | BrunoKrinski/segtool | 0 | 12760055 | import os
ls=["python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_0_GridDistortion.yml",
"python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_1_GridDistortion.yml",
"python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_2_GridDistortion.yml",
"python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_3_GridDistortion.yml",
"python main.py --configs configs/eval_ricord1a_unetplusplus_timm-regnetx_002_4_GridDistortion.yml",
]
for l in ls:
os.system(l) | 1.40625 | 1 |
apps/falcon/arch/temp-evaluate-viz-code.py | Roy-Tuhin/maskrcnn_sophisticate- | 0 | 12760056 |
# log.debug("len(pred_boxes), pred_boxes.shape, type(pred_boxes): {},{},{}".format(len(pred_boxes), pred_boxes.shape, type(pred_boxes)))
# log.debug("len(pred_masks), pred_masks.shape, type(pred_masks): {},{},{}".format(len(pred_masks), pred_masks.shape, type(pred_masks)))
# log.debug("len(pred_match_class_ids), type(pred_match_class_ids): {},{}".format(len(pred_match_class_ids), type(pred_match_class_ids)))
# log.info("boxes.shape, masks.shape, pred_match_class_ids.shape: {},{},{}".format(pred_boxes.shape, pred_masks.shape, pred_match_class_ids.shape))
# ## TODO: if model is trained with higher classes but dataset contains less classes; this fails because predictions bbox and masks are higher and not equal to class_ids
# ## TODO: visualize pred_match_class_ids alone, which is different from pred_class_ids; or highlight in viz which are those matched IDs
# ## TODO: batchify
# if save_viz_and_json:
# imgviz, jsonres = viz.get_display_instances(im, pred_boxes, pred_masks, pred_class_ids, class_names, pred_scores,
# colors=cc, show_bbox=False, get_mask=get_mask)
# # imgviz, jsonres = viz.get_display_instances(im, pred_boxes, pred_masks, np.array(pred_match_class_ids), class_names, pred_scores,
# # colors=cc, show_bbox=False, get_mask=get_mask)
# else:
# jsonres = viz.get_detections(im, pred_boxes, pred_masks, pred_class_ids, class_names, pred_scores,
# colors=cc, get_mask=get_mask)
# # jsonres = viz.get_detections(im, pred_boxes, pred_masks, np.array(pred_match_class_ids), class_names, pred_scores,
# # colors=cc, get_mask=get_mask)
# ## Convert Json response to VIA Json response
# ##---------------------------------------------
# # size_image = 0
# size_image = os.path.getsize(filepath_image_in)
# jsonres["filename"] = image_filename
# jsonres["size"] = size_image
# ## Create Visualisations & Save output
# ## TODO: resize the annotation and match with the original image size and not the min or max image dimenion form cfg
# ##---------------------------------------------
# time_taken_save_viz_and_json = -1
# if save_viz_and_json:
# t6 = time.time()
# # ## TODO: resize image and masks to the original image size, unable to fix the vizulaisation and mask in the original dimensions
# # ## expand the masks to original image size
# # gt_masks = utils.expand_mask(gt_boxes, gt_masks, original_image_shape)
# # gt_boxes = utils.extract_bboxes(gt_masks)
# # pred_masks = utils.expand_mask(pred_boxes, pred_masks, original_image_shape)
# # ## recompute bbox by extracting the bbox from the resized mask
# # pred_boxes = utils.extract_bboxes(pred_masks)
# # ## resize the image to the original size
# # im = utils.resize(im, original_image_shape)
# ## Annotation Visualisation & Save image
# ##---------------------------------------------
# fext = ".png"
# file_name = image_filename+fext
# log.info("saved to: file_name: {}".format(file_name))
# ## Color Mask Effect & Save image
# ##---------------------------------------------
# viz.imsave(os.path.join(filepath, 'mask', file_name), viz.color_mask(im, pred_masks))
# ## Annotation Visualisation & Save image
# ##---------------------------------------------
# viz.imsave(os.path.join(filepath, 'viz', file_name), imgviz)
# t7 = time.time()
# time_taken_save_viz_and_json = (t6 - t7)
# log.debug('Total time taken in save_viz_and_json: %f seconds' %(time_taken_save_viz_and_json))
# t8 = time.time()
# tt_turnaround = (t8 - t0)
# log.debug('Total time taken in tt_turnaround: %f seconds' %(tt_turnaround))
# jsonres['file_attributes'] = {
# 'image_read': time_taken_imread
# ,'detect': time_taken_in_detect
# ,'res_preparation': time_taken_res_preparation
# ,'time_taken_save_viz_and_json': time_taken_save_viz_and_json
# ,'tt_turnaround': tt_turnaround
# }
# ## TODO: if want to store in mongoDB, '.' (dot) should not be present in the key in the json data
# ## but, to visualize the results in VIA tool, this (dot) and size is expected
# # via_jsonres[image_filename.replace('.','-')+str(size_image)] = json.loads(common.numpy_to_json(jsonres))
# via_jsonres[image_filename+str(size_image)] = json.loads(common.numpy_to_json(jsonres))
| 2.09375 | 2 |
example/echo_srv.py | daroot/protoc-gen-twirp_python_srv | 6 | 12760057 | <reponame>daroot/protoc-gen-twirp_python_srv<gh_stars>1-10
import bjoern
import echo_pb2 as pb
from echo_twirp_srv import EchoImpl, EchoServer
class Echoer(EchoImpl):
def Repeat(self, request):
return pb.EchoResponse(output=request.input)
def RepeatMultiple(self, request):
output = request.input
if request.count > 0:
output = output * request.count
return pb.EchoResponse(output=output)
if __name__ == "__main__":
app = EchoServer(Echoer())
bjoern.run(app, "0.0.0.0", 8080)
| 2.421875 | 2 |
youtube_searcher/parse.py | OpenJarbas/youtube_searcher | 0 | 12760058 | <reponame>OpenJarbas/youtube_searcher
import bs4
import re
import json
from youtube_searcher.session import session
def _extract_json_blob(soup):
# Make sure we always get the correct blob and santize it
blob = soup.find('script', text=re.compile("ytInitialData"))
json_data = str(blob)[
str(blob).find('{\"responseContext\"'):str(blob).find(
'module={}')]
json_data = re.split(r"\};", json_data)[0]
results = json.loads(json_data + "}")
return results
def _parse_soup(soup):
results = _extract_json_blob(soup)
def parse_channel(data):
channel_data = {}
if "channelThumbnailSupportedRenderers" in data:
channel_data = data["channelThumbnailSupportedRenderers"][
'channelThumbnailWithLinkRenderer']
brws = channel_data['navigationEndpoint']['browseEndpoint']
url = "https://www.youtube.com" + brws.get('canonicalBaseUrl',
brws['browseId'])
channel_data = {
"thumbnail": channel_data['thumbnail']['thumbnails'][-1][
"url"],
"url": url,
"userId": brws['browseId']
}
elif 'ownerText' in data:
channel_data = data['ownerText']["runs"][0]
brws = channel_data['navigationEndpoint']['browseEndpoint']
url = "https://www.youtube.com" + brws.get('canonicalBaseUrl',
brws['browseId'])
channel_data = {
"name": channel_data["text"],
"userId": brws['browseId'],
"url": url
}
elif 'longBylineText' in data:
channel_data = data['longBylineText']["runs"][0]
brws = channel_data['navigationEndpoint']['browseEndpoint']
url = "https://www.youtube.com" + brws.get('canonicalBaseUrl',
brws['browseId'])
channel_data = {
"name": channel_data["text"],
"userId": brws['browseId'],
"url": url
}
elif 'shortBylineText' in data:
channel_data = data['shortBylineText']["runs"][0]
brws = channel_data['navigationEndpoint']['browseEndpoint']
url = "https://www.youtube.com" + brws.get('canonicalBaseUrl',
brws['browseId'])
channel_data = {
"name": channel_data["text"],
"userId": brws['browseId'],
"url": url
}
return channel_data
def parse_title(data):
if "title" in data:
data = data['title']
if 'simpleText' in data:
return data['simpleText']
if "runs" in data:
return " ".join([r["text"] for r in data["runs"]])
if 'accessibility' in data:
return data['accessibility']['accessibilityData']['label']
return ""
def parse_description(data):
if 'descriptionSnippet' in data:
data = data['descriptionSnippet']
if 'simpleText' in data:
return data['simpleText']
if "runs" in data:
return " ".join([r["text"] for r in data["runs"]])
if 'accessibility' in data:
return data['accessibility']['accessibilityData']['label']
return parse_title(data)
def parse_thumbnail(data):
if "thumbnail" in data:
return data['thumbnail']['thumbnails'][-1]["url"]
return ""
def parse_live(data):
if 'thumbnailOverlays' in data:
for overlay in data['thumbnailOverlays']:
if 'thumbnailOverlayTimeStatusRenderer' not in overlay:
continue
if overlay['thumbnailOverlayTimeStatusRenderer'].get(
"style", "") == "LIVE":
return True
return False
def parse_views(data):
if 'shortViewCountText' in data:
data = data['shortViewCountText']
if "runs" in data:
return " ".join([r["text"] for r in data["runs"]])
elif 'simpleText' in data:
return data['simpleText']
return ""
def parse_video(data):
title = parse_title(data)
if not title:
# NOTE: non recorded past live streams/private videos usually have
# only videoId and sometimes playlistId
return
video_data = {
"videoId": data["videoId"],
# last one is the larger size
"thumbnail": parse_thumbnail(data),
'title': title,
"description": parse_description(data),
"url": "https://www.youtube.com/watch?v=" + data["videoId"],
"channel": parse_channel(data),
"is_live": parse_live(data),
"views": parse_views(data),
}
if "playlistId" in video_data:
video_data["playlist"] = parse_playlist(data)
return video_data
def parse_playlist(data):
title = parse_title(data)
if not title or "playlistId" not in data:
return {}
url = f'https://www.youtube.com/playlist?list={data["playlistId"]}'
return {
"playlistId": data["playlistId"],
# last one is the larger size
"thumbnail": parse_thumbnail(data),
'title': parse_title(data),
"description": parse_description(data),
"url": url,
"channel": parse_channel(data)
}
def parse_dict(data):
for k, v in data.items():
if isinstance(v, dict):
if "playlistId" in v:
yield parse_playlist(v)
if "videoId" in v:
yield parse_video(v)
else:
for _ in parse_dict(v):
yield _
elif isinstance(v, list):
for _ in parse_list(v):
yield _
def parse_list(data):
for v in data:
if isinstance(v, dict):
for _ in parse_dict(v):
yield _
elif isinstance(v, list):
for _ in parse_list(v):
yield _
return [_ for _ in parse_dict(results) if _]
def extract_videos(url, user_agent='Mozilla/5.0 (X11; Linux x86_64) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/57.0.2987.110 '
'Safari/537.36'):
headers = {
'User-Agent': user_agent
}
html = session.get(url, headers=headers).text
soup = bs4.BeautifulSoup(html, 'html.parser')
for vid in _parse_soup(soup):
if vid.get("videoId"):
yield vid
def extract_playlists(url, user_agent='Mozilla/5.0 (X11; Linux x86_64) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/57.0.2987.110 '
'Safari/537.36'):
headers = {
'User-Agent': user_agent
}
html = session.get(url, headers=headers).text
soup = bs4.BeautifulSoup(html, 'html.parser')
for plist in _parse_soup(soup):
if plist.get("playlistId"):
yield plist
def extract_videos_from_playlists(url, user_agent='Mozilla/5.0 (X11; Linux '
'x86_64) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/57.0.2987.110 '
'Safari/537.36'):
for playlist in extract_playlists(url, user_agent):
for vid in extract_videos(playlist["url"], user_agent):
yield vid
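# Minimal usage sketch (the search URL below is only an example; any YouTube page whose HTML
# embeds ytInitialData should work):
# for video in extract_videos("https://www.youtube.com/results?search_query=python"):
#     print(video["title"], video["url"])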
| 2.953125 | 3 |
pirauber_project/settings.py | andrenbrandao/pirauber | 0 | 12760059 | """
Django settings for pirauber_project project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import django_heroku
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Reading .env file
env = environ.Env()
environ.Env.read_env()
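# Example .env contents (placeholder values only; the variable names match the env() lookups
# used further down in this settings module):
# DJANGO_SECRET_KEY=change-me
# DJANGO_DEBUG=True
# DATABASE_URL=postgres://user:password@localhost:5432/pirauber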
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env('DJANGO_SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DJANGO_DEBUG", False)
ALLOWED_HOSTS = ['.herokuapp.com', 'localhost', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
# 3rd party
'storages',
'crispy_forms',
'allauth',
'allauth.account',
'phonenumber_field',
'intl_tel_input',
'django_tables2',
# Local
'users.apps.UsersConfig',
'pages.apps.PagesConfig',
'rides.apps.RidesConfig',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'pirauber_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'pirauber_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
"default": env.db("DATABASE_URL", default="postgres:///pirauber")
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'pt-BR'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LOCALE_PATHS = [
os.path.join(BASE_DIR, 'locale'),
]
# Activate Django-Heroku.
django_heroku.settings(locals())
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static'), ]
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Staging and Production settings
if not DEBUG:
# Security Settings
SECURE_BROWSER_XSS_FILTER = True
X_FRAME_OPTIONS = 'DENY'
SECURE_SSL_REDIRECT = True
SECURE_HSTS_SECONDS = 3600
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
SECURE_HSTS_PRELOAD = True
SECURE_CONTENT_TYPE_NOSNIFF = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
# AWS Settings
AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
AWS_S3_CUSTOM_DOMAIN = f'{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com'
AWS_S3_OBJECT_PARAMETERS = {'CacheControl': 'max-age=86400', }
AWS_DEFAULT_ACL = None
# S3 static settings
STATIC_LOCATION = 'static'
STATIC_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{STATIC_LOCATION}/'
STATICFILES_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# S3 public media settings
PUBLIC_MEDIA_LOCATION = 'media'
MEDIA_URL = f'https://{AWS_S3_CUSTOM_DOMAIN}/{PUBLIC_MEDIA_LOCATION}/'
DEFAULT_FILE_STORAGE = 'pirauber_project.storage_backends.MediaStorage'
# Custom User
AUTH_USER_MODEL = 'users.CustomUser'
# Crispy Forms
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# django-allauth config
LOGIN_REDIRECT_URL = 'home'
ACCOUNT_LOGOUT_REDIRECT_URL = 'home'
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
SITE_ID = 1
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend',
)
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
ACCOUNT_FORMS = {
'login': 'pirauber_project.forms.CustomLoginForm',
'signup': 'pirauber_project.forms.CustomSignupForm',
}
# phonenumber_field config
PHONENUMBER_DEFAULT_REGION = 'BR'
| 1.960938 | 2 |
geo/__init__.py | hariharshankar/pygeo | 1 | 12760060 | """
Registering all the views in flask.
Includes both html and json end points.
"""
import flask
from geo.db import connection
import geo.views.form as form
import geo.views.resources as resources
import geo.views.new_resources as new_resources
import geo.views.moderation as moderation
import geo.views.moderation_submit as moderationsubmit
import geo.views.map
import geo.views.form_submit as formsubmit
import geo.views.type_summary as type_summary
import geo.views.country_summary as country_summary
import geo.views.user as user
import geo.views.index as index
# json services
import geo.views.json.location as location
import geo.views.json.menu as menu
import geo.views.json.summarydata as summarydata
import geo.views.json.linechart as linechart
import geo.views.json.get_resources as get_resources
import geo.views.json.add_ai as add_ai
import geo.views.show_ai as show_ai
import geo.views.allunits as allunits
# static html
import geo.views.static.partners as partners
app = flask.Flask(__name__)
app.secret_key = '\<KEY>>(\xb5\x92a\x87\xbf\xca3\xc9F\xec\xe3\x06aQ0\x19\xb1\xbf\xd0\xae\x8b\x8a5\xfbW\xab\x18\x08uV\x94)\xa0\x99\xfb\x0b1\x0f\xa2n\xba\xa3mya\xf8\xdfR\'F@\xd9\xb2\x10S\xf4r~\xae\x94\x1c\x7f\xd1J\x86\x1ar.m"\xdc\x18\x85\x80\xb8\x18\x1cG\x81\x1e]\xb3E\x01i\xf4\xd9_\x18\xfar\xbe`\xaa\xa7+3\x92\xe8Q'
#app.config['SERVER_NAME'] = "http://globalenergyobservatory.org/dev"
conn = connection.Db()
form.db = conn
resources.db = conn
new_resources.db = conn
menu.db = conn
geo.views.map.db = conn
location.db = conn
formsubmit.db = conn
type_summary.db = conn
country_summary.db = conn
summarydata.db = conn
linechart.db = conn
user.db = conn
index.db = conn
moderation.db = conn
moderationsubmit.db = conn
get_resources.db = conn
add_ai.db = conn
show_ai.db = conn
allunits.db = conn
# html
app.register_blueprint(form.mod)
app.register_blueprint(resources.mod)
app.register_blueprint(new_resources.mod)
app.register_blueprint(moderation.mod)
app.register_blueprint(geo.views.map.mod)
app.register_blueprint(formsubmit.mod)
app.register_blueprint(type_summary.mod)
app.register_blueprint(country_summary.mod)
app.register_blueprint(user.mod)
app.register_blueprint(index.mod)
app.register_blueprint(moderationsubmit.mod)
app.register_blueprint(add_ai.mod)
app.register_blueprint(show_ai.mod)
app.register_blueprint(allunits.mod)
# json services
app.register_blueprint(summarydata.mod)
app.register_blueprint(location.mod)
app.register_blueprint(menu.mod)
app.register_blueprint(linechart.mod)
app.register_blueprint(get_resources.mod)
# static html
app.register_blueprint(partners.mod) | 1.890625 | 2 |
FEM/Utils/__init__.py | ZibraMax/FEM | 10 | 12760061 | """Utilities
"""
from .polygonal import *
| 1.125 | 1 |
slitronomy/Optimization/proximals.py | aymgal/SLITronomy | 7 | 12760062 | __author__ = 'aymgal'
# implementations of proximal operators adapted to sparsity
import numpy as np
from slitronomy.Util import util
def prox_sparsity_wavelets(coeffs_input, step, level_const=None, level_pixels=None, l_norm=1):
"""
Apply a soft or hard threshold to all wavelet scales except the last one (the coarse scale).
"""
if l_norm not in [0, 1]:
raise ValueError("Sparsity proximal operator only defined with l0- and l1-norms")
if step == 0:
return coeffs_input
coeffs = np.copy(coeffs_input)
n_scales = coeffs.shape[0]
# apply threshold operation to all starlet scales except the coarsest
for s in range(n_scales-1):
thresh = step
if level_const is not None:
thresh *= level_const[s]
if level_pixels is not None:
thresh *= level_pixels[s, :, :]
if l_norm == 0:
coeffs[s, :, :] = util.hard_threshold(coeffs[s, :, :], thresh)
else:
coeffs[s, :, :] = util.soft_threshold(coeffs[s, :, :], thresh)
return coeffs
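# For reference, the thresholding operators called above are assumed to follow the standard
# definitions (their actual implementations live in slitronomy.Util.util):
#   soft_threshold(x, t) = sign(x) * max(|x| - t, 0)
#   hard_threshold(x, t) = x if |x| > t else 0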
def prox_positivity(image_input):
image = np.copy(image_input)
image[image < 0] = 0.
return image
def full_prox_sparsity_positivity(image, transform, inverse_transform,
weights, noise_levels, thresh, thresh_increm,
n_scales, l_norm, formulation, force_positivity):
"""
returns the proximal operator of the regularisation term
g = lambda * |Phi^T HG|_0
or
g = lambda * |Phi^T HG|_1
"""
level_const = thresh * np.ones(n_scales)
level_const[0] += thresh_increm  # possibly a stronger threshold for the first decomposition level (small-scale features)
level_pixels = weights * noise_levels
if formulation == 'analysis':
coeffs = transform(image)
elif formulation == 'synthesis':
coeffs = image
# apply proximal operator
step = 1 # because threshold is already expressed in data units
coeffs_proxed = prox_sparsity_wavelets(coeffs, step=step,
level_const=level_const,
level_pixels=level_pixels,
l_norm=l_norm)
if formulation == 'analysis':
image_proxed = inverse_transform(coeffs_proxed)
elif formulation == 'synthesis':
image_proxed = coeffs_proxed
if force_positivity and formulation == 'analysis':
image_proxed = prox_positivity(image_proxed)
# TODO: apply positivity also in 'synthesis' formulation (i.e. to coeffs in starlet space?)
return image_proxed
| 2.796875 | 3 |
cfg.py | ghost-60/TF_CenterNet | 0 | 12760063 | <reponame>ghost-60/TF_CenterNet
# common
classes_file = './data/classes/voc.names'
num_classes = 1
input_image_h = 448
input_image_w = 448
down_ratio = 4
max_objs = 150
ot_nodes = ['detector/hm/Sigmoid', "detector/wh/Relu", "detector/reg/Relu"]
moving_ave_decay = 0.9995
# train
train_data_file = './data/dataset/voc_train.txt'
batch_size = 4
epochs = 80
# learning rate
lr_type="exponential"# "exponential","piecewise","CosineAnnealing"
lr = 1e-3 # exponential
lr_decay_steps = 5000 # exponential
lr_decay_rate = 0.95 # exponential
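# Note: with the exponential schedule, the effective rate is roughly
# lr * lr_decay_rate ** (global_step / lr_decay_steps), assuming the training loop applies
# tf.train.exponential_decay (or an equivalent) to these values.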
lr_boundaries = [40000,60000] # piecewise
lr_piecewise = [0.0001, 0.00001, 0.000001] # piecewise
warm_up_epochs = 2 # CosineAnnealing
init_lr= 1e-4 # CosineAnnealing
end_lr = 1e-6 # CosineAnnealing
pre_train = True
depth = 1
# test
test_data_file = './data/dataset/voc_test.txt'
score_threshold = 0.3
use_nms = True
nms_thresh = 0.4
weight_file = './checkpoint'
write_image = True
write_image_path = './eval/JPEGImages/'
show_label = True
| 1.71875 | 2 |
converter/video/models.py | HosseinMirjalali/converter-task | 0 | 12760064 | <filename>converter/video/models.py<gh_stars>0
import uuid as uuid_lib
from django.contrib.auth import get_user_model
from django.core.validators import FileExtensionValidator
from django.db import models
from django.utils.translation import gettext_lazy as _
User = get_user_model()
class VideoRaw(models.Model):
"""
This model represents the structure for the video the user uploads for conversion
"""
# possible formats that we can convert, with extra information
REQUESTED_FORMAT_CHOICES = [
("mp4", "mp4, using libx264 codec"),
("avi", "avi, using mpeg4 codec"),
("mkv", "mkv, using libvpx codec"),
("3gp", "3gp, using h263 codec"),
]
user = models.ForeignKey(User, related_name="raw_videos", on_delete=models.CASCADE)
file = models.FileField(
upload_to="raw_videos/",
validators=[
FileExtensionValidator(allowed_extensions=["mp4", "avi", "mkv", "3gp"])
],
)
uuid = models.UUIDField(db_index=True, default=uuid_lib.uuid4, editable=False)
req_format = models.CharField(
_("The format this video should be converted to."),
max_length=3,
choices=REQUESTED_FORMAT_CHOICES,
default="mp4",
)
def __str__(self):
return "%s" % self.uuid
class VideoConverted(models.Model):
user = models.ForeignKey(
User, related_name="converted_videos", on_delete=models.CASCADE
)
file = models.FileField(
upload_to="converted_videos/",
validators=[
FileExtensionValidator(allowed_extensions=["mp4", "avi", "mkv", "3gp"])
],
blank=True,
)
uuid = models.UUIDField(
default=uuid_lib.uuid4,
editable=False,
db_index=True,
)
raw = models.ForeignKey(
VideoRaw, related_name="raw", on_delete=models.CASCADE, null=True
)
created_at = models.DateTimeField(auto_now_add=True)
expiration_time = models.DateTimeField(default=None, blank=True, null=True)
remaining_expiration_time = models.DateTimeField(blank=True, null=True)
def __str__(self):
return "%s" % self.uuid
| 2.390625 | 2 |
jsonmask_test.py | simonw/jsonmask | 5 | 12760065 | import unittest
import jsonmask
fixture = {
"kind": "plus#activity",
"etag": "\"DOKFJGXi7L9ogpHc3dzouWOBEEg/ZiaatWNPRL3cQ-I-WbeQPR_yVa0\"",
"title": "Congratulations! You have successfully fetched an explicit public activity. The attached video is your...",
"published": "2011-09-08T21:17:41.232Z",
"updated": "2011-10-04T17:25:26.000Z",
"id": "z12gtjhq3qn2xxl2o224exwiqruvtda0i",
"url": "https://plus.google.com/102817283354809142195/posts/F97fqZwJESL",
"actor": {
"id": "102817283354809142195",
"displayName": "<NAME>",
"url": "https://plus.google.com/102817283354809142195",
"image": {
"url": "https://lh4.googleusercontent.com/-yth5HLY4Qi4/AAAAAAAAAAI/AAAAAAAAPVs/fAq4PVOVBdc/photo.jpg?sz=50"
}
},
"verb": "post",
"object": {
"objectType": "note",
"content": "Congratulations! You have successfully fetched an explicit public activity. The attached video is your reward. :)",
"url": "https://plus.google.com/102817283354809142195/posts/F97fqZwJESL",
"replies": {
"totalItems": 16,
"selfLink": "https://www.googleapis.com/plus/v1/activities/z12gtjhq3qn2xxl2o224exwiqruvtda0i/comments"
},
"plusoners": {
"totalItems": 44,
"selfLink": "https://www.googleapis.com/plus/v1/activities/z12gtjhq3qn2xxl2o224exwiqruvtda0i/people/plusoners"
},
"resharers": {
"totalItems": 1,
"selfLink": "https://www.googleapis.com/plus/v1/activities/z12gtjhq3qn2xxl2o224exwiqruvtda0i/people/resharers"
},
"attachments": [{
"objectType": "video",
"displayName": "<NAME> - Never Gonna Give You Up",
"content": "Music video by <NAME> performing Never Gonna Give You Up. YouTube view counts pre-VEVO: 2,573,462 (C) 1987 PWL",
"url": "http://www.youtube.com/watch?v=dQw4w9WgXcQ",
"image": {
"url": "https://lh3.googleusercontent.com/proxy/ex1bQ9_TpVClePgZxFmCPVxYeJUHW5dixt53FLmup-q44pd1mwO6rPIPti6tDWbjitBclMm5Ou595xPEMKq2b8Qu3mQ_TzX0kOqksE8o1w=w506-h284-n",
"type": "image/jpeg",
"height": 284,
"width": 506
},
"embed": {
"url": "http://www.youtube.com/v/dQw4w9WgXcQ&hl=en&fs=1&autoplay=1",
"type": "application/x-shockwave-flash"
}
}]
},
"provider": {
"title": "Google+"
},
"access": {
"kind": "plus#acl",
"description": "Public",
"items": [{
"type": "public"
}]
}
}
filter_tests = [{
"m": 'a', "o": None, "e": None
}, {
"m": 'a', "o": {"b": 1}, "e": None
}, {
"m": 'a', "o": [{"b": 1}], "e": None
}, {
"m": None, "o": {"a": 1}, "e": {"a": 1}
}, {
"m": '', "o": {"a": 1}, "e": {"a": 1}
}, {
"m": 'a', "o": {"a": 1, "b": 1}, "e": {"a": 1}
}, {
"m": 'notEmptyStr', "o": {"notEmptyStr": ''}, "e": {"notEmptyStr": ''}
}, {
"m": 'notEmptyNum', "o": {"notEmptyNum": 0}, "e": {"notEmptyNum": 0}
}, {
"m": 'a,b', "o": {"a": 1, "b": 1, "c": 1}, "e": {"a": 1, "b": 1}
}, {
"m": 'obj/s', "o": {"obj": {"s": 1, "t": 2}, "b": 1}, "e": {"obj": {"s": 1}}
}, {
"m": 'arr/s', "o": {"arr": [{"s": 1, "t": 2}, {"s": 2, "t": 3}], "b": 1}, "e": {"arr": [{"s": 1}, {"s": 2}]}
}, {
"m": 'a/s/g,b', "o": {"a": {"s": {"g": 1, "z": 1}}, "t": 2, "b": 1}, "e": {"a": {"s": {"g": 1}}, "b": 1}
}, {
"m": 'a/*/g', "o": {"a": {"s": {"g": 3}, "t": {"g": 4}, "u": {"z": 1}}, "b": 1}, "e": {"a": {"s": {"g": 3}, "t": {"g": 4}}}
}, {
"m": 'a/*', "o": {"a": {"s": {"g": 3}, "t": {"g": 4}, "u": {"z": 1}}, "b": 3}, "e": {"a": {"s": {"g": 3}, "t": {"g": 4}, "u": {"z": 1}}}
}, {
"m": 'a(g)', "o": {"a": [{"g": 1, "d": 2}, {"g": 2, "d": 3}]}, "e": {"a": [{"g": 1}, {"g": 2}]}
}, {
"m": 'a,c', "o": {"a": [], "c": {}}, "e": {"a": [], "c": {}}
}, {
"m": 'b(d/*/z)', "o": {"b": [{"d": {"g": {"z": 22}, "b": 34}}]}, "e": {"b": [{"d": {"g": {"z": 22}}}]
}
}, {
"m": 'url,obj(url,a/url)', "o": {"url": 1, "id": '1', "obj": {"url": 'h', "a": [{"url": 1, "z": 2}], "c": 3}}, "e": {"url": 1, "obj": {"url": 'h', "a": [{"url": 1}]}}
}, {
"m": 'kind', "o": fixture, "e": {"kind": 'plus#activity'}
}, {
"m": 'object(objectType)', "o": fixture, "e": {"object": {"objectType": 'note'}}
}, {
"m": 'url,object(content,attachments/url)', "o": fixture, "e": {
"url": 'https://plus.google.com/102817283354809142195/posts/F97fqZwJESL', "object": {
"content": 'Congratulations! You have successfully fetched an explicit public activity. The attached video is your reward. :)', "attachments": [{"url": 'http://www.youtube.com/watch?v=dQw4w9WgXcQ'}]
}
}
}, {
"m": 'i', "o": [{"i": 1, "o": 2}, {"i": 2, "o": 2}], "e": [{"i": 1}, {"i": 2}]
}]
compiler_tests = {
'a': {"a": {"type": 'object'}}, 'a,b,c': {
"a": {"type": 'object'}, "b": {"type": 'object'}, "c": {"type": 'object'}
}, 'a/*/c': {
"a": {"type": 'object', "properties": {
'*': {"type": 'object', "properties": {
"c": {"type": 'object'}
}}
}}
}, 'a,b(d/*/g,b),c': {
"a": {"type": 'object'}, "b": {"type": 'array', "properties": {
"d": {"type": 'object', "properties": {
'*': {"type": 'object', "properties": {
"g": {"type": 'object'}
}}
}}, "b": {"type": 'object'}
}}, "c": {"type": 'object'}
}
}
# Filter tests
filter_test_compiled_mask = {
"a": {"type": 'object'},
"b": {
"type": 'array',
"properties": {
"d": {
"type": 'object',
"properties": {
'*': {
"type": 'object',
"properties": {
"z": {"type": 'object'}
}
}
}
},
"b": {
"type": 'array',
"properties": {
"g": {"type": 'object'}
}
}
}
},
"c": {"type": 'object'}
}
filter_test_object = {
"a": 11,
"n": 00,
"b": [{
"d": {"g": {"z": 22}, "b": 34, "c": {"a": 32}},
"b": [{"z": 33}],
"k": 99
}],
"c": 44,
"g": 99
}
filter_test_expected = {
"a": 11,
"b": [{
"d": {
"g": {
"z": 22
}
}
}],
"c": 44
}
class TestCase(unittest.TestCase):
def test_filter(self):
actual = jsonmask.apply_mask(filter_test_object, filter_test_compiled_mask)
self.assertEqual(filter_test_expected, actual)
def make_test(test):
e = test['e']
o = test['o']
m = test['m']
def _test(self):
self.assertEqual(e, jsonmask.Mask(m)(o))
_test.__doc__ = 'm = %s original = %s expected = %s\n' % (m, o, e)
return _test
def make_compiler_test(sel, expected_compiled):
def _test(self):
self.assertEqual(expected_compiled, jsonmask.compile_mask(sel))
_test.__doc__ = 'sel = %s expected = %s' % (sel, expected_compiled)
return _test
for i, test in enumerate(filter_tests):
setattr(TestCase, 'test_filter_%s' % i, make_test(test))
for i, (sel, expected) in enumerate(compiler_tests.items()):
setattr(TestCase, 'test_compiler_%s' %
i, make_compiler_test(sel, expected))
if __name__ == '__main__':
unittest.main()
| 2.296875 | 2 |
submissions/code-festival-2017-quala/c.py | m-star18/atcoder | 1 | 12760066 | <filename>submissions/code-festival-2017-quala/c.py
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
from collections import Counter
h, w = map(int, readline().split())
a = []
for i in range(h):
a += list(readline().rstrip().decode())
counter = Counter(a).values()
check = [1, h // 2 + w // 2, (h // 2) * (w // 2)]
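# Intent of `check` (inferred from the construction and the adjustments below): to rearrange
# the h x w grid so that every row and column reads as a palindrome, characters must be
# placeable as quadruples (quadrant cells), pairs (centre row/column cells) and, when both
# h and w are odd, one single centre cell.
# check[0] = singles needed, check[1] = pairs needed, check[2] = quadruples needed.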
if h % 2 == 0:
check[0] = 0
check[1] -= w // 2
if w % 2 == 0:
check[0] = 0
check[1] -= h // 2
for v in counter:
if check[2] < v // 4:
v -= 4 * check[2]
check[2] = 0
else:
check[2] -= v // 4
v %= 4
if check[1] < v // 2:
v -= 2 * check[1]
check[1] = 0
else:
check[1] -= v // 2
v %= 2
check[0] -= v
print('Yes' if all(check[i] == 0 for i in range(3)) else 'No')
| 2.609375 | 3 |
test0/executor.py | HERO5/python-learning | 1 | 12760067 | # python 3
import sys
import os
from java import jclass
from inspect import isfunction
from venv import logger
class CompileMixin:
@staticmethod
def _compile(code, funcName):
ns = {}
try:
exec(code, ns)
except Exception as e:
logger.error("code: `{0}` 编译时出错,exception: {1}".format(code, e))
raise e
func = None
may_be_function = ns[funcName]
if isfunction(may_be_function):
func = may_be_function
if not func:
logger.error("code: `{0}` 没有找到可用的函数".format(code))
raise ValueError("Code Error , function not found")
return func
def call(source, funcName, params):
func = CompileMixin._compile(source, funcName)
print("func: ", func)
return func(params)
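# Example (illustrative source string and arguments):
# src = "def add_one(x):\n    return x + 1"
# call(src, "add_one", 41)  # -> 42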
def call1(source, funcName, params):
# func must return a value of the form [Object[], Object[]]
func = CompileMixin._compile(source, funcName)
#print("func: ", func)
ResultOfCall = jclass("com.mrl.communicate.middle.ResultOfCall")
result = ResultOfCall()
resultRaw = func(params)
result.setComplete(resultRaw[0])
result.setIntermission(resultRaw[1])
return result | 2.515625 | 3 |
cardbuilder/input/word_list.py | Mindful/cardbuilder | 4 | 12760068 | from abc import ABC
from copy import copy
from typing import List, Iterable, Optional, Union
from cardbuilder.common import Language
from cardbuilder.input.word import Word, WordForm
class WordList(ABC):
"""The base class for all word lists; all word lists inherit from this class. Behaves like a Python list by
implementing iteration, length, item retrieval by index and slicing."""
def __init__(self, word_input_forms: Iterable[str], language: Language, additional_forms: Optional[List[WordForm]]):
"""
Args:
word_input_forms: Strings representing the raw input forms of each word in the word list.
language: The language of words in the word list.
additional_forms: Any additional forms, such as conjugations, which these words can be retrieved as.
"""
self.words = [Word(input_form, language, additional_forms)
for input_form in word_input_forms]
def __getitem__(self, index: Union[int, slice]) -> Union[Word, 'WordList']:
if isinstance(index, int):
return self.words[index]
elif isinstance(index, slice):
list_copy = copy(self)
list_copy.words = self.words[index]
return list_copy
else:
raise TypeError('WordList indices must be either integers or slices')
def __iter__(self):
return iter(self.words)
def __len__(self):
return len(self.words)
def __repr__(self):
return repr(self.words)
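# Usage sketch (illustrative: `SomeWordList` stands in for a concrete subclass and the
# Language member name is assumed):
# wl = SomeWordList(["run", "swim"], Language.ENGLISH, additional_forms=None)
# len(wl)    -> 2
# wl[0]      -> the Word built from "run"
# wl[0:1]    -> a new WordList containing only the first word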
| 3.546875 | 4 |
city_graph/city_io.py | MPI-IS/CityGraph | 1 | 12760069 | import os
import pickle
EXTENSION = ".citygraph"
def _fix_path(path):
# if path is None: set it to current directory
# then check that path is an existing directory
# (raises a FileNotFoundError if not)
if path is None:
path = os.getcwd()
if not os.path.isdir(path):
raise FileNotFoundError("CityGraph.city_io: " + path + " does not exist")
return path
def _get_abs_path(city, path):
# city : either a string (city name) or an instance of City
# path : absolute path to a folder with read/write access
if isinstance(city, str):
city_name = city
else:
city_name = city.name
return os.sep.join([path,
city_name]) + EXTENSION
def is_saved(city, path=None):
"""
Returns True if a city of the same name has already been saved.
:param city: city or city name
:type city: :py:class:`City<city_graph.city.City>` or str
:param str path: path of the folder where cities are saved. default: current directory
:returns: True if a city of the same name has already been saved.
:rtype: bool
"""
# set path to current directory if None.
# raise Exception if path does not exist
path = _fix_path(path)
# path to the file
path = _get_abs_path(city, path)
return os.path.isfile(path)
def save(city, path=None, overwrite=False):
"""
Save the city in a file.
:param obj or str city: city to save (:py:class:`City<city_graph.city.City>` or str)
:param str path: path of the folder where cities are saved. default: current directory
:param bool overwrite: if True, will overwrite any saved city of the same name.
default: False
:returns: the path of the file into which the city was saved
:raises: :py:class:`FileNotFoundError`: if path does not exist
:raises: :py:class:`FileExistsError`: if overwrite is False
and a city of the same name has already been saved.
"""
# set path to current directory if None.
# raise Exception if path does not exist
path = _fix_path(path)
# path to the file
path = _get_abs_path(city, path)
# file already exist, and overwrite is false:
if os.path.exists(path):
if not overwrite:
raise FileExistsError("CityGraph.city_io: cannot save in " + path + " (already exists)")
with open(path, "wb") as f:
pickle.dump(city, f)
return path
def load(city_name, path=None):
"""
:param str city_name: name of the city to load
:param str path: path of the folder where cities are saved. default: current directory
:raises: :py:class:`FileNotFoundError`: if no city of this name has been saved
:returns: An instance of :py:class:`City<city_graph.city.City>`
"""
if not is_saved(city_name, path):
raise FileNotFoundError("loading city: " + city_name + " does not exist")
path = _get_abs_path(city_name, _fix_path(path))
with open(path, "rb") as f:
city = pickle.load(f)
return city
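# Usage sketch (assumes `city` is an instance of city_graph.city.City):
# saved_path = save(city, overwrite=True)
# same_city = load(city.name)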
| 3.4375 | 3 |
nemo/collections/nlp/models/dialogue_state_tracking_generative/dialogue_metrics.py | hamjam/NeMo | 1 | 12760070 | <filename>nemo/collections/nlp/models/dialogue_state_tracking_generative/dialogue_metrics.py<gh_stars>1-10
# Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import numpy as np
class IntentSlotMetrics(object):
@staticmethod
def save_predictions(
filename,
generated_labels,
generated_slots,
ground_truth_labels,
ground_truth_slots,
generated_field,
ground_truth_field,
inputs,
):
"""
Save predictions as a jsonl file
Args:
Each arg is a list of strings (all args have the same length)
"""
docs = []
for i in range(len(generated_labels)):
docs.append(
{
"input": inputs[i],
"ground_truth": ground_truth_field[i],
"ground_truth_slots": ground_truth_slots[i],
"ground_truth_labels": ground_truth_labels[i],
"generated": generated_field[i],
"generated_slots": generated_slots[i],
"generated_labels": generated_labels[i],
}
)
with open(filename, 'w', encoding="UTF-8") as f:
for item in docs:
f.write(json.dumps(item) + "\n")
@staticmethod
def split_label_and_slots(fields, with_slots=False):
"""
Split target into label and slots when doing joint label (i.e. intent) classification and slot filling
For instance, split "reserve_restaurant\nslots: time_of_day(7pm) number_of_people(3)" into
label = "reserve_restaurant" and slots = ["time_of_day(7pm)", "number_of_people(3)"]
Args:
fields: list of strings
"""
# self.cfg.dataset.target_template == "with_slots":
labels = []
slots_list = []
for field in fields:
if with_slots:
combo = [i.strip() for i in field.split('slots:', 1)]
label = 'none'
if len(combo) == 2:
label, slots = combo
elif len(combo) == 1:
slots = combo
label = 'none'
if isinstance(slots, str):
# temporary patch for purnendu model output
if 'possible intents:' in slots:
slots = slots.split('possible intents:')[0]
slots = slots.split(', ')
else:
slots = ['None']
else:
label = field
slots = []
slots_list.append(slots)
labels.append(label)
return labels, slots_list
@staticmethod
def get_slot_filling_metrics(generated_slots, ground_truth_slots):
"""
Args:
generated_slots: list of list of strings.
Each string is slot-name and slot-value pair e.g. location(Seattle)
ground_truth_slots: list of list of strings
"""
all_recall = []
all_precision = []
all_joint_goal_accuracy = []
for i in range(len(generated_slots)):
# deduplicate and sort
ground_truth = sorted(list(set(ground_truth_slots[i])))
predicted = sorted(list(set(generated_slots[i])))
correct = [item for item in predicted if item in ground_truth]
recall = len(correct) / len(ground_truth) if len(ground_truth) > 0 else 0
precision = len(correct) / len(predicted) if len(predicted) > 0 else 0
joint_goal_accuracy = int(ground_truth == predicted)
all_recall.append(recall)
all_precision.append(precision)
all_joint_goal_accuracy.append(joint_goal_accuracy)
avg_joint_goal_accuracy = np.mean(all_joint_goal_accuracy) * 100
avg_precision = np.mean(all_precision) * 100
avg_recall = np.mean(all_recall) * 100
avg_f1 = 2 * (avg_recall * avg_precision) / (avg_recall + avg_precision + 1e-20)
return avg_precision, avg_recall, avg_f1, avg_joint_goal_accuracy
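# Worked example (hypothetical values): with
#   generated_slots    = [["time(7pm)", "people(3)"]]
#   ground_truth_slots = [["time(7pm)", "people(2)"]]
# one of the two predicted slots is correct, so precision = recall = f1 = 50.0 and the
# joint goal accuracy is 0.0 (the two slot lists are not identical).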
| 2.125 | 2 |
mtmlcv/deployment.py | mir-group/MLmtCV | 0 | 12760071 | <gh_stars>0
import sys
import numpy as np
from mtmlcv.data_loader import Data_Loader
from mtmlcv.xyz import JointAE
from mtmlcv.plot import Plot
import tensorflow as tf
import yaml
def main():
timestamp = sys.argv[1]
with open(f"{timestamp}.yaml") as fin:
args = yaml.load(fin, Loader=yaml.Loader)
# set up nn
net = JointAE(**args)
sess = tf.keras.backend.get_session()
net.load_weights(timestamp, sys.argv[2])
net.save_graph(sess, export_path=f"reload/{timestamp}")
## plot latent space
if (args["use_pe"]):
def get_pe(z):
ph_pe = net.pe_net(z, training=False)
return sess.run(ph_pe)
else:
get_pe = None
if (args["use_labels"]):
def get_label(z):
ph_label = net.label_net(z, training=False)
return sess.run(ph_label)
else:
get_label = None
filename = "../tps_300_damp0.2.npz"
testname = "../tps_800_damp0.2.npz"
for name, path in {'300':filename,
'800':testname,
#'290': "../tps_290_damp0.2.npz",
'umb':'../dist_round2.npz'}.items():
data = Data_Loader(filename=path,
shuffle=True,
input_label=["xyz"],
target_label=["pe", "label"],
n_sample=4000,
batch_size=4000,
weight_by_pe=True,
weight_by_label=True)
sess.run(data.iterator.initializer)
nextx = data.iterator.get_next()
ph_pred = net(nextx['x'], training=False)
data, pred = sess.run((nextx, ph_pred))
if "pe" in data and name == '300':
ids = np.where(data["pe_prefactor"]!=0)[0]
for k in data:
if len(data[k]) == len(data["pe"]):
data[k] = data[k][ids]
for k in pred:
if len(pred[k]) == len(data["pe"]):
pred[k] = pred[k][ids]
np.savez(f"{timestamp}_{name}_pred.npz", **pred)
intc = data["intc"]
plot = Plot(timestamp+name, data, pred, n_latent=args["n_latent"], intc=intc, save_data=True)
plot.plot(get_pe, get_label)
if __name__ == '__main__':
main()
| 2.0625 | 2 |
tangerine/factories.py | shacker/tangerine | 1 | 12760072 | import factory
import random
from faker import Faker
from titlecase import titlecase
from django.contrib.auth import get_user_model
from django.utils.text import slugify
from django.utils.timezone import get_current_timezone
from .models import Category, Post, RelatedLinkGroup, RelatedLink, Blog, Comment, AuthorPage
def gen_headline():
# faker doesn't provide a way to generate headlines in Title Case, without periods, so make our own.
fake = Faker()
return titlecase(fake.text(max_nb_chars=48).rstrip('.'))
def gen_html_content():
# faker doesn't provide raw html text, so convert the output of fake.paragraphs()
fake = Faker()
grafs = fake.paragraphs()
htmlstr = ''
for g in grafs:
htmlstr += "<p>{}</p>\n\n".format(g)
return htmlstr
def gen_comment_body():
# faker only provides sentences as a list; we want them with line breaks
fake = Faker()
sentences = fake.sentences(3)
return "\n\n".join(sentences)
def gen_tags():
# Rather than create a bazillion random tags, make a pool of 15 possible tags,
# and choose n tags from this pool to be added to calling post.
# Returns 1-5 random tags from this list:
TAGS = [
'Linux', 'Mac OS', 'Windows', 'Python', 'Perl', 'Rust', 'Go',
'JavaScript', 'Java', 'Swift', 'C++', 'PHP', 'CSS', 'SASS', 'SQL', ]
return random.sample(TAGS, random.randint(1, 5))
class CategoryFactory(factory.django.DjangoModelFactory):
class Meta:
model = Category
django_get_or_create = ('slug', )
title = factory.Faker('catch_phrase')
slug = factory.LazyAttribute(lambda o: slugify(o.title)[:20])
class PostFactory(factory.django.DjangoModelFactory):
class Meta:
model = Post
title = factory.LazyAttribute(lambda o: gen_headline())
slug = factory.LazyAttribute(lambda o: slugify(o.title)[:48])
content = factory.LazyAttribute(lambda o: gen_html_content())
author = factory.LazyAttribute(lambda o: get_user_model().objects.all().order_by('?').first())
summary = factory.Faker('text')
pub_date = factory.Faker('date_time_this_decade', tzinfo=get_current_timezone())
@factory.post_generation
# Associate zero or more tags with this post
def add_tags(self, build, extracted, **kwargs):
for tag in gen_tags():
self.tags.add(tag)
class CommentFactory(factory.django.DjangoModelFactory):
# MUST be called with a Post object as parent.
class Meta:
model = Comment
approved = factory.LazyAttribute(lambda o: True)
body = factory.LazyAttribute(lambda o: gen_comment_body())
email = factory.Faker('safe_email')
name = factory.Faker('name')
website = factory.Faker('url')
ip_address = factory.Faker('ipv4')
user_agent = factory.Faker('user_agent')
class RelatedLinkFactory(factory.django.DjangoModelFactory):
class Meta:
model = RelatedLink
# Must pass in a RelatedLinkGroup as group= when instantiating
# i.e. Most useful when instantiating a RelatedLinkGroupFactory
site_title = factory.Faker('company')
site_url = factory.Faker('url')
class RelatedLinkGroupFactory(factory.django.DjangoModelFactory):
class Meta:
model = RelatedLinkGroup
django_get_or_create = ('slug', )
@factory.post_generation
# Create related links
def related_links(self, build, extracted, **kwargs):
RelatedLinkFactory.create_batch(5, group=self)
class BlogFactory(factory.django.DjangoModelFactory):
class Meta:
model = Blog
title = factory.Faker('company')
slug = factory.LazyAttribute(lambda o: slugify(o.title)[:30])
tagline = factory.Faker('bs')
class AuthorPageFactory(factory.django.DjangoModelFactory):
class Meta:
model = AuthorPage
user = factory.LazyAttribute(lambda o: get_user_model().objects.all().order_by('?').first())
about = factory.Faker('text')
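# Usage sketch (e.g. from a test or a seed script; counts are arbitrary and the keyword for
# the parent post on CommentFactory is assumed from its docstring):
# BlogFactory()
# PostFactory.create_batch(10)
# for post in Post.objects.all():
#     CommentFactory.create_batch(3, post=post)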
| 2.34375 | 2 |
src/python_pachyderm/experimental/mixin/auth.py | sjezewski/pypachy | 2 | 12760073 | <reponame>sjezewski/pypachy
from typing import Dict, List
from grpc import RpcError
from python_pachyderm.errors import AuthServiceNotActivated
from python_pachyderm.service import Service
from python_pachyderm.experimental.service import auth_proto
# bp_to_pb: OidcConfig -> OIDCConfig
# bp_to_pb: GetOidcLoginResponse -> GetOIDCLoginResponse
class AuthMixin:
"""A mixin for auth-related functionality."""
def activate_auth(self, root_token: str = None) -> str:
"""Activates auth on the cluster. Returns the root token, an
irrevocable superuser credential that should be stored securely.
Parameters
----------
root_token : str, optional
If set, the token used as the root user login token. In general,
it is safer to not set and let Pachyderm generate one for you.
Returns
-------
str
A token used as the root user login token.
"""
return self._req(Service.AUTH, "Activate", root_token=root_token).pach_token
def deactivate_auth(self) -> None:
"""Deactivates auth, removing all ACLs, tokens, and admins from the
Pachyderm cluster and making all data publicly accessible.
Raises
------
AuthServiceNotActivated
"""
try:
self._req(Service.AUTH, "Deactivate")
except RpcError as err:
raise AuthServiceNotActivated.try_from(err)
def get_auth_configuration(self) -> auth_proto.OidcConfig:
"""Gets the auth configuration.
Returns
-------
auth_proto.OidcConfig
A protobuf object with auth configuration information.
"""
return self._req(Service.AUTH, "GetConfiguration").configuration
def set_auth_configuration(self, configuration: auth_proto.OidcConfig) -> None:
"""Sets the auth configuration.
Parameters
----------
configuration : auth_proto.OidcConfig
A protobuf object with auth configuration information.
Examples
--------
>>> client.set_auth_configuration(auth_proto.OidcConfig(
... issuer="http://localhost:1658",
... client_id="client",
... client_secret="secret",
... redirect_uri="http://test.example.com",
... ))
"""
self._req(Service.AUTH, "SetConfiguration", configuration=configuration)
def get_role_binding(
self, resource: auth_proto.Resource
) -> Dict[str, auth_proto.Roles]:
"""Returns the current set of role bindings to the resource specified.
Parameters
----------
resource : auth_proto.Resource
A protobuf object representing the resource being checked.
Returns
-------
Dict[str, auth_proto.Roles]
A dictionary mapping the principals to the roles they have.
Examples
--------
>>> client.get_role_binding(auth_proto.Resource(type=auth_proto.CLUSTER))
{
'robot:someuser': roles {
key: "clusterAdmin"
value: true
},
'pach:root': roles {
key: "clusterAdmin"
value: true
}
}
...
>>> client.get_role_binding(auth_proto.Resource(type=auth_proto.REPO, name="foobar"))
{
'user:person_a': roles {
key: "repoWriter"
value: true
},
'pach:root': roles {
key: "repoOwner"
value: true
}
}
.. # noqa: W505
"""
return self._req(
Service.AUTH, "GetRoleBinding", resource=resource
).binding.entries
def modify_role_binding(
self, resource: auth_proto.Resource, principal: str, roles: List[str] = None
) -> None:
"""Sets the roles for a given principal on a resource.
Parameters
----------
resource : auth_proto.Resource
A protobuf object representing the resource to grant the roles on.
principal : str
The principal to grant the roles for.
roles : List[str], optional
The absolute list of roles for the principal to have. If roles is
unset, the principal will have no roles.
Examples
--------
You can find some of the built-in roles here:
https://github.com/pachyderm/pachyderm/blob/master/src/auth/auth.go#L27
>>> client.modify_role_binding(
... auth_proto.Resource(type=auth_proto.CLUSTER),
... "user:someuser",
... roles=["clusterAdmin"]
... )
>>> client.modify_role_binding(
... auth_proto.Resource(type=auth_proto.REPO, name="foobar"),
... "user:someuser",
... roles=["repoWriter"]
... )
"""
self._req(
Service.AUTH,
"ModifyRoleBinding",
resource=resource,
principal=principal,
roles=roles,
)
def get_oidc_login(self) -> auth_proto.GetOidcLoginResponse:
"""Gets the OIDC login configuration.
Returns
-------
auth_proto.GetOidcLoginResponse
A protobuf object with the login configuration information.
"""
return self._req(Service.AUTH, "GetOIDCLogin")
def authenticate_oidc(self, oidc_state: str) -> str:
"""Authenticates a user to the Pachyderm cluster via OIDC.
Parameters
----------
oidc_state : str
An OIDC state token.
Returns
-------
str
A token that can be used for making authenticate requests.
"""
return self._req(Service.AUTH, "Authenticate", oidc_state=oidc_state).pach_token
def authenticate_id_token(self, id_token: str) -> str:
"""Authenticates a user to the Pachyderm cluster using an ID token
issued by the OIDC provider. The token must include the Pachyderm
client_id in the set of audiences to be valid.
Parameters
----------
id_token : str
The ID token.
Returns
-------
str
A token that can be used for making authenticate requests.
"""
return self._req(Service.AUTH, "Authenticate", id_token=id_token).pach_token
def authorize(
self,
resource: auth_proto.Resource,
permissions: List["auth_proto.Permission"] = None,
) -> auth_proto.AuthorizeResponse:
"""Tests a list of permissions that the user might have on a resource.
Parameters
----------
resource : auth_proto.Resource
The resource the user wants to test on.
permissions : List[auth_proto.Permission], optional
The list of permissions the users wants to test.
Returns
-------
auth_proto.AuthorizeResponse
A protobuf object that indicates whether the user/principal had all
the inputted permissions on the resource, which permissions the
user had, which permissions the user lacked, and the name of the
principal.
Examples
--------
>>> client.authorize(
... auth_proto.Resource(type=auth_proto.REPO, name="foobar"),
... [auth_proto.Permission.REPO_READ]
... )
authorized: true
satisfied: REPO_READ
principal: "pach:root"
"""
return self._req(
Service.AUTH, "Authorize", resource=resource, permissions=permissions
)
def who_am_i(self) -> auth_proto.WhoAmIResponse:
"""Returns info about the user tied to this `Client`.
Returns
-------
auth_proto.WhoAmIResponse
A protobuf object that returns the username and expiration for the
token used.
"""
return self._req(Service.AUTH, "WhoAmI")
def get_roles_for_permission(
self, permission: auth_proto.Permission
) -> List[auth_proto.Role]:
"""Returns a list of all roles that have the specified permission.
Parameters
----------
permission : auth_proto.Permission
The Permission enum to check for.
Returns
-------
List[auth_proto.Role]
A list of Role protobuf objects that all have the specified
permission.
Examples
--------
        All available permissions can be found in the auth proto Permission enum
>>> roles = client.get_roles_for_permission(auth_proto.Permission.REPO_READ)
.. # noqa: W505
"""
return self._req(
Service.AUTH, "GetRolesForPermission", permission=permission
).roles
def get_robot_token(self, robot: str, ttl: int = None) -> str:
"""Gets a new auth token for a robot user.
Parameters
----------
robot : str
The name of the robot user.
ttl : int, optional
The remaining lifetime of this token in seconds. If unset,
token doesn't expire.
Returns
-------
str
The new auth token.
"""
return self._req(Service.AUTH, "GetRobotToken", robot=robot, ttl=ttl).token
def revoke_auth_token(self, token: str) -> None:
"""Revokes an auth token.
Parameters
----------
token : str
The Pachyderm token being revoked.
"""
self._req(Service.AUTH, "RevokeAuthToken", token=token)
def set_groups_for_user(self, username: str, groups: List[str]) -> None:
"""Sets the group membership for a user.
Parameters
----------
username : str
The username to be added.
groups : List[str]
The groups to add the username to.
Examples
--------
>>> client.set_groups_for_user("user:someuser", ["foogroup", "bargroup"])
.. # noqa: W505
"""
self._req(Service.AUTH, "SetGroupsForUser", username=username, groups=groups)
def modify_members(
self, group: str, add: List[str] = None, remove: List[str] = None
) -> None:
"""Adds and/or removes members of a group.
Parameters
----------
group : str
The group to modify.
add : List[str], optional
A list of strings specifying members to add.
remove : List[str], optional
A list of strings specifying members to remove.
Examples
--------
>>> client.modify_members(
... "foogroup",
... add=["user:otheruser"],
... remove=["user:someuser"]
... )
"""
self._req(
Service.AUTH,
"ModifyMembers",
group=group,
add=add,
remove=remove,
)
def get_groups(self) -> List[str]:
"""Gets a list of groups this user belongs to.
Returns
-------
List[str]
List of groups the user belongs to.
"""
return self._req(Service.AUTH, "GetGroups").groups
def get_users(self, group: str) -> List[str]:
"""Gets users in a group.
Parameters
----------
group : str
The group to list users from.
Returns
-------
List[str]
All the users in the specified group.
"""
return self._req(Service.AUTH, "GetUsers", group=group).usernames
def extract_auth_tokens(self):
"""This maps to an internal function that is only used for migration.
Pachyderm's `extract` and `restore` functionality calls
`extract_auth_tokens` and `restore_auth_tokens` to move Pachyderm tokens
between clusters during migration. Currently this function is only used
for Pachyderm internals, so we're avoiding support for this function in
python-pachyderm client until we find a use for it (feel free to file an
issue in github.com/pachyderm/pachyderm).
"""
raise NotImplementedError(
"extract/restore are for testing and internal use only"
)
def restore_auth_token(self, token=None):
"""This maps to an internal function that is only used for migration.
Pachyderm's `extract` and `restore` functionality calls
`extract_auth_tokens` and `restore_auth_tokens` to move Pachyderm tokens
between clusters during migration. Currently this function is only used
for Pachyderm internals, so we're avoiding support for this function in
python-pachyderm client until we find a use for it (feel free to file an
issue in github.com/pachyderm/pachyderm).
"""
raise NotImplementedError(
"extract/restore are for testing and internal use only"
)
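# --- Illustrative usage sketch (not part of the library) ---------------------
# A minimal, hedged example of how the methods above might be combined,
# assuming a Client class that mixes in AuthMixin (as python_pachyderm
# clients do). The robot name and role below are placeholders.
def _auth_usage_sketch(client):
    # Turn auth on and capture the root token (store it securely).
    root_token = client.activate_auth()
    # Mint a robot token that expires in one hour.
    robot_token = client.get_robot_token("my-robot", ttl=3600)
    # Grant that robot cluster-admin rights.
    client.modify_role_binding(
        auth_proto.Resource(type=auth_proto.CLUSTER),
        "robot:my-robot",
        roles=["clusterAdmin"],
    )
    # Inspect the identity currently attached to this client.
    return root_token, robot_token, client.who_am_i()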
| 2.203125 | 2 |
test_without_mediator.py | mghcdac/the_ultimate_causal_inference | 0 | 12760074 | <gh_stars>0
import numpy as np
from dataset import *
from causal_inference import *
if __name__=='__main__':
variables = ['A', 'Y']
methods = ['or', 'ipw', 'dr']
model_types = {'A':'logistic', 'Y':'linear'}
random_state = 100
"""
L, A, M, Y = create_real_dataset_framing()
#L, A, M, Y = create_real_dataset_hiv_ba()
L = (L-L.mean(axis=0, keepdims=True))/L.std(axis=0, keepdims=True)
M = None
"""
"""
import dowhy.datasets
data = dowhy.datasets.linear_dataset(
beta=10,
num_common_causes=5,
num_instruments = 2,
num_treatments=1,
num_samples=10000,
treatment_is_binary=True,
outcome_is_binary=False)
L = data["df"][['Z0','Z1','W0','W1','W2','W3','W4']].values.astype(float)
A = data["df"]['v0'].values.astype(float)
Y = data["df"]['y'].values.astype(float)
M = None
"""
L, A, M, Y, betaA, betaM, betaY, betaY_AL = create_simulated_linear_dataset(
N=1000, D=3, err_std=0.1, coef_std=1, with_mediator=False,
random_state=random_state)
for k in variables:
print(f'beta{k} =', eval(f'beta{k}'))
te_y = {}
ya = {}
for method in methods:
func = eval(f'TE_{method.upper()}')
args = (eval('predict_'+model_types['A']),
None,
eval('predict_'+model_types['Y']),
L,A,M,Y,
betaA,betaM,betaY)
te_y[method], ya[method] = func(*args)
print(f'TE Y {method.upper()}', te_y[method])
lambda1 = 100
#lambda2 = 10
betaA2, betaY2, opt_message, ll, R = infer_params(
model_types, L, A, M, Y, lambda1,# lambda2,
n_iter=1, random_state=random_state)
print()
print('lambda1', lambda1)
#print('lambda2', lambda2)
print('opt_message', opt_message)
print()
for k in variables:
print(f'beta{k}2 =', eval(f'beta{k}2'))
te_y2 = {}
ya2 = {}
for method in methods:
func = eval(f'TE_{method.upper()}')
args = (eval('predict_'+model_types['A']),
None,
eval('predict_'+model_types['Y']),
L,A,M,Y,
betaA2,None,betaY2)
te_y2[method], ya2[method] = func(*args)
print(f'TE Y {method.upper()}2', te_y2[method])
breakpoint()
| 2.359375 | 2 |
kafka_burrow_reporter.py | jongho/kafka-burrow-telegraf-translator | 6 | 12760075 | from __future__ import print_function
import argparse
import httplib
import json
import re
# VERSION 0.1
# FROM https://github.com/jongho/kafka-burrow-telegraf-reporter
# This code was written with inspiration from kafka_jolokia_reporter.py (https://github.com/paksu/kafka-jolokia-telegraf-collector)
def get_http_response(conn, path):
conn.request("GET", path)
response = conn.getresponse()
assert response.status == 200
return response.read()
def get_clusters_from_burrow(conn):
path="/v2/kafka"
#print(path)
response = json.loads(get_http_response(conn, path))
if 'clusters' not in response:
return []
return response['clusters']
def get_consumers_from_burrow(conn, cluster):
path="/v2/kafka/{}/consumer".format(cluster)
#print(path)
response = json.loads(get_http_response(conn, path))
if 'consumers' not in response:
return []
return response['consumers']
def get_consumer_lag_status_from_burrow(conn, cluster, consumer):
path="/v2/kafka/{}/consumer/{}/lag".format(cluster, consumer)
#print(path)
response = json.loads(get_http_response(conn, path))
if 'status' not in response:
return {}
return response['status']
def fetch_consumer_lags_from_burrow(host, port):
conn = httplib.HTTPConnection(host, port)
consumer_lags = []
for cluster in get_clusters_from_burrow(conn):
for consumer in get_consumers_from_burrow(conn, cluster):
consumer_lags.append(get_consumer_lag_status_from_burrow(conn, cluster, consumer))
conn.close()
return consumer_lags
def get_formated_str(dictionary, keys, prefix=''):
return ','.join(['{}{}={}'.format(prefix, k, dictionary[k]) for k in keys])
def translate_lag_data(lag_data):
"""
Parses a Kafka Consumer Lag data from burrow and converts it to set of InfluxDB Line protocol
Currently supports at least Kafka 0.10.2 and Burrow (https://github.com/linkedin/Burrow 2017-03-07 commit)
https://github.com/linkedin/Burrow
https://docs.influxdata.com/influxdb/v1.2/write_protocols/line_protocol_reference/
EXAMPLE:
- FROM:
{
"cluster": "test",
"complete": true,
"group": "TestGroup",
"maxlag": null,
"partition_count": 1,
"partitions": [
{
"end": {
"lag": 0,
"max_offset": 14132620,
"offset": 14132620,
"timestamp": 1491449760344
},
"partition": 0,
"start": {
"lag": 0,
"max_offset": 14132620,
"offset": 14132620,
"timestamp": 1491449751328
},
"status": "OK",
"topic": "Common-Test"
},
...
],
"status": "OK",
"totallag": 0
}
- TO:
kafka.consumer.lag,cluster=test,group=TestGroup complete=True,totallag=0,partition_count=1
kafka.consumer.tp.lag,cluster=test,group=TestGroup,topic=Common-Test,partition=0 start.lag=0,start.max_offset=14132620,start.offset=14132620,start.timestamp=1491449751328,end.lag=0,end.max_offset=14132620,end.offset=14132620,end.timestamp=1491449751328
...
"""
metrics = []
# kafka.consumer.lag
lag_measurement = 'kafka.consumer.lag'
lag_tags = get_formated_str(lag_data, ['cluster', 'group'])
lag_fields = get_formated_str(lag_data, ['complete', 'totallag', 'partition_count'])
#print("lag_tags: {}".format(lag_tags))
#print("lag_fields: {}".format(lag_fields))
metrics.append("{},{} {}".format(lag_measurement, lag_tags, lag_fields))
# kafka.consumer.tp.lag
tp_lag_measurement = 'kafka.consumer.tp.lag'
for tp_lag_data in lag_data['partitions']:
#print("tp_lag_data: {}".format(tp_lag_data))
tg_lag_tags = lag_tags + ',' + get_formated_str(tp_lag_data, ['topic', 'partition'])
tg_lag_fields = get_formated_str(tp_lag_data['start'], ['lag', 'max_offset', 'offset', 'timestamp'], 'start.') + ',' + \
get_formated_str(tp_lag_data['end'], ['lag', 'max_offset', 'offset', 'timestamp'], 'end.')
metrics.append("{},{} {}".format(tp_lag_measurement, tg_lag_tags, tg_lag_fields))
return metrics
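# --- Illustrative example (hypothetical data, not in the original script) ----
# A minimal sketch of translate_lag_data() on the kind of payload shown in the
# docstring above; every number below is made up.
def _example_translate():
    sample = {
        "cluster": "test", "group": "TestGroup", "complete": True,
        "totallag": 0, "partition_count": 1,
        "partitions": [{
            "topic": "Common-Test", "partition": 0, "status": "OK",
            "start": {"lag": 0, "max_offset": 100, "offset": 100, "timestamp": 1},
            "end": {"lag": 0, "max_offset": 100, "offset": 100, "timestamp": 2},
        }],
    }
    # Returns two InfluxDB line-protocol strings: one for the consumer group
    # and one per topic/partition.
    return translate_lag_data(sample)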
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Kafka Burrow Reporter')
parser.add_argument('--burrow-host', default='localhost', help='Burrow host')
parser.add_argument('--burrow-port', type=int, default=8000, help='Burrow port')
args = parser.parse_args()
for lag_data in fetch_consumer_lags_from_burrow(args.burrow_host, args.burrow_port):
for line in translate_lag_data(lag_data):
print(line)
| 2.546875 | 3 |
transformer/config.py | zhebushisuibianqide/base_DialogueModels_ver.pytorch | 1 | 12760076 | # _*_ coding=utf-8 _*_
class Config():
def __init__(self):
self.device = '1'
self.data_dir = '../data'
self.logging_dir = 'log'
self.samples_dir = 'samples'
self.testing_dir = 'test_samples'
self.checkpt_dir = 'checkpoints'
self.max_srclen = 25
self.max_tgtlen = 25
self.vocab_path = '../data/vocab'
#self.embed_path = '../data/vocab_embeddings'
self.embed_path = None
self.vocab_size = 20000
self.emb_dim = 512
self.graph_seed = 123456
self.enc_num_block = 6
self.enc_head = 8
self.dec_num_block = 6
self.dec_head = 8
self.d_model = 512
self.d_k = 64
self.d_q = 64
self.d_v = 64
self.d_ff = 2048
self.dropout = 0.1
self.lr = 1e-3
self.warmming_up = 2000
self.StepLR_size = 5
self.StepLR_gamma = 0.95
self.batch_size = 512
self.total_epoch_num = 100
        self.eval_per_batch = None # set to a number, or None to disable
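# --- Illustrative usage (hedged sketch, not from the original repo) ----------
# Config is a plain hyper-parameter container; training code presumably reads
# its attributes when building the Transformer.
if __name__ == "__main__":
    cfg = Config()
    cfg.batch_size = 64   # example override
    cfg.device = '0'
    print(cfg.d_model, cfg.enc_num_block, cfg.batch_size)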
| 1.828125 | 2 |
crawler/tool.py | dgut-group-ten/crawler | 0 | 12760077 | import os
import uuid
import paramiko
import requests
def random_filename(filename):
ext = os.path.splitext(filename)[1]
new_filename = uuid.uuid4().hex + ext
return new_filename
def sftp_upload(host, port, username, password, local, remote):
sf = paramiko.Transport(host, port)
sf.connect(username=username, password=password)
sftp = paramiko.SFTPClient.from_transport(sf)
try:
        if os.path.isdir(local):  # is the local argument a directory or a single file?
            for f in os.listdir(local):  # walk the local directory
                sftp.put(os.path.join(local + f), os.path.join(remote + f))  # upload each file in the directory
        else:
            sftp.put(local, remote)  # upload a single file
except Exception as e:
print('upload exception:', e)
sf.close()
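# --- Illustrative usage (hedged sketch) ---------------------------------------
# Host, credentials and paths below are placeholders. Note that sftp_upload()
# builds remote names by string concatenation, so `local` and `remote` should
# end with a path separator when uploading a directory.
def _example_upload():
    new_name = random_filename("photo.jpg")          # e.g. '3f2a...c1.jpg'
    sftp_upload("example.com", 22, "user", "secret",
                "/tmp/uploads/", "/srv/files/")      # directory upload
    return new_name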
| 2.703125 | 3 |
pg_view/models/consumers.py | bocytko/pg_view | 402 | 12760078 | <filename>pg_view/models/consumers.py
import sys
if sys.hexversion >= 0x03000000:
from queue import Empty
else:
from Queue import Empty
class DiskCollectorConsumer(object):
""" consumes information from the disk collector and provides it for the local
collector classes running in the same subprocess.
"""
def __init__(self, q):
self.result = {}
self.cached_result = {}
self.q = q
def consume(self):
# if we haven't consumed the previous value
if len(self.result) != 0:
return
try:
self.result = self.q.get_nowait()
self.cached_result = self.result.copy()
except Empty:
# we are too fast, just do nothing.
pass
else:
self.q.task_done()
def fetch(self, wd):
data = None
if wd in self.result:
data = self.result[wd]
del self.result[wd]
elif wd in self.cached_result:
data = self.cached_result[wd]
return data
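# --- Illustrative usage (hedged sketch, not part of pg_view) ------------------
# DiskCollectorConsumer expects a JoinableQueue fed by the disk collector
# subprocess; the payload layout below ({wd: data}) is an assumption for the
# demo only.
def _example_consumer():
    from multiprocessing import JoinableQueue
    q = JoinableQueue()
    q.put({"/var/lib/postgresql": {"free": 1024}})
    consumer = DiskCollectorConsumer(q)
    consumer.consume()                            # drain one message if available
    return consumer.fetch("/var/lib/postgresql")  # -> {'free': 1024} or None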
| 2.390625 | 2 |
tests/test_middleware.py | YPCrumble/django-reflinks | 12 | 12760079 | <filename>tests/test_middleware.py
import pytest
from django_reflinks import settings
from django_reflinks.models import ReferralHit
@pytest.mark.django_db
def test_anonymous_middleware(client, admin_user, ref_link):
ref_link_url = ref_link.get_absolute_url()
response = client.get(ref_link_url, HTTP_USER_AGENT="TestAgent")
assert response.status_code == 302
assert response.url == "/"
assert ReferralHit.objects.count() == 1
hit = ReferralHit.objects.latest("created")
assert not hit.authenticated
assert not hit.hit_user
cookie = response.cookies[settings.COOKIE_KEY]
assert cookie.value == str(hit.pk)
client.force_login(admin_user)
response = client.get("/foo")
assert ReferralHit.objects.count() == 1
assert response.status_code == 404
# Cookie should have been deleted
assert response.cookies[settings.COOKIE_KEY].value == ""
# And old request should have been updated
hit = ReferralHit.objects.get(pk=hit.pk)
assert hit.hit_user == admin_user
def test_anonymous_middleware_bad_cookie(admin_client, ref_link):
admin_client.cookies.load({settings.COOKIE_KEY: "looks nothing like a uuid"})
response = admin_client.get("/foo")
assert response.status_code == 404
admin_client.cookies.load({
settings.COOKIE_KEY: "00000000-0000-0000-0000-000000000000"
})
response = admin_client.get("/foo")
assert response.status_code == 404
def test_referral_link_middleware(admin_client, ref_link):
ref_link_url = ref_link.get_absolute_url()
response = admin_client.get("/foo?ref=" + ref_link.identifier)
assert response.status_code == 302
assert response.url == ref_link_url + "?next=%2Ffoo"
response = admin_client.post("/foo?ref=" + ref_link.identifier)
assert response.status_code == 404
| 2.203125 | 2 |
CNN/DetectGesture.py | Kenji-Ogami/IoT_Group5 | 0 | 12760080 | #
# Classify gestures in captured camera images
# Captured frames are resized to 100x100 before inference
#
#---------------------------------------------------------
#import keras
import tensorflow as tf
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.layers import *
from tensorflow.python.keras.models import load_model
import numpy as np
import os
import serial
import time
from PIL import Image
import cv2
# Load the trained model
model = load_model("./original_img.h5")
model.summary()
#['c', 'p', 'g', 'b']-> [0, 1, 2, 3]
# Quick sanity check with a sample image
img_arr = []
image = Image.open('./img/g/0.png')
image = image.convert("RGB")
image = image.resize((100, 100))
data = np.asarray(image)
img_arr.append(data)
img_arr = np.array(img_arr)
img_arr = img_arr.astype('float32')/255
img_arr.shape[:]
y_pred = model.predict(img_arr)
print(y_pred)
#['c', 'p', 'g', 'b']-> [0, 1, 2, 3]
# Initialize video capture
#img_cap = cv2.VideoCapture(0)
img_cap = cv2.VideoCapture(1)
#with serial.Serial('/dev/cu.usbmodem14301', timeout=0.1) as ser:
while True:
    # Process the current video frame
img_arr = []
ret, img_base = img_cap.read()
xp = int(img_base.shape[1]) #1920
yp = int(img_base.shape[0]) #1080
cx = int(xp/2)
cy = int(yp/2)
#print(xp, " + ", yp)
resize = 100
img_crop = cv2.resize(img_base[cy-500:cy+500, cx-500:cx+500], (resize, resize))
cv2.imshow('Images for CNN', img_crop)
imgCV_RGB = img_crop[:, :, ::-1]
img_pil = Image.fromarray(imgCV_RGB)
data = np.asarray(img_pil)
img_arr.append(data)
img_arr = np.array(img_arr)
img_arr = img_arr.astype('float32')/255
img_arr.shape[:]
    # Run prediction
#['c', 'p', 'g', 'b']-> [0, 1, 2, 3]
y_pred = model.predict(img_arr)
#print(y_pred)
    # Display the result
    if y_pred[0].argmax() == 0:
        if (y_pred[0][0] > 0.7):
            print("")
    elif y_pred[0].argmax() == 1:
        if (y_pred[0][1] > 0.7):
            print("グー!!")  # rock
    elif y_pred[0].argmax() == 2:
        if (y_pred[0][2] > 0.7):
            print("チョキ!!")  # scissors
    elif y_pred[0].argmax() == 3:
        if (y_pred[0][3] > 0.7):
            print("パー!!")  # paper
if cv2.waitKey(10) == 27:
break
# Clean up (close all OpenCV windows)
cv2.destroyAllWindows()
| 2.9375 | 3 |
src/Advanced/tuplas.py | Thiago18l/Python-Projects | 0 | 12760081 | <filename>src/Advanced/tuplas.py
t1 = ('OI', 2.0, [40, 50])
print(t1[2:])
t = 1, 4, "THiago"
tupla1 = 1, 2, 3, 4, 5
tupla2 = 6, 7, 8, 9, 10
print(tupla1 + tupla2) # concatenates the tuples
| 2.921875 | 3 |
src/assignments/main_assignment2.py | acc-cosc-1336/cosc-1336-spring-2018-vcruz350 | 0 | 12760082 | <gh_stars>0
from assignment2 import faculty_evaluation_result
'''Write code to call the faculty_evaluation_result function with data of your choice'''
print(faculty_evaluation_result(0,9,20,6,9,32))
| 2.765625 | 3 |
new_training_loop.py | mimilazarova/dd2412_project_fixmatch_and_beyond | 0 | 12760083 | import os
import logging
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
logging.getLogger('tensorflow').disabled = True
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from tqdm import tqdm, tqdm_notebook
from augment import CTAugment
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
import math
from error import test_error
class OurCosineDecay(tf.keras.experimental.CosineDecay):
def __call__(self, step):
with ops.name_scope_v2(self.name or "CosineDecay"):
initial_learning_rate = ops.convert_to_tensor_v2(
self.initial_learning_rate, name="initial_learning_rate")
dtype = initial_learning_rate.dtype
decay_steps = math_ops.cast(self.decay_steps, dtype)
global_step_recomp = math_ops.cast(step, dtype)
global_step_recomp = math_ops.minimum(global_step_recomp, decay_steps)
completed_fraction = global_step_recomp / decay_steps
cosine_decayed = math_ops.cos(
constant_op.constant(7 / 16 * math.pi) * completed_fraction)
decayed = (1 - self.alpha) * cosine_decayed + self.alpha
return math_ops.multiply(initial_learning_rate, decayed)
def training(model, full_x_l, full_x_u, full_y_l, hparams, n_classes, file_name, log_interval=200):
def weak_transformation(x):
x = tf.image.random_flip_left_right(x)
max_shift = tf.cast(x.shape[1] * 0.125, dtype=tf.dtypes.int32)
shift = tf.random.uniform([x.shape[0], 2], minval=-max_shift, maxval=max_shift, dtype=tf.dtypes.int32)
return tfa.image.translate(x, tf.cast(shift, tf.dtypes.float32))
def pseudolabel(class_dist):
argmax = tf.math.argmax(class_dist, axis=1)
return tf.one_hot(argmax, class_dist.shape[1])
def threshold_gate(one_hot, logits, threshold):
max_probs = tf.math.multiply(one_hot, tf.nn.softmax(logits))
return tf.cast(max_probs > threshold, max_probs.dtype) # * max_probs
def sample_labeled_data(ds=full_x_l, y=full_y_l, batch_size=hparams['B']):
total_samples = ds.shape[0]
if total_samples >= batch_size:
choices = np.random.choice(np.arange(total_samples), batch_size, replace=False)
else:
choices = np.random.choice(np.arange(total_samples), batch_size, replace=True)
x_l = ds[choices, :, :, :]
y_l = y[choices]
return x_l, y_l
def step(x_l, y_l, x_u):
with tf.GradientTape() as tape:
# labeled data
x_l_weak = weak_transformation(x_l)
output_l_weak = model(x_l_weak, True)
loss_l = loss_fn_l(y_l, output_l_weak)
# update CTAugment weights
x_l_strong, choices, bins = cta.augment_batch(x_l)
output_l_strong = model(x_l_strong, True)
cta.update_weights_batch(y_l, output_l_strong, choices, bins)
# unlabeled data
x_u_weak = weak_transformation(x_u)
output_u_weak = model(x_u_weak, True)
y_u = pseudolabel(output_u_weak)
y_u = threshold_gate(y_u, output_u_weak, hparams['tau'])
x_u_strong, choices, bins = cta.augment_batch(x_u)
output_u_strong = model(x_u_strong, True)
loss_u = loss_fn_u(y_u, output_u_strong)
# add losses together
loss = loss_l + hparams['lamda'] * loss_u
gradients = tape.gradient(loss, model.trainable_weights)
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
schedule = OurCosineDecay(hparams['eta'], hparams['K'])
#optimizer = tf.keras.optimizers.SGD(schedule, momentum=hparams['beta'], nesterov=hparams['nesterov'])
optimizer = tfa.optimizers.SGDW(hparams['weight_decay'],
schedule, momentum=hparams['beta'],
nesterov=hparams['nesterov'])
loss_fn_u = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
loss_fn_l = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
cta = CTAugment(hparams['cta_classes'], hparams['cta_decay'], hparams['cta_threshold'], hparams['cta_depth'])
# ds_l = tf.data.Dataset.from_tensor_slices((full_x_l, full_y_l))
ds_u = tf.data.Dataset.from_tensor_slices(full_x_u)
# split into batches
# ds_l = ds_l.batch(hparams['B']).prefetch(-1)
ds_u = ds_u.batch(int(hparams['mu'] * hparams['B'])).prefetch(-1)
# if type casting needed: x = tf.cast(x, tf.float32)
training_step = 0
epoch = 0
best_training_accuracy = 0
# for epoch in range(hparams['epochs']):
# for (x_l, y_l), x_u in tqdm(zip(ds_l, ds_u), desc='epoch {}/{}'.format(epoch + 1, hparams['epochs']),
# total=val_interval, ncols=100, ascii=True):
# training_step += 1
# step(x_l, y_l, x_u)
#for epoch in range(hparams['epochs']):
while training_step < hparams['K']:
epoch += 1
for x_u in tqdm(ds_u, desc='epoch {}'.format(epoch),
total=hparams['total'], ncols=100, ascii=True):
training_step += 1
x_l, y_l = sample_labeled_data()
step(x_l, y_l, x_u)
if training_step >= hparams['K']:
break
err = test_error(model, full_x_l, full_y_l)
logging.info('epoch: {}, labeled accuracy: {}'.format(epoch, err))
if err > best_training_accuracy:
best_training_accuracy = err
tf.keras.models.save_model(model, filepath=file_name)
return model
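# --- Illustrative call (hypothetical hyper-parameters, not from the repo) -----
# The keys below mirror the ones read inside training(); the values are made up
# and the model/data arguments are assumed to be prepared by the caller.
def _example_run(model, x_l, x_u, y_l):
    hparams = {
        "B": 64, "mu": 7, "tau": 0.95, "lamda": 1.0,
        "eta": 0.03, "K": 2 ** 16, "beta": 0.9, "nesterov": True,
        "weight_decay": 5e-4, "total": len(x_u) // (7 * 64),
        "cta_classes": 10, "cta_decay": 0.99,
        "cta_threshold": 0.8, "cta_depth": 2,
    }
    return training(model, x_l, x_u, y_l, hparams,
                    n_classes=10, file_name="./checkpoint")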
| 2.046875 | 2 |
astropop/math/array.py | rudnerlq/astropop | 3 | 12760084 | <reponame>rudnerlq/astropop
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Small module for simple matrix operations. Possibly deprecated in the future.
"""
import numpy as np
__all__ = ['xy2r', 'iraf_indices', 'trim_array']
def xy2r(x, y, data, xc, yc):
"""Convert (x, y) values to distance of a (xc, yc) position."""
r = np.hypot((x-xc), (y-yc))
return np.ravel(r), np.ravel(data)
def trim_array(data, box_size, position, indices=None, origin=0):
"""Trim a numpy array around a position."""
x, y = position
# Correct for 1-based indexing
x += origin
y += origin
dx = dy = float(box_size)/2
x_min = max(int(x-dx), 0)
x_max = min(int(x+dx)+1, data.shape[1])
y_min = max(int(y-dy), 0)
y_max = min(int(y+dy)+1, data.shape[0])
d = data[y_min:y_max, x_min:x_max]
if indices is None:
return d, x-x_min, y-y_min
else:
xi = indices[1][y_min:y_max, x_min:x_max]
yi = indices[0][y_min:y_max, x_min:x_max]
return d, xi, yi
def iraf_indices(data):
"""Create (x, y) index arrays from a data matrix using IRAF convention.
Iraf convention means (0, 0) on the center of bottom-left pixel.
"""
y, x = np.indices(data.shape)
# Matches (0, 0) to the center of the pixel
# FIXME: Check carefully if this is needed
# x = x - 0.5
# y = y - 0.5
return x, y
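# --- Illustrative example (hedged sketch, not part of the module) -------------
# A tiny demonstration of the helpers above on synthetic data; the shape and
# the (xc, yc) centre are arbitrary.
def _example_usage():
    data = np.arange(100, dtype=float).reshape(10, 10)
    x, y = iraf_indices(data)                       # index grids for the image
    r, flat = xy2r(x, y, data, xc=5.0, yc=5.0)      # radial distance per pixel
    cut, xi, yi = trim_array(data, box_size=4, position=(5, 5))
    return r.shape, flat.shape, cut.shape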
| 2.828125 | 3 |
modules/skaldship/metasploit.py | kimdane/Kvasir | 0 | 12760085 | # -*- coding: utf-8 -*-
__version__ = "1.0"
"""
##--------------------------------------#
## Kvasir
##
## (c) 2010-2013 Cisco Systems, Inc.
##
## Metasploit Utilities for Kvasir
##
## Author: <NAME> <<EMAIL>>
##--------------------------------------#
"""
from gluon import current
import logging
from skaldship.hosts import get_host_record, do_host_status
logger = logging.getLogger("web2py.app.kvasir")
##-------------------------------------------------------------------------
def msf_get_config(session={}):
"""
Returns a dict of metasploit configuration settings based on yaml or session
"""
msf_config = current.globalenv['settings']['kvasir_config'].get('metasploit') or {}
config = {}
config['key'] = session.get('msf_key', msf_config.get('api_key'))
config['url'] = session.get('msf_url', msf_config.get('url', 'https://localhost:3790'))
config['ws_num'] = session.get('msf_workspace_num', 1)
config['workspace'] = session.get('msf_workspace', 'default')
config['user'] = session.get('msf_user', None)
return config
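# Illustrative return value (hedged; the key is a placeholder, the other
# defaults come from the lookups above):
# {'key': '<api key>', 'url': 'https://localhost:3790', 'ws_num': 1,
#  'workspace': 'default', 'user': None}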
##-------------------------------------------------------------------------
def process_pwdump_loot(loot_list=[], msf=None):
"""
Takes an array of loot records in loot_list, downloads the pwdump file and
adds the users.
"""
from skaldship.passwords import process_password_file, insert_or_update_acct
db = current.globalenv['db']
cache = current.globalenv['cache']
logging.debug('loot_list = %s' % (loot_list))
data = []
for loot_id in loot_list:
loot = msf.loot_download(loot_id)
if loot['ltype'] not in ['host.windows.pwdump', 'windows.hashes']:
logging.error("Loot is not a pwdump, it is a %s" % loot['ltype'])
continue
else:
# process the pwdump file
pw_data = loot['data'].split('\n')
accounts = process_password_file(
pw_data=pw_data,
file_type='PWDUMP',
source='Metasploit',
)
# find the info/0 service id for the host
host_id = get_host_record(loot['host'])
query = (db.t_services.f_number == '0') & (db.t_services.f_proto == 'info') & (db.t_services.f_hosts_id == host_id)
svc_id = db(query).select().first()
if svc_id is None:
# info/0 not found.. add it!
svc_id = db.t_services.insert(f_proto="info", f_number="0", f_status="info", f_hosts_id=host_id)
db.commit()
# insert or update the account records
resp_text = insert_or_update_acct(svc_id.id, accounts)
logging.info("Added pwdump records for host: %s" % (loot['host']))
data.append({ loot['host']: resp_text })
return data
##-------------------------------------------------------------------------
def process_screenshot_loot(loot_list=[], msf=None):
"""
Takes an array of loot records in loot_list, downloads the screenshot and
adds it to the database
"""
db = current.globalenv['db']
cache = current.globalenv['cache']
loot_count = 0
for loot_id in loot_list:
loot = msf.loot_download(loot_id)
ip = loot_dict[loot_id]
if loot['ltype'] != 'host.windows.screenshot':
logging.error(" [!] %s/%s is not a screenshot, it is a %s" % (ip, loot['name'], loot['ltype']))
else:
record = get_host_record(ip)
if not record:
logging.error(" [!] Cannot find record for %s" % (ip))
continue
db.t_evidence.update_or_insert(
f_hosts_id = record.id,
f_filename = "%s-msfpro-%s.png" % (ip, loot['name']),
f_evidence = "%s-msfpro-%s.png" % (ip, loot['name']),
f_data = loot['data'],
f_type = 'Screenshot',
f_text = 'From MetasploitPRO'
)
db.commit()
loot_count += 1
return loot_count
##-------------------------------------------------------------------------
def process_loot_files(loot_list=[]):
"""
Processes locally stored (to web2py) MSF password loot files into the
account database.
Args:
loot_list: an array of [filename, settings.password_file_types, port, host_id]
Returns:
An array of [filename, result text]
"""
from skaldship.passwords import process_password_file, insert_or_update_acct
import os
db = current.globalenv['db']
data = []
for loot in loot_list:
        if isinstance(loot, list):
(filename, file_type, port) = loot
else:
logger.error("Invalid loot sent: %s" % (loot))
continue
try:
(proto, number) = port.split('/')
except AttributeError, e:
logger.error("Invalid port sent: %s", port)
try:
            pw_data = open(filename, "rb").read().split('\n')
except IOError, e:
logger.error("Error opening %s: %s" % (filename, e))
accounts = process_password_file(
pw_data=pw_data,
file_type=file_type,
source='Metasploit',
)
# find the info/0 service id for the host
host_id = get_host_record(loot['host'])
query = (db.t_services.f_number == number) & (db.t_services.f_proto == proto) & (db.t_services.f_hosts_id == host_id)
svc_id = db(query).select().first()
if svc_id is None:
# info/0 not found.. add it!
svc_id = db.t_services.insert(f_proto=proto, f_number=number, f_hosts_id=host_id)
db.commit()
# insert or update the account records
resp_text = insert_or_update_acct(svc_id.id, accounts)
logging.info("Added loot accounts for host: %s" % ())
data.append({ loot['host']: resp_text })
##-------------------------------------------------------------------------
def process_report_xml(
filename=None,
ip_ignore_list=None,
ip_include_list=None,
engineer=1,
asset_group="Metasploit Import",
update_hosts=True,
):
"""
Processes a Metasploit XML Export for the following data and adds to the db:
- Hosts and services
- Credentials
Generate the XML report by using db_export -t xml filename.xml or through WebUI
TODO: Auto-exploits successful exploit attemps if matching CVE/VulnDB entry found
"""
db = current.globalenv['db']
cache = current.globalenv['cache']
try:
from lxml import etree
except ImportError:
try:
import xml.etree.cElementTree as etree
except ImportError:
try:
import xml.etree.ElementTree as etree
except:
raise Exception("Unable to find valid ElementTree module.")
# build the hosts only/exclude list
ip_exclude = []
if ip_ignore_list:
ip_exclude = ip_ignore_list.split('\r\n')
# TODO: check for ip subnet/range and break it out to individuals
ip_only = []
if ip_include_list:
ip_only = ip_include_list.split('\r\n')
# TODO: check for ip subnet/range and break it out to individuals
print(" [*] Processing Metasploit Pro report file: %s" % (filename))
try:
xml = etree.parse(filename)
except etree.ParseError, e:
raise Exception(" [!] Invalid XML file (%s): %s " % (filename, e))
return
root = xml.getroot()
# parse the hosts now
hosts = root.findall("hosts/host")
print(" [-] Parsing %d hosts" % (len(hosts)))
stats = {}
stats['hosts_added'] = 0
stats['hosts_skipped'] = 0
stats['hosts_updated'] = 0
stats['services_added'] = 0
stats['services_updated'] = 0
stats['accounts_added'] = 0
stats['accounts_updated'] = 0
from gluon.validators import IS_IPADDRESS
from skaldship.passwords import lookup_hash
for host in hosts:
didwhat = "Unknown"
if host.findtext('state') != "alive":
stats['hosts_skipped'] += 1
continue
hostfields = {}
ipaddr = host.findtext('address')
if len(ip_only) > 0 and ipaddr not in ip_only:
print(" [-] Node is not in the only list... skipping")
#sys.stderr.write(msg)
stats['hosts_skipped'] += 1
continue
if IS_IPADDRESS(is_ipv4=True)(ipaddr)[1] == None:
# address is IPv4:
hostfields['f_ipv4'] = ipaddr
elif IS_IPADDRESS(is_ipv6=True)(ipaddr)[1] == None:
hostfields['f_ipv6'] = ipaddr
else:
logger.error("Invalid IP Address in report: %s" % (ipaddr))
print(" [!] Invalid IP Address in report: %s" % (ipaddr))
continue
macaddr = host.findtext('mac')
if macaddr:
hostfields['f_macaddr'] = macaddr
hostname = host.findtext('name')
if hostname:
hostfields['f_hostname'] = hostname
# check to see if IP exists in DB already
if hostfields.has_key('f_ipv4'):
host_rec = db(db.t_hosts.f_ipv4 == hostfields['f_ipv4']).select().first()
else:
host_rec = db(db.t_hosts.f_ipv6 == hostfields['f_ipv6']).select().first()
if host_rec is None:
hostfields['f_asset_group'] = asset_group
hostfields['f_engineer'] = engineer
host_id = db.t_hosts.insert(**hostfields)
db.commit()
stats['hosts_added'] += 1
print(" [-] Adding IP: %s" % (ipaddr))
#sys.stderr.write(msg)
elif host_rec is not None and update_hosts:
db.commit()
if hostfields.has_key('f_ipv4'):
host_id = db(db.t_hosts.f_ipv4 == hostfields['f_ipv4']).update(**hostfields)
db.commit()
host_id = get_host_record(hostfields['f_ipv4'])
host_id = host_id.id
stats['hosts_updated'] += 1
print(" [-] Updating IP: %s" % (hostfields['f_ipv4']))
else:
host_id = db(db.t_hosts.f_ipv6 == hostfields['f_ipv6']).update(**hostfields)
db.commit()
host_id = get_host_record(hostfields['f_ipv6'])
host_id = host_id.id
stats['hosts_updated'] += 1
print(" [-] Updating IP: %s" % (hostfields['f_ipv6']))
else:
stats['hosts_skipped'] += 1
db.commit()
print(" [-] Skipped IP: %s" % (ipaddr))
continue
# add the <info> and <comments> as a note to the host
info_note = host.findtext('info') or ''
if info_note.startswith('Domain controller for '):
db.t_netbios.update_or_insert(
f_hosts_id=host_id,
f_type="PDC",
f_domain=info_note[22:].upper()
)
else:
db.t_host_notes.update_or_insert(
f_hosts_id=host_id,
f_note=info_note,
)
db.commit()
for comment in host.findall('comments/comment'):
db.t_host_notes.update_or_insert(
f_hosts_id=host_id,
f_note=comment.text,
)
# process the services, adding any new
for svc in host.findall('services/service'):
f_number = svc.findtext('port')
f_proto = svc.findtext('proto')
f_status = svc.findtext('state')
f_name = svc.findtext('name') or ''
f_banner = svc.findtext('info') or ''
if f_name in ['http', 'https']:
f_name = f_name.upper()
query = (db.t_services.f_proto==f_proto) & (db.t_services.f_number==f_number) & (db.t_services.f_hosts_id==host_id)
svc_row = db(query).select().first()
if svc_row:
# we found a service record! Check for similar status, names and banners
do_update = False
if svc_row.f_status != f_status:
svc_row.f_status=f_status
do_update = True
if svc_row.f_name != f_name:
svc_row.f_name=f_name
do_update = True
if svc_row.f_banner != f_banner:
svc_row.f_banner=f_banner
do_update = True
if do_update:
svc_row.update_record()
db.commit()
didwhat = "Updated"
stats['services_updated'] += 1
else:
# we have a new service!
svc_id = db.t_services.insert(
f_proto=f_proto,
f_number=f_number,
f_status=f_status,
f_name=f_name,
f_banner=f_banner,
f_hosts_id=host_id
)
db.commit()
didwhat = "Added"
stats['services_added'] += 1
print(" [-] %s service: (%s) %s/%s" % (didwhat, ipaddr, f_proto, f_number))
for cred in host.findall('creds/cred'):
# handle credential data
f_password = None
f_compromised = False
cred_type = cred.findtext('ptype')
if cred_type == "smb_hash":
# add smb hashes to info/0 service
query = (db.t_services.f_proto=='info') & (db.t_services.f_number=='0') & (db.t_services.f_hosts_id==host_id)
svc_row = db(query).select().first()
if not svc_row:
svc_id = db.t_services.insert(f_proto='info', f_number='0', f_hosts_id=host_id)
else:
svc_id = svc_row.id
pwhash = cred.findtext('pass')
                f_password = lookup_hash(pwhash)
(lm, nt) = pwhash.split(':')
user = cred.findtext('user')
query = (db.t_accounts.f_services_id==svc_id) & (db.t_accounts.f_username.upper()==user.upper())
acct_row = db(query).select().first()
if acct_row:
# we have an account already, lets see if the hashes are in there
h1 = acct_row.f_hash1
if isinstance(h1, str):
if acct_row.f_hash1.upper() != lm.upper():
acct_row.f_hash1=lm.upper()
acct_row.f_hash1_type = "LM"
acct_row.f_hash2=nt.upper()
acct_row.f_hash2_type = "NT"
if f_password:
acct_row.f_compromised = True
acct_row.f_password = <PASSWORD>
if not acct_row.f_source:
acct_row.f_source = "Metasploit Import"
acct_row.update_record()
db.commit()
stats['accounts_updated'] += 1
didwhat = "Updated"
else:
# add a new account record
if f_password:
f_compromised = True
else:
f_compromised = False
acct_data = dict(
f_services_id=svc_id,
f_username=user,
                        f_password=f_password,
f_compromised=f_compromised,
f_hash1=lm.upper(),
f_hash1_type='LM',
f_hash2=nt.upper(),
f_hash2_type='NT',
f_source="Metasploit Import"
)
acct_id = db.t_accounts.insert(**acct_data)
db.commit()
stats['accounts_added'] += 1
didwhat = "Added"
elif cred_type == 'smb_challenge':
# add smb challenge hashes to info/0 service
query = (db.t_services.f_proto=='info') & (db.t_services.f_number=='0') & (db.t_services.f_hosts_id==host_id)
svc_row = db(query).select().first()
if not svc_row:
svc_id = db.t_services.insert(f_proto='info', f_number='0', f_hosts_id=host_id)
else:
svc_id = svc_row.id
user = cred.findtext('user')
query = (db.t_accounts.f_services_id==svc_id) & (db.t_accounts.f_username.upper()==user.upper())
acct_row = db(query).select().first()
if acct_row:
# we have an account already, lets see if the hashes are in there
h1 = acct_row.f_hash1
if isinstance(h1, str):
if acct_row.f_hash1.upper() != lm.upper():
acct_row.f_password = <PASSWORD>
acct_row.f_hash1 = pwhash.upper()
acct_row.f_hash1_type = 'NTCHALLENGE'
acct_row.f_domain = cred.findtext('proof')
if not acct_row.f_source:
acct_row.f_source = "Metasploit Capture"
acct_row.update_record()
db.commit()
stats['accounts_updated'] += 1
didwhat = "Updated"
else:
# new account record
f_password = lookup_hash(pwhash)
if f_password:
f_compromised = True
else:
f_compromised = False
acct_data = dict(
f_services_id=svc_id,
f_username=user,
f_password=f_password,
f_compromised=f_compromised,
f_hash1=pwhash.upper(),
f_hash1_type='NTCHALLENGE',
f_source="Metasploit Capture"
)
acct_id = db.t_accounts.insert(**acct_data)
db.commit()
stats['accounts_added'] += 1
didwhat = "Added"
else:
# for cred_type == 'password' or 'exploit':
# add regular password
f_proto = 'tcp'
f_number = cred.findtext('port')
if f_number == '445':
f_proto='info'
f_number='0'
query = (db.t_services.f_proto==f_proto) & (db.t_services.f_number==f_number) & (db.t_services.f_hosts_id==host_id)
svc_row = db(query).select().first()
if not svc_row:
svc_id = db.t_services.insert(f_proto=f_proto, f_number=f_number, f_hosts_id=host_id)
else:
svc_id = svc_row.id
f_password = cred.findtext('pass')
if f_password == "*<PASSWORD>*":
f_password = ''
user = cred.findtext('user')
svcname = cred.findtext('sname')
# do some case mangling for known variations we want in all upper case
if svcname == "vnc":
user = "vnc"
query = (db.t_accounts.f_services_id==svc_id) & (db.t_accounts.f_username.upper()==user.upper())
acct_row = db(query).select().first()
f_source = cred.findtext('type')
if f_source == 'captured':
f_source = "Metasploit Capture"
else:
f_source = "Metasploit Import"
if acct_row:
# we have an account already, lets see if the hashes are in there
if acct_row.f_password != f_password:
                        acct_row.f_password = f_password
acct_row.f_compromised = True
if not acct_row.f_source:
acct_row.f_source = f_source
acct_row.update_record()
db.commit()
stats['accounts_updated'] += 1
didwhat = "Updated"
else:
# new account record
acct_data = dict(
f_services_id=svc_id,
f_username=user,
                        f_password=f_password,
f_source=f_source,
f_compromised=True
)
acct_id = db.t_accounts.insert(**acct_data)
db.commit()
stats['accounts_added'] += 1
didwhat = "Added"
print(" [-] Account %s: (%s) %s" % (didwhat, ipaddr, user))
do_host_status()
msg = " [*] Import complete: hosts: (%s/A, %s/U, %s/S) - services: (%s/A, %s/U), creds: (%s/A, %s/U)"\
% (
stats['hosts_added'],
stats['hosts_updated'],
stats['hosts_skipped'],
stats['services_added'],
stats['services_updated'],
stats['accounts_added'],
stats['accounts_updated']
)
print(msg)
return msg
| 1.992188 | 2 |
Vision/colorDetection/colorDetection.py | nicolasiscoding/IEEEOpenCV2016 | 0 | 12760086 | import cv2
import numpy
def colorDetection(image):
    # Convert the image to the HSV color space
hsvImage = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
testImage = image
#Ranges for color detection
lowerYellow = numpy.array([20, 100, 100])
upperYellow = numpy.array([30,255, 255])
lowerBlue = numpy.array([85,100,100])
upperBlue = numpy.array([124,255,255])
lowerRed = numpy.array([0,100,100])
upperRed = numpy.array([19,255,255])
#Ranges applied to the hsvImage
yMask = cv2.inRange(hsvImage, lowerYellow, upperYellow)
bMask = cv2.inRange(hsvImage, lowerBlue, upperBlue)
rMask = cv2.inRange(hsvImage, lowerRed, upperRed)
#Finding the contours on the image
yContours, yHierarchy = cv2.findContours(yMask, cv2.cv.CV_RETR_TREE,
cv2.cv.CV_CHAIN_APPROX_SIMPLE)
bContours, bHierarchy = cv2.findContours(bMask, cv2.cv.CV_RETR_TREE,
cv2.cv.CV_CHAIN_APPROX_SIMPLE)
rContours, rHierarchy = cv2.findContours(rMask, cv2.cv.CV_RETR_TREE,
cv2.cv.CV_CHAIN_APPROX_SIMPLE)
#Given at least one yellow contour
if len(yContours) > 0:
# Find the index of the largest contour
yAreas = [cv2.contourArea(i) for i in yContours]
yMaxIndex = numpy.argmax(yAreas)
yCnt = yContours[yMaxIndex]
#Find coordinate for boundary rectangle
yx,yy,yw,yh = cv2.boundingRect(yCnt)
#Draw rectangle
cv2.rectangle(testImage,(yx-15,yy-15),(yx+yw+15,yy+yh+15),(0,255,255),0)
#Given at least one blue contour
if len(bContours) > 0:
# Find the index of the largest contour
bAreas = [cv2.contourArea(i) for i in bContours]
bMaxIndex = numpy.argmax(bAreas)
bCnt = bContours[bMaxIndex]
#Find coordinate for boundary rectangle
bx,by,bw,bh = cv2.boundingRect(bCnt)
#Draw rectangle
cv2.rectangle(testImage,(bx-15,by-15),(bx+bw+15,by+bh+15),(255,0,0),0)
#Given at least one red contour
if len(rContours) > 0:
# Find the index of the largest contour
rAreas = [cv2.contourArea(i) for i in rContours]
rMaxIndex = numpy.argmax(rAreas)
rCnt = rContours[rMaxIndex]
#Find coordinate for boundary rectangle
rx,ry,rw,rh = cv2.boundingRect(rCnt)
#Draw rectangle
cv2.rectangle(testImage,(rx-15,ry-15),(rx+rw+15,ry+rh+15),(0,0,255),0)
# #Displaying the masks individually and the final image
# cv2.imshow('Yellow Mask', yMask)
# cv2.imshow('Blue Mask', bMask)
# cv2.imshow('Red Mask', rMask)
# cv2.imshow('Altered image', testImage)
return testImage
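# --- Illustrative usage on a still image (hedged sketch, not in the original) -
# colorDetection() only needs a BGR image, so a frame read from disk with
# cv2.imread behaves the same as a camera frame; the path is a placeholder.
def detect_from_file(path='sample.jpg'):
    image = cv2.imread(path)
    if image is None:
        raise IOError('could not read ' + path)
    return colorDetection(image)  # frame with bounding boxes drawn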
def main():
#Default Camera (cv2.videocapture(-1) the parameter indexes your cameras)
camera = cv2.VideoCapture(-1)
while camera.isOpened():
_, image = camera.read()
cv2.imshow('Original', image)
rectImg = colorDetection(image)
cv2.imshow('Color Detector', rectImg)
cv2.waitKey(5)
if __name__ == '__main__':
main()
| 3.234375 | 3 |
asyncstdlib/_core.py | mgorny/asyncstdlib | 0 | 12760087 | <reponame>mgorny/asyncstdlib<filename>asyncstdlib/_core.py
from inspect import iscoroutinefunction
from typing import (
TypeVar,
AsyncIterator,
Iterable,
AsyncIterable,
Union,
Generic,
Optional,
Awaitable,
Callable,
)
T = TypeVar("T")
AnyIterable = Union[Iterable[T], AsyncIterable[T]]
class Sentinel:
"""Placeholder with configurable ``repr``"""
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
__ITER_SENTINEL = Sentinel("<no default>")
def aiter(subject: AnyIterable[T]) -> AsyncIterator[T]:
"""
An async iterator object yielding elements from ``subject``
:raises TypeError: if ``subject`` does not support any iteration protocol
The ``subject`` must support
the async iteration protocol (the :py:meth:`object.__aiter__` method),
the regular iteration protocol (the :py:meth:`object.__iter__` method),
or it must support the sequence protocol (the :py:meth:`object.__getitem__`
method with integer arguments starting at 0).
In either case, an async iterator is returned.
"""
if isinstance(subject, AsyncIterable):
return subject.__aiter__()
else:
return _aiter_sync(subject).__aiter__()
async def _aiter_sync(iterable: Iterable[T]) -> AsyncIterator[T]:
"""Helper to provide an async iterator for a regular iterable"""
for item in iterable:
yield item
class ScopedIter(Generic[T]):
"""Context manager that provides and cleans up an iterator for an iterable"""
def __init__(self, iterable: AnyIterable[T]):
self._iterable = iterable
self._iterator: Optional[AsyncIterator[T]] = None
async def __aenter__(self) -> AsyncIterator[T]:
assert self._iterator is None, f"{self.__class__.__name__} is not re-entrant"
self._iterator = aiter(self._iterable)
self._iterable = None
return self._iterator
async def __aexit__(self, exc_type, exc_val, exc_tb):
try:
aclose = self._iterator.aclose()
except AttributeError:
pass
else:
await aclose
return False
async def borrow(iterator: AsyncIterator):
"""Borrow an async iterator for iteration, preventing it from being closed"""
async for item in iterator:
yield item
def awaitify(
function: Union[Callable[..., T], Callable[..., Awaitable[T]]]
) -> Callable[..., Awaitable[T]]:
"""Ensure that ``function`` can be used in ``await`` expressions"""
if iscoroutinefunction(function):
return function
else:
return Awaitify(function)
class Awaitify(Generic[T]):
"""Helper to peek at the return value of ``function`` and make it ``async``"""
__slots__ = "__wrapped__", "_async_call"
def __init__(self, function: Union[Callable[..., T], Callable[..., Awaitable[T]]]):
self.__wrapped__ = function
self._async_call: Optional[Callable[..., Awaitable[T]]] = None
def __call__(self, *args, **kwargs) -> Awaitable[T]:
async_call = self._async_call
if async_call is None:
value = self.__wrapped__(*args, **kwargs)
if isinstance(value, Awaitable):
self._async_call = self.__wrapped__
return value
else:
self._async_call = force_async(self.__wrapped__)
return await_value(value)
else:
return async_call(*args, **kwargs)
async def await_value(value: T) -> T:
return value
def force_async(call: Callable[..., T]) -> Callable[..., Awaitable[T]]:
async def async_wrapped(*args, **kwargs):
return call(*args, **kwargs)
return async_wrapped
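# --- Illustrative usage (hedged sketch, not part of the package) --------------
# aiter() wraps a plain iterable and awaitify() lets a synchronous callable be
# awaited; the numbers below are arbitrary.
async def _example_usage():
    double = awaitify(lambda x: x * 2)    # sync callable, now awaitable
    total = 0
    async for item in aiter([1, 2, 3]):   # sync iterable -> async iterator
        total += await double(item)
    return total                          # 12 when awaited, e.g. via asyncio.run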
| 2.359375 | 2 |
uw_uwnetid/tests/test_category.py | uw-it-aca/uw-restclients-uwnetid | 1 | 12760088 | <reponame>uw-it-aca/uw-restclients-uwnetid<gh_stars>1-10
# Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from unittest import TestCase
from uw_uwnetid.category import get_netid_categories
from uw_uwnetid.models import Category
from uw_uwnetid.util import fdao_uwnetid_override
@fdao_uwnetid_override
class Office365EduSubsTest(TestCase):
def test_get_netid_categories(self):
cats = get_netid_categories('javerage', [])
self.assertEquals(len(cats), 4)
self.assertEquals(cats[0].category_code, 4)
self.assertEquals(cats[3].status_name, "Active")
def test_get_netid_category_25(self):
cats = get_netid_categories('javerage', [25])
self.assertEquals(len(cats), 1)
self.assertEquals(cats[0].category_code, 25)
self.assertEquals(cats[0].status_name, "Active")
| 2.015625 | 2 |
nasa01/apod.py | gjesionowski/mycode | 0 | 12760089 | #!/usr/bin/env python3
import requests
from pprint import pprint as pp # part of the standard library
from datetime import date
# import webbrowser
## define some constants
NASAAPI = 'https://api.nasa.gov/planetary/apod?'
with open('nasa_api_key', 'r') as file:
MYKEY = "&api_key=" + file.read().replace('\n', '')
## pretty print json
def main():
"""run-time code"""
## Variables
today = date.today()
d = "&date=" + today.isoformat()
print("The default request is for today's date. Would you like to request a particular date? Yes/No:")
answer = input()
if answer == 'Yes' or answer == 'yes':
d = "&date=" + input("Enter the date in the format YEAR-MM-DD:")
nasaapiobj = requests.get(NASAAPI + MYKEY + d) # call the webservice
nasaread = nasaapiobj.json() # parse the JSON blob returned
# Show converted json
print(nasaread) # show converted JSON without pprint
input('\nThis is converted json. Press ENTER to continue.') # pause for enter
# Show Pretty Print json
pp(nasaread) # this is pretty print in action
# pprint.pprint(convertedjson) # if you do a simple import pprint, the result is a long usage
input('\nThis is pretty printed JSON. Press ENTER to continue.') # pause for ENTER
# Print the description of the photo we are about to view
print(nasaread['explanation']) # display the value for the key explanation
print("Link to the APOD:", nasaread.get('hdurl',"No HD URL for today!"))
#input('\nPress ENTER to view this photo of the day') # pause for ENTER
# webbrowser.open(nasaread['hdurl']) # open in the webbrowser
main()
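# --- Illustrative variant (hedged sketch, not part of the original script) ----
# The same request can be made with requests' params argument instead of string
# concatenation; the date below is a placeholder.
def fetch_apod(date_str="2023-01-01"):
    with open('nasa_api_key', 'r') as f:
        key = f.read().strip()
    resp = requests.get("https://api.nasa.gov/planetary/apod",
                        params={"api_key": key, "date": date_str})
    return resp.json()  # dict with keys such as 'explanation' and 'hdurl'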
| 3.734375 | 4 |
homeworks/hw7/num2.py | NikaEgorova/goiteens-python3-egorova | 0 | 12760090 | <gh_stars>0
avtobots = {"О<NAME>": "<NAME> 379",
"Бамблбі": "Chevrolet Camaro", "Джаз": "Porsche 935 Turbo"}
for key in avtobots:
if key == "Оптімус Прайм":
        print("Оптімус Прайм прибув")  # "Optimus Prime has arrived"
| 2.828125 | 3 |
src/ui_pages.py | vincentX3/TrainingRecords | 0 | 12760091 | from PyQt5.QtWidgets import QWidget
from ui_home import Ui_home
from ui_sidebar import Ui_sidebar
from ui_history import Ui_history
from ui_analysis import Ui_analysis
from ui_settings import Ui_settings
class Home(QWidget, Ui_home):
def __init__(self):
super(Home, self).__init__()
self.setupUi(self)
class Sidebar(QWidget, Ui_sidebar):
def __init__(self):
super(Sidebar, self).__init__()
self.setupUi(self)
class History(QWidget, Ui_history):
def __init__(self):
super(History, self).__init__()
self.setupUi(self)
class Analysis(QWidget, Ui_analysis):
def __init__(self):
super(Analysis, self).__init__()
self.setupUi(self)
class Settings(QWidget, Ui_settings):
def __init__(self):
super(Settings, self).__init__()
self.setupUi(self) | 2.21875 | 2 |
test/test_delete_contact_in_group.py | Tatyana1985/python_training | 0 | 12760092 | from model.contact import Contact
from model.group import Group
import random
def test_delete_contact_in_group(app, orm):
if len(orm.get_group_list()) == 0:
app.group.create(Group(name="test"))
if len(orm.get_contact_list()) == 0:
app.contact.add_contact(Contact(firstname="asdfg", middlename="asdfg", lastname="asdfg", nickname="asdfg", title="asdfg",
company="asdfg", address="asdfg", homephone="565656", mobilephone="677565", workphone="76876687", fax="67678678",
email="<EMAIL>", email2="<EMAIL>",email3="<EMAIL>", homepage="<EMAIL>",
day="7", month="August", year="2000",
address2="trtyyt", secondaryphone="75757", notes="uiygfhjkf"))
contact = random.choice(orm.get_contact_list())
group = random.choice(orm.get_group_list())
if len(orm.get_groups_this_contacts(contact)) == 0:
app.contact.add_contact_in_group(contact, group)
groups_this_contacts_before = orm.get_groups_this_contacts(contact)
group_for_del = random.choice(groups_this_contacts_before)
app.contact.delete_contact_in_group(contact, group_for_del)
groups_this_contacts_after = orm.get_groups_this_contacts(contact)
    groups_this_contacts_before.remove(group_for_del)
assert sorted(groups_this_contacts_before, key=Contact.id_or_max) == sorted(groups_this_contacts_after, key=Contact.id_or_max) | 2.5625 | 3 |
ThinkMatch/src/displacement_layer.py | hpi-sam/gnn_graph_counting_query_matching | 0 | 12760093 | import torch
import torch.nn as nn
from torch import Tensor
class Displacement(nn.Module):
r"""
Displacement Layer computes the displacement vector for each point in the source image, with its corresponding point
(or points) in target image.
The output is a displacement matrix constructed from all displacement vectors.
This metric measures the shift from source point to predicted target point, and can be applied for matching
accuracy.
Together with displacement matrix d, this function will also return a grad_mask, which helps to filter out dummy
nodes in practice.
.. math::
\mathbf{d}_i = \sum_{j \in V_2} \left( \mathbf{S}_{i, j} P_{2j} \right)- P_{1i}
Proposed by `"<NAME> al. Deep Learning of Graph Matching. CVPR 2018."
<http://openaccess.thecvf.com/content_cvpr_2018/html/Zanfir_Deep_Learning_of_CVPR_2018_paper.html>`_
"""
def __init__(self):
super(Displacement, self).__init__()
def forward(self, s: Tensor, P_src: Tensor, P_tgt: Tensor, ns_gt: Tensor=None):
r"""
:param s: :math:`(b\times n_1 \times n_2)` permutation or doubly stochastic matrix. :math:`b`: batch size.
:math:`n_1`: number of nodes in source image. :math:`n_2`: number of nodes in target image
:param P_src: :math:`(b\times n_1 \times 2)` point set on source image
:param P_tgt: :math:`(b\times n_2 \times 2)` point set on target image
:param ns_gt: :math:`(b)` number of exact pairs. We support batched instances with different number of nodes,
therefore ``ns_gt`` is required to specify the exact number of nodes of each instance in the batch.
:return: displacement matrix d,
mask for dummy nodes grad_mask. If ``ns_gt=None``, it will not be calculated and None is returned.
"""
if ns_gt is None:
max_n = s.shape[1]
P_src = P_src[:, 0:max_n, :]
grad_mask = None
else:
grad_mask = torch.zeros_like(P_src)
for b, n in enumerate(ns_gt):
grad_mask[b, 0:n] = 1
d = torch.matmul(s, P_tgt) - P_src
return d, grad_mask | 3.59375 | 4 |
tests/unit/plugins/openstack/scenarios/vm/test_vmtasks.py | ewhseo/rally | 0 | 12760094 | <reponame>ewhseo/rally<filename>tests/unit/plugins/openstack/scenarios/vm/test_vmtasks.py
# Copyright 2013: Rackspace UK
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally import exceptions
from rally.plugins.openstack.scenarios.vm import vmtasks
from tests.unit import test
class VMTasksTestCase(test.ScenarioTestCase):
def setUp(self):
super(VMTasksTestCase, self).setUp()
self.context.update({"user": {"keypair": {"name": "keypair_name"},
"credential": mock.MagicMock()}})
self.scenario = vmtasks.VMTasks(context=self.context)
self.ip = {"id": "foo_id", "ip": "foo_ip", "is_floating": True}
self.scenario._boot_server_with_fip = mock.Mock(
return_value=("foo_server", self.ip))
self.scenario._wait_for_ping = mock.Mock()
self.scenario._delete_server_with_fip = mock.Mock()
self.scenario._create_volume = mock.Mock(
return_value=mock.Mock(id="foo_volume"))
self.scenario._run_command = mock.MagicMock(
return_value=(0, "\"foo_out\"", "foo_err"))
self.scenario.add_output = mock.Mock()
def test_boot_runcommand_delete(self):
self.scenario._run_command = mock.MagicMock(
return_value=(0, "{\"foo\": 42}", "foo_err"))
self.scenario.boot_runcommand_delete(
"foo_image", "foo_flavor",
command={"script_file": "foo_script",
"interpreter": "foo_interpreter"},
username="foo_username",
            password="foo_password",
use_floating_ip="use_fip",
floating_network="ext_network",
force_delete="foo_force",
volume_args={"size": 16},
foo_arg="foo_value")
self.scenario._create_volume.assert_called_once_with(
16, imageRef=None)
self.scenario._boot_server_with_fip.assert_called_once_with(
"foo_image", "foo_flavor", key_name="keypair_name",
use_floating_ip="use_fip", floating_network="ext_network",
block_device_mapping={"vdrally": "foo_volume:::1"},
foo_arg="foo_value")
self.scenario._wait_for_ping.assert_called_once_with("foo_ip")
self.scenario._run_command.assert_called_once_with(
"foo_ip", 22, "foo_username", "foo_password",
command={"script_file": "foo_script",
"interpreter": "foo_interpreter"})
self.scenario._delete_server_with_fip.assert_called_once_with(
"foo_server", self.ip, force_delete="foo_force")
self.scenario.add_output.assert_called_once_with(
additive={"title": "Command output", "chart_plugin": "Lines",
"data": [["foo", 42.0]]})
def test_boot_runcommand_delete_command(self):
self.scenario.boot_runcommand_delete(
"foo_image", "foo_flavor",
command={"remote_path": "foo"},
username="foo_username",
password="<PASSWORD>",
use_floating_ip="use_fip",
floating_network="ext_network",
force_delete="foo_force",
volume_args={"size": 16},
foo_arg="foo_value")
self.scenario._create_volume.assert_called_once_with(
16, imageRef=None)
self.scenario._boot_server_with_fip.assert_called_once_with(
"foo_image", "foo_flavor", key_name="keypair_name",
use_floating_ip="use_fip", floating_network="ext_network",
block_device_mapping={"vdrally": "foo_volume:::1"},
foo_arg="foo_value")
self.scenario._run_command.assert_called_once_with(
"foo_ip", 22, "foo_username", "<PASSWORD>",
command={"remote_path": "foo"})
self.scenario._delete_server_with_fip.assert_called_once_with(
"foo_server", self.ip, force_delete="foo_force")
def test_boot_runcommand_delete_script_fails(self):
self.scenario._run_command = mock.MagicMock(
return_value=(1, "\"foo_out\"", "foo_err"))
self.assertRaises(exceptions.ScriptError,
self.scenario.boot_runcommand_delete,
"foo_image", "foo_flavor", "foo_interpreter",
"foo_script", "foo_username")
self.scenario._delete_server_with_fip.assert_called_once_with(
"foo_server", self.ip, force_delete=False)
self.assertFalse(self.scenario.add_output.called)
def test_boot_runcommand_delete_command_timeouts(self):
self.scenario._run_command.side_effect = exceptions.SSHTimeout()
self.assertRaises(exceptions.SSHTimeout,
self.scenario.boot_runcommand_delete,
"foo_image", "foo_flavor", "foo_interpreter",
"foo_script", "foo_username")
self.scenario._delete_server_with_fip.assert_called_once_with(
"foo_server", self.ip, force_delete=False)
self.assertFalse(self.scenario.add_output.called)
def test_boot_runcommand_delete_ping_wait_timeouts(self):
self.scenario._wait_for_ping.side_effect = exceptions.TimeoutException(
resource_type="foo_resource",
resource_name="foo_name",
resource_id="foo_id",
desired_status="foo_desired_status",
resource_status="foo_resource_status")
exc = self.assertRaises(exceptions.TimeoutException,
self.scenario.boot_runcommand_delete,
"foo_image", "foo_flavor", "foo_interpreter",
"foo_script", "foo_username",
wait_for_ping=True)
self.assertEqual(exc.kwargs["resource_type"], "foo_resource")
self.assertEqual(exc.kwargs["resource_name"], "foo_name")
self.assertEqual(exc.kwargs["resource_id"], "foo_id")
self.assertEqual(exc.kwargs["desired_status"], "foo_desired_status")
self.assertEqual(exc.kwargs["resource_status"], "foo_resource_status")
self.scenario._delete_server_with_fip.assert_called_once_with(
"foo_server", self.ip, force_delete=False)
self.assertFalse(self.scenario.add_output.called)
@mock.patch("rally.plugins.openstack.scenarios.vm.vmtasks.json")
def test_boot_runcommand_delete_json_fails(self, mock_json):
mock_json.loads.side_effect = ValueError()
self.assertRaises(exceptions.ScriptError,
self.scenario.boot_runcommand_delete,
"foo_image", "foo_flavor", "foo_interpreter",
"foo_script", "foo_username")
self.scenario._delete_server_with_fip.assert_called_once_with(
"foo_server", self.ip, force_delete=False)
self.assertFalse(self.scenario.add_output.called)
def test_boot_runcommand_delete_custom_image(self):
context = {
"user": {
"tenant_id": "tenant_id",
"credential": mock.Mock()
},
"tenant": {
"custom_image": {"id": "image_id"}
}
}
scenario = vmtasks.VMTasks(context)
scenario.boot_runcommand_delete = mock.Mock()
scenario.boot_runcommand_delete_custom_image(
flavor="flavor_id",
command={
"script_file": "foo_script",
"interpreter": "bar_interpreter"},
username="username")
scenario.boot_runcommand_delete.assert_called_once_with(
image="image_id", flavor="flavor_id", username="username",
command={
"script_file": "foo_script",
"interpreter": "bar_interpreter"}
)
@mock.patch("rally.plugins.openstack.scenarios.vm.vmtasks.heat")
@mock.patch("rally.plugins.openstack.scenarios.vm.vmtasks.sshutils")
def test_runcommand_heat(self, mock_sshutils, mock_heat):
fake_ssh = mock.Mock()
fake_ssh.execute.return_value = [0, "key:val", ""]
mock_sshutils.SSH.return_value = fake_ssh
fake_stack = mock.Mock()
fake_stack.stack.outputs = [{"output_key": "gate_node",
"output_value": "ok"}]
mock_heat.main.Stack.return_value = fake_stack
context = {
"user": {"keypair": {"name": "name", "private": "pk"},
"credential": "ok"},
"tenant": {"networks": [{"router_id": "1"}]}
}
scenario = vmtasks.VMTasks(context)
scenario.generate_random_name = mock.Mock(return_value="name")
scenario.add_output = mock.Mock()
workload = {"username": "admin",
"resource": ["foo", "bar"]}
scenario.runcommand_heat(workload, "template",
{"file_key": "file_value"},
{"param_key": "param_value"})
expected = {"chart_plugin": "Table",
"data": {"rows": [["key", "val"]],
"cols": ["key", "value"]},
"description": "Data generated by workload",
"title": "Workload summary"}
scenario.add_output.assert_called_once_with(complete=expected)
| 1.914063 | 2 |
store_app/views.py | nastinsk/django-models | 0 | 12760095 | from django.views.generic import ListView, DetailView
from .models import Product
class HomePageView(ListView):
template_name = 'home.html'
model = Product
context_object_name = 'products'
class ProductDetailView(DetailView):
template_name = 'product_detail.html'
model = Product
| 1.890625 | 2 |
kaplot/astro/utils.py | maartenbreddels/kaplot | 0 | 12760096 | <filename>kaplot/astro/utils.py
yformat = u"%(d1)i\xB0%(d2)02i'%(d3)02.2f''"
xformat = u"%(hour)ih%(minute)02im%(second)02.2fs"
def getFormatDict(tick):
degree1 = int(tick)
degree2 = int((tick * 100 - degree1 * 100))
degree3 = ((tick * 10000 - degree1 * 10000 - degree2 * 100))
tick = (tick + 360) % 360
totalhours = float(tick * 24.0 / 360)
hours = int(totalhours)
minutes = int((totalhours*60 - hours*60))
seconds = (totalhours * 3600 - hours * 3600 - minutes * 60)
values = {"d1":degree1, "d2":degree2, "d3":degree3, "hour":hours, "minute":minutes, "second":seconds}
return values
def formatX(tick):
values = getFormatDict(tick)
return xformat % values
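# Rough worked example (assuming the coordinate is given in decimal degrees):
# formatX(180.0) -> "12h00m0.00s", i.e. 180 degrees corresponds to 12 hours of right ascension.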
def formatY(tick):
values = getFormatDict(tick)
return yformat % values
| 2.765625 | 3 |
tests/services/segmentor/test_special_characters.py | MDAkramSiddiqui/vwo-python-sdk | 14 | 12760097 | # Copyright 2019-2021 Wingify Software Pvt. Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import json
import sys
from vwo.services.segmentor.segment_evaluator import SegmentEvaluator
with open("tests/data/segmentor_test_cases.json") as json_file:
segmentor_test_cases = json.load(json_file)
class TestSpecialCharacters(unittest.TestCase):
def setUp(self):
self.segment_evaluator = SegmentEvaluator()
self.test_cases = segmentor_test_cases.get("special_characters")
def test_test_special_character_pound(self):
test_case = self.test_cases.get("test_special_character_pound")
if sys.version_info[0] < 3:
test_case["custom_variables"]["eq"] = test_case["custom_variables"]["eq"].encode("utf-8")
self.assertIs(
self.segment_evaluator.evaluate(test_case.get("dsl"), test_case.get("custom_variables")),
test_case.get("expectation"),
)
| 2.515625 | 3 |
package/Network.py | MehdiSaffar/cmpe487-term-project | 0 | 12760098 | <filename>package/Network.py
import asyncio as aio
import socket
import janus
import fcntl
import json
import struct
import sys
import netifaces
from .constants import *
from .utils import sock_recvfrom, sock_sendto
class Network():
def __init__(self):
self.is_ready = False
# Cannot initialize queues here since they depend on asyncio loop to be running
self.recv_q = None
self.tcp_send_q = None
self.udp_send_q = None
self.udp_port = UDP_PORT
self.tcp_port = TCP_PORT
self.get_interface_info(sys.argv[1])
print('self.ip', self.ip)
print('self.broadcast_ip', self.broadcast_ip)
def get_interface_info(self, interface_name):
interface = netifaces.ifaddresses(interface_name)[netifaces.AF_INET][0]
self.ip = interface['addr']
self.broadcast_ip = interface['broadcast']
async def _udp_recv_loop(self, addr):
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
sock.setblocking(False)
sock.bind(addr)
while True:
line, addr = await sock_recvfrom(self.loop, sock, 1000)
print(addr)
if addr[0] == self.ip:
continue
line = line.decode('utf-8').strip()
print('_udp_recv_loop', addr, line)
await self.recv_q.async_q.put(('udp', (addr[0], self.udp_port), line))
except Exception as e:
print('_udp_recv_loop', e)
async def _tcp_recv_loop(self, addr):
async def read_all(sock: socket.socket):
line = bytearray()
while True:
buffer = await self.loop.sock_recv(sock, 1024)
if not buffer:
return line
line += buffer
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.setblocking(False)
sock.bind(addr)
sock.listen()
while True:
client, addr = await self.loop.sock_accept(sock)
with client:
line = await read_all(client)
line = line.decode('utf-8').strip()
await self.recv_q.async_q.put(('tcp', (addr[0], self.tcp_port), line))
except Exception as e:
print('_tcp_recv_loop', e)
async def _tcp_send_loop(self):
try:
while True:
addr, data = await self.tcp_send_q.async_q.get()
#print('tcp_send', addr, data)
await self._tcp_send(addr, data)
self.tcp_send_q.async_q.task_done()
except Exception as e:
print('_tcp_send_loop', e)
async def _udp_send_loop(self):
try:
while True:
addr, data = await self.udp_send_q.async_q.get()
print('udp_send', addr, data)
await self._udp_send(addr, data)
self.udp_send_q.async_q.task_done()
except Exception as e:
print('_udp_send_loop', e)
async def _tcp_send(self, addr, data):
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.setblocking(False)
await self.loop.sock_connect(sock, addr)
await self.loop.sock_sendall(sock, json.dumps(data).encode('utf8'))
except Exception as e:
print('_tcp_send', e)
async def _udp_send(self, addr, data):
try:
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
sock.setblocking(False)
if addr[0] == '<broadcast>':
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
_addr = (self.broadcast_ip if addr[0] == '<broadcast>' else addr[0], addr[1])
await sock_sendto(self.loop, sock, json.dumps(data).encode('utf8'), _addr)
except Exception as e:
print('_udp_send', e)
def send(self, packet):
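        # packet is a (type, ip, data) tuple; this is the thread-side entry point that
        # routes the outgoing message to the matching janus queue (sync_q side)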
type, ip, data = packet
if type == 'udp':
self.udp_send_q.sync_q.put(((ip, self.udp_port), data))
elif type == 'tcp':
self.tcp_send_q.sync_q.put(((ip, self.tcp_port), data))
else:
raise Exception(f'Unknown type {type}')
async def main(self):
self.recv_q = janus.Queue()
self.udp_send_q = janus.Queue()
self.tcp_send_q = janus.Queue()
self.loop = aio.get_running_loop()
udp_listen_loop_task = aio.create_task(
self._udp_recv_loop(('', self.udp_port)))
tcp_listen_loop_task = aio.create_task(
self._tcp_recv_loop(('', self.tcp_port)))
tcp_send_loop_task = aio.create_task(self._tcp_send_loop())
udp_send_loop_task = aio.create_task(self._udp_send_loop())
self.is_ready = True
await aio.wait([udp_listen_loop_task, tcp_listen_loop_task, tcp_send_loop_task, udp_send_loop_task])
| 2.65625 | 3 |
jsonpath_ng/bin/jsonpath.py | transfluxus/jsonpath-ng | 339 | 12760099 | <gh_stars>100-1000
#!/usr/bin/python
# encoding: utf-8
# Copyright © 2012 <NAME> <<EMAIL>>
# This work is free. You can redistribute it and/or modify it under the
# terms of the Do What The Fuck You Want To Public License, Version 2,
# as published by Sam Hocevar. See the COPYING file for more details.
# Use modern Python
from __future__ import unicode_literals, print_function, absolute_import
# Standard Library imports
import json
import sys
import glob
import argparse
# JsonPath-RW imports
from jsonpath_ng import parse
def find_matches_for_file(expr, f):
return expr.find(json.load(f))
def print_matches(matches):
print('\n'.join(['{0}'.format(match.value) for match in matches]))
def main(*argv):
parser = argparse.ArgumentParser(
description='Search JSON files (or stdin) according to a JSONPath expression.',
formatter_class=argparse.RawTextHelpFormatter,
epilog="""
Quick JSONPath reference (see more at https://github.com/kennknowles/python-jsonpath-rw)
atomics:
$ - root object
`this` - current object
operators:
path1.path2 - same as xpath /
path1|path2 - union
path1..path2 - somewhere in between
fields:
fieldname - field with name
* - any field
[_start_?:_end_?] - array slice
[*] - any array index
""")
parser.add_argument('expression', help='A JSONPath expression.')
parser.add_argument('files', metavar='file', nargs='*', help='Files to search (if none, searches stdin)')
args = parser.parse_args(argv[1:])
expr = parse(args.expression)
glob_patterns = args.files
if len(glob_patterns) == 0:
# stdin mode
print_matches(find_matches_for_file(expr, sys.stdin))
else:
# file paths mode
for pattern in glob_patterns:
for filename in glob.glob(pattern):
with open(filename) as f:
print_matches(find_matches_for_file(expr, f))
def entry_point():
main(*sys.argv)
| 2.9375 | 3 |
catalog/views.py | l-a-motta/talehub | 0 | 12760100 | # Django imports
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect, Http404
from django.urls import reverse
# Internal imports
from .models import Book, Chapter
# External imports
from django.utils import timezone
# * Function to list the five latest books
def index(request):
# We get a list (maximum of 10 books as of this version) with all books that have
# been published before now, ordered by their publishing time
latest_books_list = Book.objects.filter(published_at__lte=timezone.now()).order_by('-published_at')[:10]
context = {'latest_books_list': latest_books_list}
return render(request, 'catalog/index.html', context)
# * Function to show all details of a specific book, including a list of chapters
def details(request, book_id):
try:
# The book needs the right ID, and to be published before now().
book = Book.objects.get(pk=book_id, published_at__lte=timezone.now())
# The list of chapters already come from the book's chapter_set, so it only needs
# to filter the chapters based on publishing time
chapters = book.chapter_set.filter(published_at__lte=timezone.now())
except (Book.DoesNotExist) as e:
raise Http404("Error: ", e)
context = {'book': book, 'chapters': chapters}
return render(request, 'catalog/details.html', context)
# * Function to view a specific chapter
def chapter(request, book_id, chapter_id):
try:
# The book needs the right ID, and to be published before now().
book = Book.objects.get(pk=book_id, published_at__lte=timezone.now())
# This is just one chapter so it needs the ID to differentiate from the others in the
# book's chapter_set, and also the publishing time check
chapter = book.chapter_set.get(pk=chapter_id, published_at__lte=timezone.now())
except (Book.DoesNotExist, Chapter.DoesNotExist) as e:
raise Http404("Error: ", e)
context = {'chapter': chapter}
return render(request, 'catalog/chapter.html', context)
# * Function to vote in a specific chapter
def vote(request, book_id, chapter_id):
try:
# The book needs the right ID, and to be published before now().
book = Book.objects.get(pk=book_id, published_at__lte=timezone.now())
# This is just one chapter so it needs the ID to differentiate from the others in the
# book's chapter_set, and also the publishing time check
chapter = book.chapter_set.get(pk=chapter_id, published_at__lte=timezone.now())
except (Book.DoesNotExist, Chapter.DoesNotExist) as e:
raise Http404("Error: ", e)
try:
selected_choice = request.POST['choice'] # Gets the choice from the POST data
except (KeyError):
# Redisplay the question voting form because there was no vote (KeyError from lack of vote in POST).
context = {'chapter': chapter, 'error_message': "You didn't vote."}
return render(request, 'catalog/chapter.html', context)
else:
# Check for the selected vote, 1 for positive and 0 for negative vote
if selected_choice == "1":
chapter.score += 1
elif selected_choice == "0":
chapter.score -= 1
chapter.save() # TODO: Look into F(), at Django docs. Better performance
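        # Hedged sketch of the F()-based update hinted at by the TODO above (assumes
        # `from django.db.models import F`); it performs the increment in a single
        # UPDATE query instead of a read-modify-write:
        #   chapter.score = F('score') + 1
        #   chapter.save(update_fields=['score'])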
# ! Always return an HttpResponseRedirect after successfully dealing with POST data.
# This prevents data from being posted twice if a user hits the Back button.
return HttpResponseRedirect(reverse('catalog:details', args=(book.id,))) | 2.625 | 3 |
01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/01-Gabarito/090.py | moacirsouza/nadas | 1 | 12760101 | print("""
090) Faça um programa que leia nome e média de um aluno, guardando também
a situação em um dicionário. No final, mostre o conteúdo a estrutura na tela.
""")
nome = input('Informe o nome do(a) aluno(a): ').strip()
media = float(input(f'Informe e a média de {nome}: ').strip())
erro = 0
if 7 <= media <= 10:
situacao = 'Aprovado'
elif 5 <= media <= 6.9:
situacao = 'Em recuperação'
elif 0 <= media < 5:
situacao = 'Reprovado'
else:
erro = 1
situacao = 'Erro ao informar dados. Procure a Adminstração da Escola.'
if erro == 1:
print(situacao)
else:
aluno = {'Nome': nome, 'Média': media, 'Situação': situacao }
print('-'*30)
for chave, valor in aluno.items():
print(f'{chave}: {valor}')
print('-'*30)
| 4.0625 | 4 |
CondTools/BeamSpot/test/BeamSpotOnlineRecordsWriter_cfg.py | malbouis/cmssw | 852 | 12760102 | <reponame>malbouis/cmssw
import FWCore.ParameterSet.Config as cms
import FWCore.ParameterSet.VarParsing as VarParsing
process = cms.Process("write2DB")
options = VarParsing.VarParsing()
options.register('unitTest',
False, # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.bool, # string, int, or float
"are we running the unit test?")
options.register('inputFile',
"BeamFitResults_Run306171.txt", # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"location of the input data")
options.register('inputTag',
"myTagName", # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"output tag name")
options.register('inputRecord',
"BeamSpotOnlineLegacyObjectsRcd", # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.string, # string, int, or float
"type of record")
options.register('startRun',
306171, # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.int, # string, int, or float
"location of the input data")
options.register('startLumi',
497, # default value
VarParsing.VarParsing.multiplicity.singleton, # singleton or list
VarParsing.VarParsing.varType.int, # string, int, or float
"IOV Start Lumi")
options.parseArguments()
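# Hedged usage sketch (not part of the original file): the VarParsing options above can be
# passed on the cmsRun command line as key=value pairs, e.g.
#   cmsRun BeamSpotOnlineRecordsWriter_cfg.py inputFile=BeamFitResults_Run306171.txt \
#          inputTag=myTagName inputRecord=BeamSpotOnlineLegacyObjectsRcd startRun=306171 startLumi=497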
process.load("FWCore.MessageLogger.MessageLogger_cfi")
from CondCore.CondDB.CondDB_cfi import *
if options.unitTest :
if options.inputRecord == "BeamSpotOnlineLegacyObjectsRcd" :
tag_name = 'BSLegacy_tag'
else:
tag_name = 'BSHLT_tag'
else:
tag_name = options.inputTag
#################################
# Produce a SQLITE FILE
#################################
CondDBBeamSpotObjects = CondDB.clone(connect = cms.string('sqlite_file:test_%s.db' % tag_name)) # choose an output name
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
CondDBBeamSpotObjects,
timetype = cms.untracked.string('lumiid'), #('lumiid'), #('runnumber')
toPut = cms.VPSet(cms.PSet(record = cms.string(options.inputRecord), # BeamSpotOnline record
tag = cms.string(tag_name))), # choose your favourite tag
loadBlobStreamer = cms.untracked.bool(False)
)
process.source = cms.Source("EmptySource")
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(1))
process.beamspotonlinewriter = cms.EDAnalyzer("BeamSpotOnlineRecordsWriter",
isHLT = cms.bool((options.inputRecord == "BeamSpotOnlineHLTObjectsRcd")),
InputFileName = cms.untracked.string(options.inputFile), # choose your input file
)
if(options.startRun>0 and options.startLumi>0):
process.beamspotonlinewriter.IOVStartRun = cms.untracked.uint32(options.startRun) # Customize your Run
process.beamspotonlinewriter.IOVStartLumi = cms.untracked.uint32(options.startLumi) # Customize your Lumi
process.p = cms.Path(process.beamspotonlinewriter)
| 2.0625 | 2 |
monitoring/prober/scd/test_operation_references_error_cases_v0_3_17.py | Orbitalize/InterUSS-Platform | 58 | 12760103 | <filename>monitoring/prober/scd/test_operation_references_error_cases_v0_3_17.py<gh_stars>10-100
"""Operation References corner cases error tests:
"""
import datetime
import json
import uuid
import yaml
from monitoring.monitorlib.infrastructure import default_scope
from monitoring.monitorlib import scd
from monitoring.monitorlib.scd import SCOPE_SC
from monitoring.prober.infrastructure import for_api_versions, register_resource_type
OP_TYPE = register_resource_type(342, 'Primary operational intent')
OP_TYPE2 = register_resource_type(343, 'Conflicting operational intent')
@for_api_versions(scd.API_0_3_17)
def test_ensure_clean_workspace(ids, scd_api, scd_session):
for op_id in (ids(OP_TYPE), ids(OP_TYPE2)):
resp = scd_session.get('/operational_intent_references/{}'.format(op_id), scope=SCOPE_SC)
if resp.status_code == 200:
resp = scd_session.delete('/operational_intent_references/{}'.format(op_id), scope=SCOPE_SC)
assert resp.status_code == 200, resp.content
elif resp.status_code == 404:
# As expected.
pass
else:
assert False, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_ref_area_too_large(scd_api, scd_session):
with open('./scd/resources/op_ref_area_too_large_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.post('/operational_intent_references/query', json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_ref_start_end_times_past(scd_api, scd_session):
with open('./scd/resources/op_ref_start_end_times_past_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.post('/operational_intent_references/query', json=req)
# It is ok (and useful) to query for past Operations that may not yet have
# been explicitly deleted. This is unlike remote ID where ISAs are
# auto-removed from the perspective of the client immediately after their end
# time.
assert resp.status_code == 200, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_ref_incorrect_units(scd_api, scd_session):
with open('./scd/resources/op_ref_incorrect_units_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.post('/operational_intent_references/query', json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_ref_incorrect_altitude_ref(scd_api, scd_session):
with open('./scd/resources/op_ref_incorrect_altitude_ref_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.post('/operational_intent_references/query', json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_uss_base_url_non_tls(ids, scd_api, scd_session):
with open('./scd/resources/op_uss_base_url_non_tls_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_bad_subscription_id(ids, scd_api, scd_session):
with open('./scd/resources/op_bad_subscription_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_bad_subscription_id_random(ids, scd_api, scd_session):
with open('./scd/resources/op_bad_subscription_v15.json', 'r') as f:
req = json.load(f)
req['subscription_id'] = uuid.uuid4().hex
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_new_and_existing_subscription(ids, scd_api, scd_session):
with open('./scd/resources/op_new_and_existing_subscription_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_end_time_past(ids, scd_api, scd_session):
with open('./scd/resources/op_end_time_past_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_already_exists(ids, scd_api, scd_session):
with open('./scd/resources/op_request_1_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 200, resp.content
ovn = resp.json()['operational_intent_reference']['ovn']
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 409, resp.content
# Delete operation
resp = scd_session.delete('/operational_intent_references/{}/{}'.format(ids(OP_TYPE), ovn))
assert resp.status_code == 200, resp.content
# Verify deletion
resp = scd_session.get('/operational_intent_references/{}'.format(ids(OP_TYPE)))
assert resp.status_code == 404, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_400_version1(ids, scd_api, scd_session):
with open('./scd/resources/op_400_version1_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_bad_state_version0(ids, scd_api, scd_session):
with open('./scd/resources/op_bad_state_version0_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_bad_lat_lon_range(ids, scd_api, scd_session):
with open('./scd/resources/op_bad_lat_lon_range_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_area_too_large_put(ids, scd_api, scd_session):
with open('./scd/resources/op_area_too_large_put_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_bad_time_format(ids, scd_api, scd_session):
with open('./scd/resources/op_bad_time_format_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_bad_volume(ids, scd_api, scd_session):
with open('./scd/resources/op_bad_volume_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_repeated_requests(ids, scd_api, scd_session):
with open('./scd/resources/op_request_1_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 200, resp.content
    assert 'operational_intent_reference' in resp.json(), resp.content
    assert 'ovn' in resp.json()['operational_intent_reference'], resp.content
    ovn = resp.json()['operational_intent_reference']['ovn']
    print(ovn)
with open('./scd/resources/op_request_1.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 409, resp.content
# Delete operation
resp = scd_session.delete('/operational_intent_references/{}/{}'.format(ids(OP_TYPE), ovn))
assert resp.status_code == 200, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_op_invalid_id(scd_api, scd_session):
with open('./scd/resources/op_request_1_v15.json', 'r') as f:
req = json.load(f)
resp = scd_session.put('/operational_intent_references/not_uuid_format', json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_missing_conflicted_operation(ids, scd_api, scd_session):
# Emplace the initial version of Operation 1
with open('./scd/resources/op_missing_initial.yaml', 'r') as f:
req = yaml.full_load(f)
dt = datetime.datetime.utcnow() - scd.start_of(req['extents'])
req['extents'] = scd.offset_time(req['extents'], dt)
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE)), json=req)
assert resp.status_code == 200, resp.content
ovn1a = resp.json()['operational_intent_reference']['ovn']
sub_id = resp.json()['operational_intent_reference']['subscription_id']
# Emplace the pre-existing Operation that conflicted in the original observation
with open('./scd/resources/op_missing_preexisting_unknown.yaml', 'r') as f:
req = yaml.full_load(f)
req['extents'] = scd.offset_time(req['extents'], dt)
req['key'] = [ovn1a]
resp = scd_session.put('/operational_intent_references/{}'.format(ids(OP_TYPE2)), json=req)
assert resp.status_code == 200, resp.content
# Attempt to update Operation 1 without OVN for the pre-existing Operation
with open('./scd/resources/op_missing_update.json', 'r') as f:
req = json.load(f)
req['extents'] = scd.offset_time(req['extents'], dt)
req['key'] = [ovn1a]
req['subscription_id'] = sub_id
resp = scd_session.put('/operational_intent_references/{}/{}'.format(ids(OP_TYPE), ovn1a), json=req)
assert resp.status_code == 409, resp.content
# checking entity conflicts
conflicts = []
data = resp.json()
assert 'missing_operational_intents' in data
assert ids(OP_TYPE2) in [intent['id'] for intent in data['missing_operational_intents']], resp.content
# Perform an area-based query on the area occupied by Operation 1
with open('./scd/resources/op_missing_query.json', 'r') as f:
req = json.load(f)
req['area_of_interest'] = scd.offset_time([req['area_of_interest']], dt)[0]
resp = scd_session.post('/operational_intent_references/query', json=req)
assert resp.status_code == 200, resp.content
ops = [op['id'] for op in resp.json()['operational_intent_references']]
assert ids(OP_TYPE) in ops, resp.content
    # ids(OP_TYPE2) is not expected here because its ceiling is <575m whereas the query floor is
    # >591m.
assert ids(OP_TYPE2) not in ops, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_big_operation_search(scd_api, scd_session):
with open('./scd/resources/op_big_operation.json', 'r') as f:
req = json.load(f)
dt = datetime.datetime.utcnow() - scd.start_of([req['area_of_interest']])
req['area_of_interest'] = scd.offset_time([req['area_of_interest']], dt)[0]
resp = scd_session.post('/operational_intent_references/query', json=req)
assert resp.status_code == 400, resp.content
@for_api_versions(scd.API_0_3_17)
@default_scope(SCOPE_SC)
def test_clean_up(ids, scd_api, scd_session):
for op_id in (ids(OP_TYPE), ids(OP_TYPE2)):
resp = scd_session.get('/operational_intent_references/{}'.format(op_id), scope=SCOPE_SC)
if resp.status_code == 200:
# only the owner of the subscription can delete a operation reference.
assert 'operational_intent_reference' in resp.json(), resp.content
assert 'ovn' in resp.json()['operational_intent_reference'], resp.content
ovn = resp.json()['operational_intent_reference']['ovn']
resp = scd_session.delete('/operational_intent_references/{}/{}'.format(op_id, ovn), scope=SCOPE_SC)
assert resp.status_code == 200, resp.content
elif resp.status_code == 404:
# As expected.
pass
else:
assert False, resp.content
| 2.015625 | 2 |
run.py | gems-uff/sms | 0 | 12760104 | from app import create_app
from config import Config
app = create_app(Config)
| 1.351563 | 1 |
fmtcvt/core.py | skmatz/fmtcvt | 0 | 12760105 | <reponame>skmatz/fmtcvt
import json
import os.path
import yaml
def convert(in_path: str, out_path: str):
_, in_ext = os.path.splitext(in_path)
_, out_ext = os.path.splitext(out_path)
if in_ext == out_ext:
raise ValueError(
"in file path and out file path must have different extensions"
)
    if in_ext == ".json":
        with open(in_path, mode="r") as f:
            data = json.load(f)
    elif in_ext in (".yaml", ".yml"):
        with open(in_path, mode="r") as f:
            data = yaml.load(f, Loader=yaml.SafeLoader)
    else:
        raise ValueError(f"unsupported input extension: {in_ext}")
    if out_ext == ".json":
        with open(out_path, mode="w") as f:
            json.dump(data, f)
    elif out_ext in (".yaml", ".yml"):
        with open(out_path, mode="w") as f:
            yaml.dump(data, f)
    else:
        raise ValueError(f"unsupported output extension: {out_ext}")
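# Example usage (hypothetical file names):
#   convert("settings.json", "settings.yaml")   # JSON -> YAML
#   convert("settings.yaml", "settings.json")   # YAML -> JSON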
| 2.828125 | 3 |
sample/app/views.py | naviat/CruiseControl-Micro-service-deployment | 86 | 12760106 | <filename>sample/app/views.py
from app import app
from flask import render_template
from flask import send_from_directory
import json
from json import dumps
from os.path import join
from flask import make_response, request, redirect, url_for
import awslib
import os
bucket_name = os.environ.get('IPLIST_CONFIG_BUCKET')
s3path = os.environ.get('IPLIST_CONFIG_PATH')
nohttps = os.environ.get('NOHTTPS')
path = join('iplist_config', 'config.json')
if s3path == None:
print "No Env Labeled IPLIST_CONFIG_PATH"
elif bucket_name == None:
print "No bucket name specified"
else:
awslib._get_file(bucket_name, s3path, path)
@app.route('/')
def handle_index():
redir = None
if nohttps == None:
proto = request.headers.get("X-Forwarded-Proto")
if not proto == "https":
redir = _check_ssl(request.url)
if not redir == None:
return redir
with open(path) as json_data:
data = json.load(json_data)
return render_template("index.html", apps=[app['name'] for app in data['apps']])
@app.route('/healthcheck')
def handle_healthcheck():
return "I'm still here."
@app.route('/<appname>')
def handle_app(appname):
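    # builds a per-region map of ELB / EIP / instance IPs for the requested app,
    # based on the JSON config previously fetched from S3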
with open(path) as json_data:
data = json.load(json_data)
verbose = False
chosen_region = None
ret = {}
query_string = request.query_string
if not query_string == "":
for query in query_string.split('&'):
if "verbose" in query.lower():
if query.endswith("1"):
verbose = True
elif "region" in query.lower():
chosen_region = query[7:]
if verbose:
print request.url
redir = None
if nohttps == None:
proto = request.headers.get("X-Forwarded-Proto")
if not proto == "https":
redir = _check_ssl(request.url, verbose)
if not redir == None:
return redir
for app in data['apps']:
if appname.lower() == app['name'].lower():
app_config = app['config']
for config in app_config:
if config.get('s3filepath'):
datapath = config.get('localpath')
awslib._get_file(bucket_name, config['s3filepath'], datapath)
with open(datapath) as filedata:
output = json.load(filedata)
return jsonify(**output)
elif config.get('R53'):
ret = {}
for item in config['R53']:
ret[item['Name']] = {}
ret[item['Name']]['all_ips'] = []
ret[item['Name']]['all_ips'] = awslib._get_records_from_zone(item['HostedZoneId'], item['Pattern'], item['Domain'])
return jsonify(**ret)
dnsname = config['dnsname']
bs_app = config['beanstalk_app_name']
region = config['region']
if not chosen_region == None:
if not region == chosen_region:
continue
exclusions = config['exclusions']
eip_check = config.get('show_eip')
lb_check = config.get('show_lb_ip')
inst_check = config.get('show_inst_ip')
if ret.get(region) == None:
ret[region] = {}
lb_name = awslib._active_balancer(dnsname, region)
if ret[region].get('all_ips') == None:
ret[region]['all_ips'] = []
if not eip_check == None:
eips = awslib._list_eips(region, filter=exclusions)
if verbose:
if ret[region].get('eips') == None:
ret[region]['eips'] = eips
else:
ret[region]['eips'].extend(eips)
if eip_check:
ret[region]['all_ips'].extend(eips)
if not lb_check == None:
lb_url = awslib._environment_descr(bs_app, lb_name, region)
elb = awslib._balancer_ip(lb_url)
if verbose:
if ret[region].get('elb') == None:
ret[region]['elb'] = elb
else:
ret[region]['elb'].extend(elb)
if lb_check:
ret[region]['all_ips'].extend(elb)
if not inst_check == None:
inst_ips = awslib._instance_ip(lb_name, region)
if verbose:
if ret[region].get('instance_ips') == None:
ret[region]['instance_ips'] = inst_ips
else:
ret[region]['instance_ips'].extend(inst_ips)
if inst_check:
ret[region]['all_ips'].extend(inst_ips)
if not ret:
return redirect(url_for('handle_index'), code=302)
else:
return jsonify(**ret)
@app.route('/favicon.ico')
def favicon():
return send_from_directory(os.path.join(app.root_path, 'static'), 'favicon.ico', mimetype='image/vnd.microsoft.icon')
def jsonify(status=200, indent=4, sort_keys=False, **kwargs):
response = make_response(dumps(dict(**kwargs), indent=indent, sort_keys=sort_keys))
response.headers['Content-Type'] = 'application/json; charset=utf-8'
response.headers['mimetype'] = 'application/json'
response_code = status
return response
def _check_ssl(url, verbose=False):
if verbose:
print "Current scheme: %s" % url[:5]
if url[:5] == "https":
return None
else:
return redirect("https" + url[4:], code=302)
| 2.53125 | 3 |
src/agreement/predicate_mention.py | hitzkrieg/Okr-Test | 0 | 12760107 | <gh_stars>0
"""
Author: <NAME> and <NAME>
Receives two annotated graphs and computes the agreement on the predicate mentions.
We average the accuracy of the two annotators, each computed while taking the other as a gold reference.
"""
import sys
sys.path.append('../common')
from mention_common import *
from constants import NULL_VALUE
from filter_propositions import filter_verbal, filter_non_verbal
from collections import defaultdict
def compute_predicate_mention_agreement(graph1, graph2):
"""
Compute predicate mention agreement on two graphs
:param graph1: the first annotator's graph
:param graph2: the second annotator's graph
:return predicate mention accuracy and the consensual graphs
"""
# Get the consensual mentions and the mentions in each graph
consensual_mentions, graph1_prop_mentions, graph2_prop_mentions = extract_consensual_mentions(graph1, graph2)
# Compute the accuracy, each time taking one annotator as the gold
accuracy1 = len(consensual_mentions) * 1.0 / len(graph1_prop_mentions) if len(graph1_prop_mentions) > 0 else 0.0
    accuracy2 = len(consensual_mentions) * 1.0 / len(graph2_prop_mentions) if len(graph2_prop_mentions) > 0 else 0.0
prop_mention_acc = (accuracy1 + accuracy2) / 2
consensual_graph1 = filter_mentions(graph1, consensual_mentions)
consensual_graph2 = filter_mentions(graph2, consensual_mentions)
return prop_mention_acc, consensual_graph1, consensual_graph2
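# Worked example of the averaging above (numbers are illustrative): if annotator 1
# marked 10 predicate mentions, annotator 2 marked 8, and 6 of them coincide, then
# accuracy1 = 6/10 = 0.6, accuracy2 = 6/8 = 0.75, and the reported agreement is
# (0.6 + 0.75) / 2 = 0.675.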
def compute_predicate_mention_agreement_verbal(graph1, graph2):
"""
Compute predicate mention agreement only on verbal predicates
:param graph1: the first annotator's graph
:param graph2: the second annotator's graph
:return predicate mention accuracy on verbal predicates
"""
verbal_graph1 = filter_verbal(graph1)
verbal_graph2 = filter_verbal(graph2)
accuracy, _, _ = compute_predicate_mention_agreement(verbal_graph1, verbal_graph2)
return accuracy
def compute_predicate_mention_agreement_non_verbal(graph1, graph2):
"""
Compute predicate mention agreement only on non verbal predicates
:param graph1: the first annotator's graph
:param graph2: the second annotator's graph
:return predicate mention accuracy on non verbal predicates
"""
non_verbal_graph1 = filter_non_verbal(graph1)
non_verbal_graph2 = filter_non_verbal(graph2)
accuracy, _, _ = compute_predicate_mention_agreement(non_verbal_graph1, non_verbal_graph2)
return accuracy
def filter_mentions(graph, consensual_mentions):
"""
Remove mentions that are not consensual
:param graph: the original graph
:param consensual_mentions: the mentions that both annotators agreed on
:return: the graph, containing only the consensual mentions
"""
consensual_graph = graph.clone()
for prop in consensual_graph.propositions.values():
prop.mentions = { id : mention for id, mention in prop.mentions.iteritems()
if str(mention) in consensual_mentions}
# Remove them also from the entailment graph
if prop.entailment_graph != NULL_VALUE:
prop.entailment_graph.mentions_graph = [(m1, m2) for (m1, m2) in prop.entailment_graph.mentions_graph
if m1 in consensual_mentions and m2 in consensual_mentions]
# Remove propositions with no mentions
if len(prop.mentions) == 0:
consensual_graph.propositions.pop(prop.id, None)
return consensual_graph
def extract_consensual_mentions(graph1, graph2):
"""
Receives two graphs, and returns the consensual predicate mentions, and the predicate mentions in each graph.
:param graph1: the first annotator's graph
:param graph2: the second annotator's graph
:return the consensual predicate mentions, and the predicate mentions in each graph
"""
# Get the predicate mentions in both graphs
graph1_prop_mentions = set.union(*[set(map(str, prop.mentions.values())) for prop in graph1.propositions.values()])
graph2_prop_mentions = set.union(*[set(map(str, prop.mentions.values())) for prop in graph2.propositions.values()])
# Exclude sentence that weren't annotated by both annotators
common_sentences = set([x.split('[')[0] for x in graph1_prop_mentions]).intersection(
set([x.split('[')[0] for x in graph2_prop_mentions]))
graph1_prop_mentions = set([a for a in graph1_prop_mentions if a.split('[')[0] in common_sentences])
graph2_prop_mentions = set([a for a in graph2_prop_mentions if a.split('[')[0] in common_sentences])
# Exclude ignored words
# TODO: Rachel - document ignored words
if not graph2.ignored_indices == None:
graph1_prop_mentions = set([a for a in graph1_prop_mentions if len(overlap_set(a, graph2.ignored_indices)) == 0])
if not graph1.ignored_indices == None:
graph2_prop_mentions = set([a for a in graph2_prop_mentions if len(overlap_set(a, graph1.ignored_indices)) == 0])
# Compute the accuracy, each time treating a different annotator as the gold
consensual_mentions = graph1_prop_mentions.intersection(graph2_prop_mentions)
return consensual_mentions, graph1_prop_mentions, graph2_prop_mentions
def argument_mention_to_terms(mention, sentence):
"""
Receives the argument mention and the sentence(list of tokens), and returns the string associated with the argument mention.
:param mention: the Argument mention
    :param sentence: the list of tokens (strings) representing the sentence
"""
terms = ' '.join([sentence[int(id)] for id in str(mention).rstrip(']').split('[')[1].split(', ') ])
return terms
def analyse_predicate_mentions_individually(graph1, graph2):
"""
Receives gold and pred graphs, and prints errors in predicate extraction.
:param graph1: the gold graph
:param graph2: the predicted graph
:for now no returns
"""
# Extract the proposition mentions
graph1_prop_mentions = set.union(*[set(map(str, prop.mentions.values())) for prop in graph1.propositions.values()])
graph2_prop_mentions = set.union(*[set(map(str, prop.mentions.values())) for prop in graph2.propositions.values()])
# List of the ids of Common sentences (as string)
common_sentences = set([x.split('[')[0] for x in graph1_prop_mentions]).intersection(set([x.split('[')[0] for x in graph2_prop_mentions]))
# classify mentions into categories
consensual_mentions = graph1_prop_mentions.intersection(graph2_prop_mentions)
predicted_mentions_but_not_in_gold = graph2_prop_mentions - graph1_prop_mentions
gold_mentions_but_not_predicted = graph1_prop_mentions - graph2_prop_mentions
# Predicates ignored in current evaluation scheme (because only common sentences are currently considered)
ignored_gold_predicates = set([a for a in graph1_prop_mentions if a.split('[')[0] not in common_sentences])
ignored_pred_predicates = set([a for a in graph2_prop_mentions if a.split('[')[0] not in common_sentences])
# dict1 & dict2 (type dictionary) --> sentID: list of indices (each indices corresponds to a predicate mention)
dict1 = defaultdict(list)
dict2 = defaultdict(list)
for a in gold_mentions_but_not_predicted:
dict1[a.split('[')[0]].append(a.split('[')[1].rstrip(']').split(', '))
for a in predicted_mentions_but_not_in_gold:
dict2[a.split('[')[0]].append(a.split('[')[1].rstrip(']').split(', '))
    # matches: the number of examples where a predicted proposition is absent from the gold, but shares some lexical overlap with the gold propositions
    # match_pc: the percentage of overlap (in terms of number of words)
    # thresh: minimum lexical overlap to record and print (may be used in further analysis)
matches = 0
match_pc = 0.0
thresh = 0.0
for sentID in dict2.keys():
list1 = dict1[sentID]
list2 = dict2[sentID]
for j in list1:
overlapped = False
for i in list2:
intersect = set(i).intersection(j)
if len(intersect)!=0:
matches+=1
                    lexical_overal_pc = len(intersect) * 1.0 / len(set(i).union(j))
if(lexical_overal_pc >= thresh):
print(" \n------Example of Gold proposition which was missed by predicted (but there was some overlap) --------")
sentence = graph1.sentences[int(sentID)]
gold_prop_mention = graph1.prop_mentions_by_key[sentID+'['+', '.join(j)+']']
predicted_prop_mention = graph2.prop_mentions_by_key[sentID+'['+', '.join(i)+']']
print("Sentence: {}".format(' '.join(sentence)))
print("Gold proposition: {}. Explicit: {}".format( ' '.join([sentence[int(index)] for index in j]) + '[' + ', '.join([argument_mention_to_terms(argument_mention, sentence) for argument_mention in gold_prop_mention.argument_mentions.values()]) + ']', gold_prop_mention.is_explicit ) )
# Arguments of predicted proposition not printed because info not available in code (from prop_ex)
print("Predicted proposition: {}.".format( ' '.join([sentence[int(index)] for index in i] ) ))
overlapped = True
match_pc += lexical_overal_pc
break
if(overlapped==False):
sentence = graph1.sentences[int(sentID)]
gold_prop_mention = graph1.prop_mentions_by_key[sentID+'['+', '.join(j)+']']
print '\n-------Gold proposition which was completely missed (no overlap): ----------- '
print("Sentence: {}".format(' '.join(sentence)))
print("Gold proposition: {}. Explicit: {}".format( ' '.join([sentence[int(index)] for index in j]) + '[' + ', '.join([argument_mention_to_terms(argument_mention, sentence) for argument_mention in gold_prop_mention.argument_mentions.values()]) + ']', gold_prop_mention.is_explicit ) )
if matches!=0:
match_pc = match_pc/matches*100
print ('\nOther statistics:')
print('No of consensual mentions: {}'.format(len(consensual_mentions)))
print('No of predicted mentions not in gold: {}'.format(len(predicted_mentions_but_not_in_gold)))
print('No of gold mentions but not in predicted: {}'.format(len(gold_mentions_but_not_predicted)))
print('No of gold mentions which have been ignored from evaluation: {}'.format(len(ignored_gold_predicates)))
print('No of predicted mentions which have been ignored from evaluation: {}'.format(len(ignored_pred_predicates)))
print('No of predicted mentions which have some intersection with the unmatched gold predicates: {}'.format(matches))
print('*******************\n')
| 2.71875 | 3 |
colaboradores/migrations/0003_auto_20211119_1406.py | victorescosta/Site-django-para-cadastro-de-funcionarios | 3 | 12760108 | <reponame>victorescosta/Site-django-para-cadastro-de-funcionarios
# Generated by Django 3.2.9 on 2021-11-19 14:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('colaboradores', '0002_alter_colaborador_cpf'),
]
operations = [
migrations.CreateModel(
name='Endereco',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('rua', models.CharField(max_length=300)),
('bairro', models.CharField(max_length=100, null=True)),
('numero', models.IntegerField(null=True)),
],
),
migrations.AddField(
model_name='colaborador',
name='endereco_id',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.CASCADE, to='colaboradores.endereco'),
),
]
| 1.679688 | 2 |
main.py | Sava-Bullion/bullion_yield1_bot | 0 | 12760109 | <gh_stars>0
# Copyright (c) ChernV (@otter18), 2021.
import os
import random
from setup import bot, logger
from webhook import app
# --------------- dialog params -------------------
dialog = {
'hello': {
'in': ['привет', 'hello', 'hi', 'privet', 'hey'],
'out': ['Приветствую', 'Здравствуйте', 'Привет!']
},
'how r u': {
'in': ['как дела', 'как ты', 'how are you', 'дела', 'how is it going'],
'out': ['Хорошо', 'Отлично', 'Good. And how are u?']
},
'name': {
'in': ['зовут', 'name', 'имя'],
'out': [
'Я telegram-template-bot',
'Я бот шаблон, но ты можешь звать меня в свой проект',
'Это секрет. Используй команду /help, чтобы узнать'
]
}
}
# --------------- bot -------------------
@bot.message_handler(commands=['start'])
def say_welcome(message):
logger.info(f'</code>@{message.<EMAIL>.username}<code> ({message.chat.id}) used /start or /help')
bot.send_message(
message.chat.id,
        '<b>Hello! Welcome to <a href="https://bullion.media/">Bullion.Media.</a></b>',
parse_mode='html'
)
bot.send_message(
message.chat.id,
'<b>Type /admin to get admin login page. Type /publisher to get publisher stats page. Type /advertiser to get advertiser stats page</b>',
parse_mode='html'
)
@bot.message_handler(commands=['publisher'])
def say_welcome(message):
logger.info(f'</code>@{<EMAIL>}<code> ({message.chat.id}) used /start or /help')
bot.send_message(
message.chat.id,
'<b>Hello! Get your publisher`s stats <a href="https://admin.bullionyield.com/publisher/login">Bullion.Yield</a></b>',
parse_mode='html'
)
@bot.message_handler(commands=['advertiser'])
def say_welcome(message):
logger.info(f'</code>@{<EMAIL>}<code> ({message.chat.id}) used /start or /help')
bot.send_message(
message.chat.id,
'<b>Hello! Get your advertiser`s stats <a href="https://admin.bullionyield.com/advertiser/login">Bullion.Yield</a></b>',
parse_mode='html'
)
@bot.message_handler(commands=['admin'])
def say_welcome(message):
logger.info(f'</code>@{<EMAIL>}<code> ({message.chat.id}) used /start or /help')
bot.send_message(
message.chat.id,
'<b>Let`s ROCK them today! <a href="https://admin.bullionyield.com/admin/login">Bullion.Yield</a></b>',
parse_mode='html'
)
@bot.message_handler(func=lambda message: True)
def echo(message):
for t, resp in dialog.items():
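        # a topic matches when any of its trigger substrings occurs in the lower-cased message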
if sum([e in message.text.lower() for e in resp['in']]):
logger.info(f'</code>@{<EMAIL>}<code> ({message.chat.id}) used {t}:\n\n%s', message.text)
bot.send_message(message.chat.id, random.choice(resp['out']))
return
logger.info(f'</code>@{<EMAIL>}<code> ({message.chat.id}) used echo:\n\n%s', message.text)
bot.send_message(message.chat.id, message.text)
if __name__ == '__main__':
if os.environ.get("IS_PRODUCTION", "False") == "True":
app.run()
else:
bot.infinity_polling()
| 2.21875 | 2 |
Python scripts/Rookie/KeywordArguments.py | shartrooper/My-python-scripts | 0 | 12760110 | <reponame>shartrooper/My-python-scripts
print('MyMy',end=' ')
print('Popsicle')
print('Balloon','Helium','Blimp',sep=' and ')
| 1.507813 | 2 |
src/exoplanet/theano_ops/kepler/kepler.py | t-brandt/exoplanet | 0 | 12760111 | # -*- coding: utf-8 -*-
__all__ = ["KeplerOp"]
import theano
import theano.tensor as tt
from theano import gof
from ..build_utils import get_cache_version, get_compile_args, get_header_dirs
class KeplerOp(gof.COp):
__props__ = ()
func_file = "./kepler.cc"
func_name = "APPLY_SPECIFIC(kepler)"
def __init__(self, **kwargs):
super(KeplerOp, self).__init__(self.func_file, self.func_name)
def c_code_cache_version(self):
return get_cache_version()
def c_headers(self, compiler):
return ["exoplanet/theano_helpers.h", "exoplanet/kepler.h"]
def c_header_dirs(self, compiler):
return get_header_dirs(eigen=False)
def c_compile_args(self, compiler):
return get_compile_args(compiler)
def make_node(self, mean_anom, eccen):
in_args = [
tt.as_tensor_variable(mean_anom),
tt.as_tensor_variable(eccen),
]
return gof.Apply(self, in_args, [in_args[0].type(), in_args[0].type()])
def infer_shape(self, node, shapes):
return shapes[0], shapes[0]
def grad(self, inputs, gradients):
M, e = inputs
sinf, cosf = self(M, e)
bM = tt.zeros_like(M)
be = tt.zeros_like(M)
# e * cos(f)
ecosf = e * cosf
# 1 - e^2
ome2 = 1 - e ** 2
# Partials
dfdM = (1 + ecosf) ** 2 / ome2 ** 1.5
dfde = (2 + ecosf) * sinf / ome2
if not isinstance(gradients[0].type, theano.gradient.DisconnectedType):
bM += gradients[0] * cosf * dfdM
be += gradients[0] * cosf * dfde
if not isinstance(gradients[1].type, theano.gradient.DisconnectedType):
bM -= gradients[1] * sinf * dfdM
be -= gradients[1] * sinf * dfde
return [bM, be]
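    # The partials used in grad() above are the standard true-anomaly derivatives,
    # restated here as a hedged reminder rather than a full derivation:
    #   df/dM = (1 + e cos f)^2 / (1 - e^2)^{3/2}
    #   df/de = (2 + e cos f) sin f / (1 - e^2)
    # The sin f / cos f factors come from back-propagating through sinf and cosf.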
def R_op(self, inputs, eval_points):
if eval_points[0] is None:
return eval_points
return self.grad(inputs, eval_points)
| 2.078125 | 2 |
step_1.py | GreeeNic/lesson_201128 | 0 | 12760112 | <filename>step_1.py<gh_stars>0
print('hello, world!')
hol = 3
print(hol)
| 1.53125 | 2 |
feynman/S_combine_pareto.py | harshjp93/AI_Feynman_edit | 0 | 12760113 | <filename>feynman/S_combine_pareto.py
# Combines two Pareto frontiers obtained from the separability test into a new one.
from get_pareto import Point, ParetoSet
from S_get_symbolic_expr_error import get_symbolic_expr_error
from sympy.parsing.sympy_parser import parse_expr
import numpy as np
import matplotlib.pyplot as plt
import os
from os import path
from sympy import Symbol, lambdify, N
from get_pareto import Point, ParetoSet
from S_get_expr_complexity import get_expr_complexity
def combine_pareto(input_data,PA1,PA2,idx_list_1,idx_list_2,PA,sep_type = "+"):
possible_vars = ["x%s" %i for i in np.arange(0,30,1)]
PA1 = np.array(PA1.get_pareto_points()).astype('str')
PA2 = np.array(PA2.get_pareto_points()).astype('str')
for i in range(len(PA1)):
for j in range(len(PA2)):
try:
# replace the variables from the separated parts with the variables reflecting the new combined equation
exp1 = PA1[i][2]
exp2 = PA2[j][2]
for k in range(len(idx_list_1)-1,-1,-1):
exp1 = exp1.replace(possible_vars[k],possible_vars[idx_list_1[k]])
for k in range(len(idx_list_2)-1,-1,-1):
exp2 = exp2.replace(possible_vars[k],possible_vars[idx_list_2[k]])
new_eq = "(" + exp1 + ")" + sep_type + "(" + exp2 + ")"
compl = get_expr_complexity(new_eq)
PA.add(Point(x=compl,y=get_symbolic_expr_error(input_data,new_eq),data=new_eq))
except:
continue
return PA
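# Hedged usage sketch (variable names are illustrative): with PA1 and PA2 the Pareto
# frontiers fitted to the two separated halves of the data, and idx_list_1/idx_list_2
# mapping their local variables back to the original columns, the merged frontier can
# be built as
#   PA = combine_pareto(input_data, PA1, PA2, idx_list_1, idx_list_2, ParetoSet(), sep_type="*")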
| 3.03125 | 3 |
lg_common/src/lg_common/interactive_messages_factory.py | carlosvquezada/lg_ros_nodes | 0 | 12760114 | <gh_stars>0
import sys
import json
import rospy
from interactivespaces_msgs.msg import GenericMessage
class InteractiveSpacesMessagesFactory:
"""
This class contains easy access to test messages
"""
def __init__(self):
self.publisher = None
self.test_one_browser_with_extension_msg = self._create_message("""
{
"description": "one browser with extension",
"duration": 100,
"name": "one browser with extension",
"resource_uri": "/director_api/scene/one_browser_with_extension/",
"slug": "one_browser_with_extension",
"windows": [
{
"activity": "browser",
"activity_config": {
"google_chrome":{
"extensions": [
"test_extension1"
]
}
},
"assets": [
"http://12172.16.58.3:8008/lg_common/webapps/example/index.html"
],
"height": 333,
"presentation_viewport": "center",
"width": 333,
"x_coord": 22,
"y_coord": 22
}
]
}
""")
self.test_one_browser_with_allowed_urls_msg = self._create_message("""
{
"description": "one browser with allowed urls",
"duration": 100,
"name": "one browser with allowed urls",
"resource_uri": "/director_api/scene/one_browser_with_allowed_urls/",
"slug": "one_browser_with_allowed_urls",
"windows": [
{
"activity": "browser",
"activity_config": {
"google_chrome":{
"allowed_urls": ["google.com", "endpoint.com"]
}
},
"assets": [
"https://maps.google.com"
],
"height": 333,
"presentation_viewport": "center",
"width": 333,
"x_coord": 22,
"y_coord": 22
}
]
}
""")
self.test_one_browser_with_two_extensions_msg = self._create_message("""
{
"description": "one browser with extension",
"duration": 100,
"name": "one browser with extension",
"resource_uri": "/director_api/scene/one_browser_with_extension/",
"slug": "one_browser_with_extension",
"windows": [
{
"activity": "browser",
"activity_config": {
"google_chrome":{
"extensions": [
"test_extension1",
"test_extension2"
]
}
},
"assets": [
"https://maps.google.com"
],
"height": 333,
"presentation_viewport": "center",
"width": 333,
"x_coord": 22,
"y_coord": 22
}
]
}
""")
self.test_one_browser_with_two_extensions_and_preloading_msg = self._create_message("""
{
"description": "one_browser_tiwh_two_extensions_and_preloading",
"duration": 100,
"name": "one_browser_tiwh_two_extensions_and_preloading",
"resource_uri": "/director_api/scene/one_browser_tiwh_two_extensions_and_preloading/",
"slug": "one_browser_tiwh_two_extensions_and_preloading",
"windows": [
{
"activity": "browser",
"activity_config": {
"preload": true,
"google_chrome":{
"extensions": [
"test_extension1",
"test_extension2"
]
}
},
"assets": [
"http://127.0.0.1:8008/lg_common/webapps/window_ready_mock/test.html"
],
"height": 333,
"presentation_viewport": "center",
"width": 333,
"x_coord": 22,
"y_coord": 22
}
]
}
""")
self.test_one_browser_with_custom_cmdargs_msg = self._create_message("""
{
"description": "one browser with extension",
"duration": 100,
"name": "one browser with extension",
"resource_uri": "/director_api/scene/one_browser_with_extension/",
"slug": "one_browser_with_extension",
"windows": [
{
"activity": "browser",
"activity_config": {
"google_chrome":{
"command_line_args": [
"--disable-out-of-process-pac",
"--enable-benchmarking",
"--enable-crash-reporter"
]
}
},
"assets": [
"https://maps.google.com"
],
"height": 333,
"presentation_viewport": "center",
"width": 333,
"x_coord": 22,
"y_coord": 22
}
]
}
""")
self.test_one_browser_with_custom_user_agent_msg = self._create_message("""
{
"description": "custom user agent",
"duration": 100,
"name": "custom_user_agent",
"resource_uri": "/director_api/scene/one_browser_with_custom_user_agent/",
"slug": "one_browser_with_custom_user_agent",
"windows": [
{
"activity": "browser",
"activity_config": {
"google_chrome":{
"user_agent": "lol<PASSWORD>"
}
},
"assets": [
"https://maps.google.com"
],
"height": 333,
"presentation_viewport": "center",
"width": 333,
"x_coord": 22,
"y_coord": 22
}
]
}
""")
self.test_one_browser_with_custom_binary_msg = self._create_message("""
{
"description": "custom binary",
"duration": 100,
"name": "custom_user_agent",
"resource_uri": "/director_api/scene/one_browser_with_custom_binary/",
"slug": "one_browser_with_custom_binary",
"windows": [
{
"activity": "browser",
"activity_config": {
"google_chrome":{
"version": "beta"
}
},
"assets": [
"https://maps.google.com"
],
"height": 333,
"presentation_viewport": "center",
"width": 333,
"x_coord": 22,
"y_coord": 22
}
]
}
""")
self.test_one_browser_on_center_alt_slug_msg = self._create_message("""
{
"description": "one_browser_on_center_alt_slug",
"duration": 100,
"name": "one_browser_on_center_alt_slug",
"resource_uri": "/director_api/scene/one_browser_on_center_alt_slug/",
"slug": "one_browser_on_center_alt_slug",
"windows": [
{
"activity": "browser",
"activity_config": {
},
"assets": [
"http://127.0.0.1:8008/lg_common/webapps/example/index.html"
],
"height": 333,
"presentation_viewport": "center",
"width": 333,
"x_coord": 22,
"y_coord": 22
}
]
}
""")
self.test_one_browser_on_center_msg = self._create_message("""
{
"description": "one_browser_on_center",
"duration": 100,
"name": "custom_user_agent",
"resource_uri": "/director_api/scene/one_browser_on_center/",
"slug": "one_browser_on_center",
"windows": [
{
"activity": "browser",
"activity_config": {
},
"assets": [
"http://127.0.0.1:8008/lg_common/webapps/example/index.html"
],
"height": 333,
"presentation_viewport": "center",
"width": 333,
"x_coord": 22,
"y_coord": 22
}
]
}
""")
self.test_no_browsers_msg = self._create_message("""
{
"description": "no_browsers",
"duration": 100,
"name": "no_browsers",
"resource_uri": "/director_api/scene/no_browsers/",
"slug": "no_browsers",
"windows": [
]
}
""")
self.test_one_browser_with_preloading_and_wrong_url_msg = self._create_message("""
{
"description": "one_browser_with_preloading_and_wrong_url",
"duration": 100,
"name": "one_browser_with_preloading_and_wrong_url",
"resource_uri": "/director_api/scene/one_browser_with_preloading_and_wrong_url/",
"slug": "one_browser_with_preloading_and_wrong_url",
"windows": [
{
"activity": "browser",
"activity_config": {
"preload": true
},
"assets": [
"http://asdasdasdaqweqwenonexistentpage.com"
],
"height": 333,
"presentation_viewport": "center",
"width": 333,
"x_coord": 22,
"y_coord": 22
}
]
}
""")
self.test_one_browser_with_preloading_msg = self._create_message("""
{
"description": "one_browser_with_preloading",
"duration": 100,
"name": "one_browser_with_preloading",
"resource_uri": "/director_api/scene/one_browser_with_preloading/",
"slug": "one_browser_with_preloading",
"windows": [
{
"activity": "browser",
"activity_config": {
"preload": true
},
"assets": [
"http://127.0.0.1:8008/lg_common/webapps/window_ready_mock/test.html"
],
"height": 333,
"presentation_viewport": "center",
"width": 333,
"x_coord": 22,
"y_coord": 22
}
]
}
""")
self.test_one_browser_with_preloading_alt_slug_msg = self._create_message("""
{
"description": "one_browser_with_preloading_alt_slug",
"duration": 100,
"name": "one_browser_with_preloading_alt_slug",
"resource_uri": "/director_api/scene/one_browser_with_preloading_alt_slug/",
"slug": "one_browser_with_preloading_alt_slug",
"windows": [
{
"activity": "browser",
"activity_config": {
"preload": true
},
"assets": [
"http://1192.168.127.12:8008/lg_common/webapps/window_ready_mock/test.html"
],
"height": 333,
"presentation_viewport": "center",
"width": 333,
"x_coord": 22,
"y_coord": 22
}
]
}
""")
self.test_two_browsers_with_preloading_mix_msg = self._create_message("""
{
"description": "one_two_browsers_with_preloading_mix",
"duration": 100,
"name": "two_browsers_with_preloading_mix",
"resource_uri": "/director_api/scene/two_browsers_with_preloading_mix/",
"slug": "two_browsers_with_preloading_mix",
"windows": [
{
"activity": "browser",
"activity_config": {
"preload": true
},
"assets": [
"http://127.0.0.1:8008/lg_common/webapps/window_ready_mock/test.html"
],
"height": 100,
"presentation_viewport": "center",
"width": 100,
"x_coord": 300,
"y_coord": 300
},
{
"activity": "browser",
"activity_config": {
"preload": false
},
"assets": [
"http://127.0.0.1:8008/lg_common/webapps/window_ready_mock/test.html"
],
"height": 333,
"presentation_viewport": "center",
"width": 333,
"x_coord": 22,
"y_coord": 22
}
]
}
""")
self.test_two_browsers_with_preloading_mix_alt_slug_msg = self._create_message("""
{
"description": "one_two_browsers_with_preloading_mix_alt_slug",
"duration": 100,
"name": "two_browsers_with_preloading_mix_alt_slug",
"resource_uri": "/director_api/scene/two_browsers_with_preloading_mix_alt_slug/",
"slug": "two_browsers_with_preloading_mix_alt_slug",
"windows": [
{
"activity": "browser",
"activity_config": {
"preload": true
},
"assets": [
"http://127.0.0.1:8008/lg_common/webapps/window_ready_mock/test.html"
],
"height": 100,
"presentation_viewport": "center",
"width": 100,
"x_coord": 300,
"y_coord": 300
},
{
"activity": "browser",
"activity_config": {
"preload": false
},
"assets": [
"http://127.0.0.1:8008/lg_common/webapps/window_ready_mock/test.html"
],
"height": 333,
"presentation_viewport": "center",
"width": 333,
"x_coord": 22,
"y_coord": 22
}
]
}
""")
self.test_four_browsers_with_preloading_mix_msg = self._create_message("""
{
"description": "four_browsers_with_preloading_mix",
"duration": 100,
"name": "four_browsers_with_preloading_mix",
"resource_uri": "/director_api/scene/four_browsers_with_preloading_mix/",
"slug": "four_browsers_with_preloading_mix",
"windows": [
{
"activity": "browser",
"activity_config": {
"preload": true
},
"assets": [
"http://127.0.0.1:8008/lg_common/webapps/window_ready_mock/test.html"
],
"height": 100,
"presentation_viewport": "center",
"width": 100,
"x_coord": 300,
"y_coord": 300
},
{
"activity": "browser",
"activity_config": {
"preload": false
},
"assets": [
"http://127.0.0.1:8008/lg_common/webapps/window_ready_mock/test.html"
],
"height": 333,
"presentation_viewport": "center",
"width": 333,
"x_coord": 22,
"y_coord": 22
},
{
"activity": "browser",
"activity_config": {
"preload": true
},
"assets": [
"http://127.0.0.1:8008/lg_common/webapps/window_ready_mock/test.html"
],
"height": 100,
"presentation_viewport": "left",
"width": 100,
"x_coord": 300,
"y_coord": 300
},
{
"activity": "browser",
"activity_config": {
"preload": false
},
"assets": [
"http://127.0.0.1:8008/lg_common/webapps/window_ready_mock/test.html"
],
"height": 333,
"presentation_viewport": "left",
"width": 333,
"x_coord": 22,
"y_coord": 22
}
]
}
""")
self.test_one_browser_with_preloading_and_custom_preloading_event_msg = self._create_message("""
{
"description": "one_browser_with_preloading_and_custom_preloading_event",
"duration": 100,
"name": "one_browser_with_preloading_and_custom_preloading_event",
"resource_uri": "/director_api/scene/one_browser_with_preloading_and_custom_preloading_event/",
"slug": "one_browser_with_preloading_and_custom_preloading_event",
"windows": [
{
"activity": "browser",
"activity_config": {
"preload": true,
"custom_preload_event": true
},
"assets": [
"http://127.0.0.1:8008/lg_common/webapps/window_ready_mock/custom_event.html?use_app_event=1"
],
"height": 333,
"presentation_viewport": "center",
"width": 333,
"x_coord": 22,
"y_coord": 22
}
]
}
""")
def emit_message(self, ivar_name):
"""
emits message using instance variable name
"""
if not self.publisher:
self.publisher = rospy.Publisher(
'/director/scene', GenericMessage, queue_size=3
)
rospy.init_node("ispaces_messages_factory")
message = self._get_message(ivar_name)
print "message string: %s" % message
self.publisher.publish(message)
return True
def _create_message(self, msg_string):
message = GenericMessage()
message.type = 'json'
try:
message_json = json.loads(msg_string)
message.message = json.dumps(message_json)
return message
except ValueError:
print "Could not decode json message from InteractiveSpacesMessagesFactory"
sys.exit(1)
def _init_publisher(self):
pass
def _get_message(self, message_name):
"""
        Returns the prepared GenericMessage stored under the given attribute name
"""
return getattr(self, message_name)
if __name__ == "__main__":
try:
messages = InteractiveSpacesMessagesFactory()
message_name = sys.argv[1]
print "Emitting %s message" % message_name
messages.emit_message(message_name)
except IndexError:
print ""
print "This file, if called directly, will emit an interactivespaces.msgs.GenericMessage"
print ""
print "You must provide message name to emit:\n%s" % \
'\n'.join(["- " + ivar for ivar in dir(messages) if ivar.startswith('test_')])
print ""
print "NOTE: methods beginning with 'test' are used by test suite"
print ""
sys.exit(1)
| 2.265625 | 2 |
massassi/settings/dev.py | saberworks/massassi-django | 0 | 12760115 | from .base import *
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['www.massassi.org']
CSRF_TRUSTED_ORIGINS=['https://www.massassi.org']
# SECURITY WARNING: don't allow this many fields in production
DATA_UPLOAD_MAX_NUMBER_FIELDS = 5000
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'default': {
'format': '[DJANGO] %(asctime)s %(levelname)s %(name)s.%(funcName)s:%(lineno)s: %(message)s'
},
},
'loggers': {
'django.db.backends': {
'level': 'DEBUG', # change to debug to see queries
'handlers': ['console'],
'propagate': False,
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'default',
},
},
'root': {
'handlers': ['console'],
'level': 'DEBUG',
},
}
| 1.695313 | 2 |
code/arc042_a_01.py | KoyanagiHitoshi/AtCoder | 3 | 12760116 | <reponame>KoyanagiHitoshi/AtCoder<gh_stars>1-10
N,M=map(int,input().split())
A=[int(input()) for i in range(M)][::-1]
ans=[]
s=set()
for a in A:
if a not in s:ans.append(a)
s.add(a)
for i in range(1,N+1):
if i not in s:ans.append(i)
print(*ans,sep="\n") | 2.9375 | 3 |
jmilkfansblog/tests/test_urls.py | xiaoyh121/program | 176 | 12760117 | import unittest
from jmilkfansblog.controllers import admin
from jmilkfansblog.controllers import rest_api
from jmilkfansblog import create_app
from jmilkfansblog.models import db
class TestURLs(unittest.TestCase):
"""Unit test for route functions."""
def setUp(self):
        # Destroy the Flask-Admin and Flask-Restful objects after deleting
        # the app object
admin._views = []
rest_api.resource = []
app = create_app('jmilkfansblog.config.TestConfig')
self.client = app.test_client()
# Using Test app for db
db.app = app
db.create_all()
def tearDown(self):
db.session.remove()
db.drop_all()
if __name__ == '__main__':
unittest.main()
| 2.40625 | 2 |
st_app.py | Gooogr/DeepDreamer | 0 | 12760118 | <filename>st_app.py
import streamlit as st
import cv2
import numpy as np
from PIL import Image
import base64
from io import BytesIO
from collections import defaultdict
from main import predict
from keras import backend as K
from keras.applications import inception_v3
from utils import get_loss
DEMO_IMAGE_PATH = './imgs/flower_valley.jpg'
st.title("DeepDream interactive demo")
# Get content image from file_uploader
img_file_buffer = st.sidebar.file_uploader("Select image", type=["png", "jpg", "jpeg"])
if img_file_buffer is not None:
image = np.array(Image.open(img_file_buffer))
else:
image = np.array(Image.open(DEMO_IMAGE_PATH))
# Create mixed layers selector
layers_names = st.multiselect(
'What InceptionV3 layers we will use?',
options=['mixed{}'.format(i) for i in range(11)],
default=['mixed2', 'mixed3', 'mixed4', 'mixed5'])
# Set default values of layers coefficients
layers_coeff = defaultdict(lambda: 0.0)
layers_coeff['mixed2'] = 0.2
layers_coeff['mixed3'] = 3.
layers_coeff['mixed4'] = 2.
layers_coeff['mixed5'] = 1.5
for layer_name in layers_names:
layers_coeff[layer_name] = st.slider(layer_name, 0.0, 5.0, layers_coeff[layer_name]) # dict update automatically
# Setting up sidebar hyperparameters
st.sidebar.markdown('Additional hyperparameters. Be careful.')
num_octave = st.sidebar.slider("Octave number", min_value=2, max_value=5, value=3)
step = st.sidebar.slider("Step", min_value=0.005, max_value=0.05, value=0.01, step = 0.005, format='%f')
octave_scale = st.sidebar.slider("Octave scale", min_value=1.1, max_value=2.0, value=1.4)
iterations = st.sidebar.slider("Iterations", min_value=10, max_value=30, value=20)
max_loss = st.sidebar.slider("Maximum loss", min_value=5, max_value=20, value=10)
# Setting up model and loss
K.set_learning_phase(0)
model = inception_v3.InceptionV3(weights='imagenet',
include_top=False)
loss = get_loss(layers_coeff, model)
def get_image_download_link(img):
"""Generates a link allowing the PIL image to be downloaded
in: PIL image
out: href string
"""
buffered = BytesIO()
img.save(buffered, format="JPEG")
img_str = base64.b64encode(buffered.getvalue()).decode()
href = f'<a href="data:file/jpg;base64,{img_str}">Download result</a>'
return href
if st.button('Start to dream'):
predicted_img = predict(img_file=image,
num_octave=num_octave,
octave_scale=octave_scale,
iterations=iterations,
step=step,
max_loss=max_loss,
model=model,
loss=loss)
st.image(predicted_img)
result = Image.fromarray(predicted_img)
st.markdown(get_image_download_link(result),
unsafe_allow_html=True)
| 2.609375 | 3 |
Independence_proof.py | ErikNikolajsen/Smart-Home-Simulator-Backend | 0 | 12760119 | <reponame>ErikNikolajsen/Smart-Home-Simulator-Backend
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import requests
import json
print("Proof for the independence of the backend.\n")
response = requests.get("http://localhost:8080/api/system/ping")
print(response.text)
f = open('floorplan.json',)
floorplan = json.load(f)
f.close()
url1 = "http://localhost:8080/api/roomConfig/floorplan"
floorplanreq = requests.post(url1, json = floorplan)
print("\nsending floorplan: ")
print(floorplanreq.text)
f = open('input.json',)
inputFile = json.load(f)
f.close()
url2 = "http://localhost:8080/api/simulation/input"
inputreq = requests.post(url2, json = inputFile)
print("\nsending inputs: ")
print(inputreq.text)
f = open('simulator.json',)
simulator = json.load(f)
f.close()
url3 = "http://localhost:8080/api/simulation/simulator"
simulatorreq = requests.post(url3, json = simulator)
print("\nsending simulator info: ")
print(simulatorreq.text)
| 2.484375 | 2 |
backend/melive/urls.py | dmpe/django-wohn | 2 | 12760120 | """melive URL Configuration
See:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
import debug_toolbar
from django.conf.urls import url
from django.contrib import admin
from django.contrib.sitemaps import views
from django.contrib.sitemaps.views import sitemap
from django.urls import include, path
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import TemplateView
from graphene_django.views import GraphQLView
from core.sitemap import B40_Sitemap, UserMNG_Sitemap
sitemaps = {"core": B40_Sitemap, "userMng": UserMNG_Sitemap}
urlpatterns = [
path("admin/", admin.site.urls),
path("", include("core.urls")),
path("", include("userMng.urls")),
path("oauth/", include("social_django.urls", namespace="social")),
url(
r"^robots.txt$", TemplateView.as_view(template_name="robots.txt", content_type="text/plain"), name="robots_file"
),
path("sitemap.xml", sitemap, {"sitemaps": sitemaps}, name="django.contrib.sitemaps.views.sitemap"),
path("__debug__/", include(debug_toolbar.urls)),
# expose graphql server api, incl. GraphQL IDE - and
# disable CSRF token requirement because for now it is PUBLIC API
# TODO it should be protected
path("graphql", csrf_exempt(GraphQLView.as_view(graphiql=True))),
]
| 2.390625 | 2 |
app/api/health.py | rbarbioni/python-flask-api | 3 | 12760121 |
from flask import Blueprint
from .constants import GET, COUNTER
RESOURCE = 'health'
PATH = f'/api/v1/{RESOURCE}'
api = Blueprint(RESOURCE, __name__, url_prefix=PATH)
@api.route('/', methods=[GET])
def get():
COUNTER.labels(GET, PATH).inc()
return {'status': 'ok'}
| 2.046875 | 2 |
client.py | maker2413/Socket-Squares | 1 | 12760122 | """
client.py
"""
# Standard library
import socket
import re
import pickle
import sys
# Third party
import pygame
# Local source
import game_functions as gf
import square
# Server port, IPv4 will be prompted
PORT = 26256
# Server data constraints
HEADER_SIZE = 16
FORMAT_TYPE = 'utf-8'
def main():
server_ip = ipPrompt()
# Create and connect client socket
client = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
client.connect((server_ip,PORT))
# Report to client that connection has been established with server
print(f"[CLIENT] You have connected to the server @ {server_ip}")
# Initialize and manage pygame settings
print("[CLIENT] Launching game window...")
pygame.init()
pygame.display.set_caption("Socket Squares")
clock = pygame.time.Clock()
# Declare pygame screen and resolution
screen = pygame.display.set_mode((800,600))
# Done
print("[CLIENT] Receiving character data...")
header_data = client.recv(HEADER_SIZE).decode(FORMAT_TYPE)
if header_data:
header_data = int(header_data)
my_square = client.recv(header_data)
my_square = pickle.loads(my_square)
my_square = square.PlayerSquare(my_square, screen)
print("[CLIENT] Character data received.")
# List of all current player squares
player_squares = [None,None,None,None,None,None,None,None]
clock.tick(60)
while True:
gf.check_events(screen, my_square)
player_squares = pickleSwap(my_square, client)
gf.update_screen(screen, my_square, player_squares)
else:
# Exit cleanly
client.close()
print("You have disconnected from the server. Now exiting...")
pygame.quit()
sys.exit()
def ipPrompt():
# Prompt user for IPv4, determine if given IPv4 is "valid" using regex. Don't continue until pass regex
temp_ipv4 = ""
regex_passed = None
while not regex_passed:
temp_ipv4 = input("\nEnter the IPv4 Address of a server to connect to: ")
regex_passed = re.search("^[0-9]{1,3}\.{1}[0-9]{1,3}\.{1}[0-9]{1,3}\.{1}[0-9]{1,3}$", temp_ipv4)
if not regex_passed:
print("Invalid IPv4. Please try again following the format: X.X.X.X")
return temp_ipv4
def printArray(given_array):
for item in given_array:
if item is not None:
pass
def pickleSwap(data, client):
# Turn coordinates of player square into a tuple, send to server and receive all square updates
alldata = pickle.dumps((data.center_x, data.center_y))
send_length = f"{len(alldata):<{HEADER_SIZE}}"
send_length = str(send_length).encode(FORMAT_TYPE)
client.send(send_length)
client.send(alldata)
# Receive player_squares list
squares = client.recv(HEADER_SIZE)
squares = int(squares)
squares = client.recv(squares)
squares = pickle.loads(squares)
printArray(squares)
return squares
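# A minimal server-side sketch of the framing pickleSwap expects (an assumption
# for illustration only -- the real server is a separate program). Every message
# is a 16-byte, left-justified ASCII length header followed by a pickled payload,
# and the reply is the full player_squares list framed the same way:
#
#   header = conn.recv(HEADER_SIZE)                   # e.g. b'23              '
#   payload = pickle.loads(conn.recv(int(header)))    # (center_x, center_y)
#   reply = pickle.dumps(player_squares)              # list of up to 8 squares
#   conn.send(f"{len(reply):<{HEADER_SIZE}}".encode(FORMAT_TYPE))
#   conn.send(reply)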
main() | 3.125 | 3 |
pycardano/serialization.py | jgailbreath/pycardano | 1 | 12760123 | <gh_stars>1-10
"""Defines CBOR serialization interfaces and provides useful serialization classes."""
from __future__ import annotations
import re
from collections import OrderedDict, defaultdict
from copy import deepcopy
from dataclasses import Field, dataclass, fields
from datetime import datetime
from decimal import Decimal
from inspect import isclass
from pprint import pformat
from typing import Any, Callable, ClassVar, List, Type, TypeVar, Union, get_type_hints
from cbor2 import CBOREncoder, CBORSimpleValue, CBORTag, dumps, loads, undefined
from typeguard import check_type, typechecked
from pycardano.exception import (
DeserializeException,
InvalidArgumentException,
SerializeException,
)
__all__ = [
"default_encoder",
"IndefiniteList",
"Primitive",
"CBORBase",
"CBORSerializable",
"ArrayCBORSerializable",
"MapCBORSerializable",
"DictCBORSerializable",
"list_hook",
]
class IndefiniteList:
def __init__(self, items):
self.items = items
def __eq__(self, other):
if isinstance(other, IndefiniteList):
return self.items == other.items
else:
return False
Primitive = TypeVar(
"Primitive",
bytes,
bytearray,
str,
int,
float,
Decimal,
bool,
type(None),
tuple,
list,
IndefiniteList,
dict,
defaultdict,
OrderedDict,
type(undefined),
datetime,
re.Pattern,
CBORSimpleValue,
CBORTag,
set,
frozenset,
)
"""
A list of types that could be encoded by
`Cbor2 encoder <https://cbor2.readthedocs.io/en/latest/modules/encoder.html>`_ directly.
"""
CBORBase = TypeVar("CBORBase", bound="CBORSerializable")
def default_encoder(
encoder: CBOREncoder, value: Union[CBORSerializable, IndefiniteList]
):
"""A fallback function that encodes CBORSerializable to CBOR"""
assert isinstance(value, (CBORSerializable, IndefiniteList)), (
f"Type of input value is not CBORSerializable, " f"got {type(value)} instead."
)
if isinstance(value, IndefiniteList):
# Currently, cbor2 doesn't support indefinite list, therefore we need special
# handling here to explicitly write header (b'\x9f'), each body item, and footer (b'\xff') to
# the output bytestring.
encoder.write(b"\x9f")
for item in value.items:
encoder.encode(item)
encoder.write(b"\xff")
else:
encoder.encode(value.to_primitive())
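# A small usage sketch: passing default_encoder to cbor2.dumps lets an
# IndefiniteList be written out as an indefinite-length CBOR array, e.g.
#
#   dumps(IndefiniteList([1, 2, 3]), default=default_encoder).hex()
#   # -> '9f010203ff'   (0x9f / 0xff are the indefinite-array header and break)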
@typechecked
class CBORSerializable:
"""
CBORSerializable standardizes the interfaces a class should implement in order for it to be serialized to and
deserialized from CBOR.
Two required interfaces to implement are :meth:`to_primitive` and :meth:`from_primitive`.
    :meth:`to_primitive` converts an object to a CBOR primitive type (see :const:`Primitive`), which can then be
    encoded by the CBOR library. :meth:`from_primitive` restores an object from a CBOR primitive type.
To convert a CBORSerializable to CBOR, use :meth:`to_cbor`.
To restore a CBORSerializable from CBOR, use :meth:`from_cbor`.
.. note::
:meth:`to_primitive` needs to return a pure CBOR primitive type, meaning that the returned value and all its
child elements have to be CBOR primitives, which could mean a good amount of work. An alternative but simpler
approach is to implement :meth:`to_shallow_primitive` instead. `to_shallow_primitive` allows the returned object
to be either CBOR :const:`Primitive` or a :class:`CBORSerializable`, as long as the :class:`CBORSerializable`
does not refer to itself, which could cause infinite loops.
"""
def to_shallow_primitive(self) -> Primitive:
"""
Convert the instance to a CBOR primitive. If the primitive is a container, e.g. list, dict, the type of
its elements could be either a Primitive or a CBORSerializable.
Returns:
:const:`Primitive`: A CBOR primitive.
Raises:
:class:`pycardano.exception.SerializeException`: When the object could not be converted to CBOR primitive
types.
"""
raise NotImplementedError(
f"'to_shallow_primitive()' is not implemented by {self.__class__}."
)
def to_primitive(self) -> Primitive:
"""Convert the instance and its elements to CBOR primitives recursively.
Returns:
:const:`Primitive`: A CBOR primitive.
Raises:
:class:`pycardano.exception.SerializeException`: When the object or its elements could not be converted to
CBOR primitive types.
"""
result = self.to_shallow_primitive()
container_types = (
dict,
OrderedDict,
defaultdict,
set,
frozenset,
tuple,
list,
CBORTag,
IndefiniteList,
)
def _helper(value):
if isinstance(value, CBORSerializable):
return value.to_primitive()
elif isinstance(value, container_types):
return _dfs(value)
else:
return value
def _dfs(value):
if isinstance(value, (dict, OrderedDict, defaultdict)):
new_result = type(value)()
if hasattr(value, "default_factory"):
new_result.setdefault(value.default_factory)
for k, v in value.items():
new_result[_helper(k)] = _helper(v)
return new_result
elif isinstance(value, set):
return {_helper(v) for v in value}
elif isinstance(value, frozenset):
return frozenset({_helper(v) for v in value})
elif isinstance(value, tuple):
return tuple([_helper(k) for k in value])
elif isinstance(value, list):
return [_helper(k) for k in value]
elif isinstance(value, IndefiniteList):
return IndefiniteList([_helper(k) for k in value.items])
elif isinstance(value, CBORTag):
return CBORTag(value.tag, _helper(value.value))
else:
return value
return _dfs(result)
@classmethod
def from_primitive(cls: CBORBase, value: Primitive) -> CBORBase:
"""Turn a CBOR primitive to its original class type.
Args:
cls (CBORBase): The original class type.
value (:const:`Primitive`): A CBOR primitive.
Returns:
CBORBase: A CBOR serializable object.
Raises:
:class:`pycardano.exception.DeserializeException`: When the object could not be restored from primitives.
"""
raise NotImplementedError(
f"'from_primitive()' is not implemented by {cls.__name__}."
)
def to_cbor(self, encoding: str = "hex") -> Union[str, bytes]:
"""Encode a Python object into CBOR format.
Args:
encoding (str): Encoding to use. Choose from "hex" or "bytes".
Returns:
Union[str, bytes]: CBOR encoded in a hex string if encoding is hex (default) or bytes if encoding is bytes.
Examples:
>>> class Test(CBORSerializable):
... def __init__(self, number1, number2):
... self.number1 = number1
... self.number2 = number2
...
... def to_primitive(value):
... return [value.number1, value.number2]
...
... @classmethod
... def from_primitive(cls, value):
... return cls(value[0], value[1])
...
... def __repr__(self):
... return f"Test({self.number1}, {self.number2})"
>>> a = Test(1, 2)
>>> a.to_cbor()
'820102'
"""
valid_encodings = ("hex", "bytes")
# Make sure encoding is selected correctly before proceeding further.
if encoding not in ("hex", "bytes"):
raise InvalidArgumentException(
f"Invalid encoding: {encoding}. Please choose from {valid_encodings}"
)
cbor = dumps(self, default=default_encoder)
if encoding == "hex":
return cbor.hex()
else:
return cbor
@classmethod
def from_cbor(cls, payload: Union[str, bytes]) -> CBORSerializable:
"""Restore a CBORSerializable object from a CBOR.
Args:
payload (Union[str, bytes]): CBOR bytes or hex string to restore from.
Returns:
CBORSerializable: Restored CBORSerializable object.
Examples:
Basic use case:
>>> class Test(CBORSerializable):
... def __init__(self, number1, number2):
... self.number1 = number1
... self.number2 = number2
...
... def to_primitive(value):
... return [value.number1, value.number2]
...
... @classmethod
... def from_primitive(cls, value):
... return cls(value[0], value[1])
...
... def __repr__(self):
... return f"Test({self.number1}, {self.number2})"
>>> a = Test(1, 2)
>>> cbor_hex = a.to_cbor()
>>> print(Test.from_cbor(cbor_hex))
Test(1, 2)
For a CBORSerializable that has CBORSerializables as attributes, we will need to pass
each child value to the :meth:`from_primitive` method of its corresponding CBORSerializable. Example:
>>> class TestParent(CBORSerializable):
... def __init__(self, number1, test):
... self.number1 = number1
... self.test = test
...
... def to_shallow_primitive(value): # Implementing `to_shallow_primitive` simplifies the work.
... return [value.number1, value.test]
...
... @classmethod
... def from_primitive(cls, value):
... test = Test.from_primitive(value[1]) # Restore test by passing `value[1]` to
... # `Test.from_primitive`
... return cls(value[0], test)
...
... def __repr__(self):
... return f"TestParent({self.number1}, {self.test})"
>>> a = Test(1, 2)
>>> b = TestParent(3, a)
>>> b
TestParent(3, Test(1, 2))
>>> cbor_hex = b.to_cbor()
>>> cbor_hex
'8203820102'
>>> print(TestParent.from_cbor(cbor_hex))
TestParent(3, Test(1, 2))
"""
if type(payload) == str:
payload = bytes.fromhex(payload)
value = loads(payload)
return cls.from_primitive(value)
def __repr__(self):
return pformat(vars(self))
def _restore_dataclass_field(
f: Field, v: Primitive
) -> Union[Primitive, CBORSerializable]:
"""Try to restore a value back to its original type based on information given in field.
Args:
f (dataclass_field): A data class field.
v (:const:`Primitive`): A CBOR primitive.
Returns:
Union[:const:`Primitive`, CBORSerializable]: A CBOR primitive or a CBORSerializable.
"""
if "object_hook" in f.metadata:
return f.metadata["object_hook"](v)
elif isclass(f.type) and issubclass(f.type, CBORSerializable):
return f.type.from_primitive(v)
elif isclass(f.type) and issubclass(f.type, IndefiniteList):
return IndefiniteList(v)
elif hasattr(f.type, "__origin__") and f.type.__origin__ is Union:
t_args = f.type.__args__
for t in t_args:
if isclass(t) and issubclass(t, CBORSerializable):
try:
return t.from_primitive(v)
except DeserializeException:
pass
elif t in Primitive.__constraints__ and isinstance(v, t):
return v
raise DeserializeException(
f"Cannot deserialize object: \n{v}\n in any valid type from {t_args}."
)
return v
ArrayBase = TypeVar("ArrayBase", bound="ArrayCBORSerializable")
"""A generic type that is bounded by ArrayCBORSerializable."""
@dataclass(repr=False)
class ArrayCBORSerializable(CBORSerializable):
"""
A base class that can serialize its child `dataclass <https://docs.python.org/3/library/dataclasses.html>`_
into a `CBOR array <https://datatracker.ietf.org/doc/html/rfc8610#section-3.4>`_.
The class is useful when the position of each item in a list have its own semantic meaning.
Examples:
Basic usages:
>>> from dataclasses import dataclass
>>> @dataclass
... class Test1(ArrayCBORSerializable):
... a: str
... b: str=None
>>> @dataclass
... class Test2(ArrayCBORSerializable):
... c: str
... test1: Test1
>>> t = Test2(c="c", test1=Test1(a="a"))
>>> t
Test2(c='c', test1=Test1(a='a', b=None))
>>> cbor_hex = t.to_cbor()
>>> cbor_hex
'826163826161f6'
>>> Test2.from_cbor(cbor_hex) # doctest: +SKIP
Test2(c='c', test1=Test1(a='a', b=None))
A value of `None` will be encoded as nil (#7.22) in cbor. This will become a problem if the field is meant to be
optional. To exclude an optional attribute from cbor, we can use `field` constructor with a metadata field
"optional" set to True and default value set to `None`.
.. Note::
In ArrayCBORSerializable, all non-optional fields have to be declared before any optional field.
Example:
>>> from dataclasses import dataclass, field
>>> @dataclass
... class Test1(ArrayCBORSerializable):
... a: str
... b: str=field(default=None, metadata={"optional": True})
>>> @dataclass
... class Test2(ArrayCBORSerializable):
... c: str
... test1: Test1
>>> t = Test2(c="c", test1=Test1(a="a"))
>>> t
Test2(c='c', test1=Test1(a='a', b=None))
>>> t.to_primitive() # Notice below that attribute "b" is not included in converted primitive.
['c', ['a']]
>>> cbor_hex = t.to_cbor()
>>> cbor_hex
'826163816161'
>>> Test2.from_cbor(cbor_hex) # doctest: +SKIP
Test2(c='c', test1=Test1(a='a', b=None))
"""
field_sorter: ClassVar[Callable[[List], List]] = lambda x: x
def to_shallow_primitive(self) -> List[Primitive]:
"""
Returns:
:const:`Primitive`: A CBOR primitive.
Raises:
:class:`pycardano.exception.SerializeException`: When the object could not be converted to CBOR primitive
types.
"""
primitives = []
for f in self.__class__.field_sorter(fields(self)):
val = getattr(self, f.name)
if val is None and f.metadata.get("optional"):
continue
primitives.append(val)
return primitives
@classmethod
def from_primitive(cls: ArrayBase, values: List[Primitive]) -> ArrayBase:
"""Restore a primitive value to its original class type.
Args:
cls (ArrayBase): The original class type.
values (List[Primitive]): A list whose elements are CBOR primitives.
Returns:
:const:`ArrayBase`: Restored object.
Raises:
:class:`pycardano.exception.DeserializeException`: When the object could not be restored from primitives.
"""
all_fields = [f for f in fields(cls) if f.init]
if type(values) != list:
raise DeserializeException(
f"Expect input value to be a list, got a {type(values)} instead."
)
restored_vals = []
type_hints = get_type_hints(cls)
for f, v in zip(all_fields, values):
if not isclass(f.type):
f.type = type_hints[f.name]
v = _restore_dataclass_field(f, v)
restored_vals.append(v)
return cls(*restored_vals)
def __repr__(self):
return super().__repr__()
MapBase = TypeVar("MapBase", bound="MapCBORSerializable")
"""A generic type that is bounded by MapCBORSerializable."""
@dataclass(repr=False)
class MapCBORSerializable(CBORSerializable):
"""
A base class that can serialize its child `dataclass <https://docs.python.org/3/library/dataclasses.html>`_
into a `CBOR Map <https://datatracker.ietf.org/doc/html/rfc8610#section-3.5.1>`_.
The class is useful when each key in a map have its own semantic meaning.
Examples:
Basic usage:
>>> from dataclasses import dataclass
>>> @dataclass
... class Test1(MapCBORSerializable):
... a: str=""
... b: str=""
>>> @dataclass
... class Test2(MapCBORSerializable):
... c: str=None
... test1: Test1=Test1()
>>> t = Test2(test1=Test1(a="a"))
>>> t
Test2(c=None, test1=Test1(a='a', b=''))
>>> t.to_primitive()
{'c': None, 'test1': {'a': 'a', 'b': ''}}
>>> cbor_hex = t.to_cbor()
>>> cbor_hex
'a26163f6657465737431a261616161616260'
>>> Test2.from_cbor(cbor_hex) # doctest: +SKIP
Test2(c=None, test1=Test1(a='a', b=''))
In the example above, all keys in the map share the same name as their corresponding attributes. However,
sometimes we want to use different keys when serializing some attributes, this could be achieved by adding a
"key" value to the metadata of a field. Example:
>>> from dataclasses import dataclass, field
>>> @dataclass
... class Test1(MapCBORSerializable):
... a: str=field(default="", metadata={"key": "0"})
... b: str=field(default="", metadata={"key": "1"})
>>> @dataclass
... class Test2(MapCBORSerializable):
... c: str=field(default=None, metadata={"key": "0", "optional": True})
... test1: Test1=field(default=Test1(), metadata={"key": "1"})
>>> t = Test2(test1=Test1(a="a"))
>>> t
Test2(c=None, test1=Test1(a='a', b=''))
>>> t.to_primitive()
{'1': {'0': 'a', '1': ''}}
>>> cbor_hex = t.to_cbor()
>>> cbor_hex
'a16131a261306161613160'
>>> Test2.from_cbor(cbor_hex) # doctest: +SKIP
Test2(c=None, test1=Test1(a='a', b=''))
"""
def to_shallow_primitive(self) -> Primitive:
primitives = {}
for f in fields(self):
if "key" in f.metadata:
key = f.metadata["key"]
else:
key = f.name
if key in primitives:
raise SerializeException(f"Key: '{key}' already exists in the map.")
val = getattr(self, f.name)
if val is None and f.metadata.get("optional"):
continue
primitives[key] = val
return primitives
@classmethod
def from_primitive(cls: MapBase, values: Primitive) -> MapBase:
"""Restore a primitive value to its original class type.
Args:
cls (MapBase): The original class type.
values (:const:`Primitive`): A CBOR primitive.
Returns:
:const:`MapBase`: Restored object.
Raises:
:class:`pycardano.exception.DeserializeException`: When the object could not be restored from primitives.
"""
all_fields = {f.metadata.get("key", f.name): f for f in fields(cls) if f.init}
if type(values) != dict:
raise DeserializeException(
f"Expect input value to be a dict, got a {type(values)} instead."
)
kwargs = {}
type_hints = get_type_hints(cls)
for key in values:
if key not in all_fields:
raise DeserializeException(f"Unexpected map key {key} in CBOR.")
f = all_fields[key]
v = values[key]
if not isclass(f.type):
f.type = type_hints[f.name]
v = _restore_dataclass_field(f, v)
kwargs[f.name] = v
return cls(**kwargs)
def __repr__(self):
return super().__repr__()
DictBase = TypeVar("DictBase", bound="DictCBORSerializable")
"""A generic type that is bounded by DictCBORSerializable."""
class DictCBORSerializable(CBORSerializable):
"""A dictionary class where all keys share the same type and all values share the same type.
Examples:
>>> @dataclass
... class Test1(ArrayCBORSerializable):
... a: int
... b: str
>>>
>>> class Test2(DictCBORSerializable):
... KEY_TYPE = str
... VALUE_TYPE = Test1
>>>
>>> t = Test2()
>>> t["x"] = Test1(a=1, b="x")
>>> t["y"] = Test1(a=2, b="y")
>>> primitives = t.to_primitive()
>>> deserialized = Test2.from_primitive(primitives)
>>> assert t == deserialized
>>> t[1] = 2
Traceback (most recent call last):
...
TypeError: type of key must be str; got int instead
"""
KEY_TYPE = Any
VALUE_TYPE = Any
def __init__(self, *args, **kwargs):
self.data = dict(*args, **kwargs)
def __getattr__(self, item):
return getattr(self.data, item)
def __setitem__(self, key: KEY_TYPE, value: VALUE_TYPE):
check_type("key", key, self.KEY_TYPE)
check_type("value", value, self.VALUE_TYPE)
self.data[key] = value
def __getitem__(self, key):
return self.data[key]
def __eq__(self, other):
if isinstance(other, DictCBORSerializable):
return self.data == other.data
else:
return False
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(self.data)
def __delitem__(self, key):
del self.data[key]
def __repr__(self):
return self.data.__repr__()
def __copy__(self):
return self.__class__(self)
def __deepcopy__(self, memodict={}):
return self.__class__(deepcopy(self.data))
def to_shallow_primitive(self) -> dict:
# Sort keys in a map according to https://datatracker.ietf.org/doc/html/rfc7049#section-3.9
def _get_sortable_val(key):
if isinstance(key, CBORSerializable):
cbor_bytes = key.to_cbor("bytes")
else:
cbor_bytes = dumps(key)
return len(cbor_bytes), cbor_bytes
return dict(sorted(self.data.items(), key=lambda x: _get_sortable_val(x[0])))
@classmethod
def from_primitive(cls: DictBase, value: dict) -> DictBase:
"""Restore a primitive value to its original class type.
Args:
cls (DictBase): The original class type.
value (:const:`Primitive`): A CBOR primitive.
Returns:
:const:`DictBase`: Restored object.
Raises:
:class:`pycardano.exception.DeserializeException`: When the object could not be restored from primitives.
"""
if not value:
raise DeserializeException(f"Cannot accept empty value {value}.")
restored = cls()
for k, v in value.items():
k = (
cls.KEY_TYPE.from_primitive(k)
                if isclass(cls.KEY_TYPE)
and issubclass(cls.KEY_TYPE, CBORSerializable)
else k
)
v = (
cls.VALUE_TYPE.from_primitive(v)
if isclass(cls.VALUE_TYPE)
and issubclass(cls.VALUE_TYPE, CBORSerializable)
else v
)
restored[k] = v
return restored
def copy(self) -> DictBase:
return self.__class__(self)
@typechecked
def list_hook(cls: Type[CBORBase]) -> Callable[[List[Primitive]], List[CBORBase]]:
"""A factory that generates a Callable which turns a list of Primitive to a list of CBORSerializables.
Args:
cls (CBORBase): The type of CBORSerializable the list will be converted to.
Returns:
        Callable[[List[Primitive]], List[CBORBase]]: A Callable that restores a list of Primitive to a list of
CBORSerializables.
"""
return lambda vals: [cls.from_primitive(v) for v in vals]
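# A short usage sketch (Item and Box are hypothetical classes): when a dataclass
# attribute holds a list of CBORSerializable elements, pass list_hook(ElementType)
# as the field's "object_hook" metadata so from_primitive can rebuild each element
# (see _restore_dataclass_field above):
#
#   from dataclasses import field
#
#   @dataclass
#   class Item(ArrayCBORSerializable):
#       a: int
#       b: str
#
#   @dataclass
#   class Box(ArrayCBORSerializable):
#       items: List[Item] = field(metadata={"object_hook": list_hook(Item)})
#
#   box = Box(items=[Item(1, "x"), Item(2, "y")])
#   assert Box.from_cbor(box.to_cbor()).items[0].a == 1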
| 2.15625 | 2 |
fb.py | Enlight-UW/Badger-Glass | 1 | 12760124 | import facebook
import urllib3
import requests
token = '<KEY>'
graph = facebook.GraphAPI(access_token=token, version = 2.8)
#events = graph.request('me/id')
thisid = graph.get_object(id = 'me?fields=id,name,email,gender,posts')
print "User Name :", thisid['name']
print "User ID :", thisid['id']
print "User Email :", thisid['email']
print "User Gender :", thisid['gender']
print "User Post :", thisid['posts']['data'][0]['message']
print "User Post :", thisid['posts']['data'][1]['message']
| 2.90625 | 3 |
plugin/commands.py | themilkman/RainbowBrackets | 0 | 12760125 | <reponame>themilkman/RainbowBrackets
import re
import sublime
import sublime_plugin
from sublime import Region
from .consts import SETTINGS_FILE
from .debug import Debuger
from .manager import RainbowBracketsViewManager as _manager
from .color_scheme import cs_mgr
class RbToggleDebugCommand(sublime_plugin.ApplicationCommand):
def run(self):
Debuger.debug = not Debuger.debug
sublime.load_settings(SETTINGS_FILE).set("debug", Debuger.debug)
sublime.save_settings(SETTINGS_FILE)
class RbClearColorSchemesCommand(sublime_plugin.ApplicationCommand):
def run(self):
cs_mgr.clear_color_schemes()
class RbViewCommand(sublime_plugin.TextCommand):
def get_executor(self):
return _manager.get_view_executor(self.view)
def is_coloring(self):
executor = self.get_executor()
return bool(executor and executor.coloring)
class RbColorCommand(RbViewCommand):
def run(self, edit):
_manager.color_view(self.view)
def is_enabled(self):
return not self.is_coloring()
class RbSweepCommand(RbViewCommand):
def run(self, edit):
_manager.sweep_view(self.view)
def is_enabled(self):
return self.is_coloring()
class RbSetupCommand(RbViewCommand):
def run(self, edit):
_manager.setup_view_executor(self.view)
def is_enabled(self):
return self.get_executor() is None
class RbCloseCommand(RbViewCommand):
def run(self, edit):
_manager.close_view_executor(self.view)
def is_enabled(self):
return self.get_executor() is not None
class RbEditBracketsCommand(sublime_plugin.TextCommand):
def run(self, edit, operation="", to="", select_content=True):
def find_cursor_brackets(regex=None):
last_bracket = None
for region in view.sel():
bracket = self.find_nearest(trees, region, regex)
if bracket is None or bracket == last_bracket:
continue
else:
last_bracket = bracket
yield bracket
view = self.view
trees = _manager.get_view_bracket_trees(view)
if not trees:
return
if operation == "select":
regex = to and re.compile(to + r'\b') or None
for p in find_cursor_brackets(regex=regex):
region = self.cover(p)
view.sel().add(region)
elif operation == "remove":
pairs = [p for p in find_cursor_brackets()]
regions = [r for p in pairs for r in p]
regions.sort()
for r in reversed(regions):
view.erase(edit, r)
if select_content:
selections = []
for p in pairs:
begin = p[0].a - regions.index(p[0])
end = p[1].a - regions.index(p[1])
selections.append(Region(begin, end))
view.sel().add_all(selections)
elif operation == "transform":
mapping = _manager.get_view_bracket_pairs(view)
replace_list = []
for p in find_cursor_brackets():
if view.substr(p[0]) == to:
continue
replace_list.append((p[0], to))
replace_list.append((p[1], mapping[to]))
replace_list.sort(key=lambda i:i[0], reverse=True)
for region, content in replace_list:
view.replace(edit, region, content)
def cover(self, bracket_pair):
return Region(bracket_pair[0].a, bracket_pair[1].b)
def find_nearest(self, trees, r, regex):
pairs = self.binary_path_search(trees, r.begin(), r.end())
bracket = None
if pairs and regex is not None:
for p in reversed(pairs):
point = p[0].end()
text = self.view.substr(Region(point, point + 31))
if regex.match(text) is not None:
bracket = p
break
else:
bracket = pairs[0]
elif pairs:
bracket = pairs[-1]
if bracket is None and r.empty():
for tree in trees:
if (tree.opening.a == r.a or
tree.closing.b == r.a):
bracket = (tree.opening, tree.closing)
break
return bracket
    def binary_path_search(self, trees, r_begin, r_end):
        """Descend the bracket trees, binary-searching each level for the pair
        enclosing [r_begin, r_end]; returns the chain of enclosing bracket
        pairs from outermost to innermost."""
bracket_path = []
while True:
found_closer = False
lo, hi = 0, len(trees) - 1
while lo <= hi:
mi = (lo + hi) >> 1
tr = trees[mi]
oa = tr.opening.a
cb = tr.closing.b
if cb < r_begin:
lo = mi + 1
elif oa > r_end:
hi = mi - 1
else:
if (oa < r_begin and r_end < cb or
r_begin == r_end and (r_end == oa or r_begin == cb)):
found_closer = True
trees = tr.contain
p = (tr.opening, tr.closing)
bracket_path.append(p)
break
if not found_closer:
break
return bracket_path
| 1.859375 | 2 |
calliope_app/api/tests/test_urls.py | NREL/engage | 3 | 12760126 | <filename>calliope_app/api/tests/test_urls.py
"""
Unit tests for Django app "api" urls.
"""
import uuid
from django.conf import settings
from django.test import TestCase
from django.urls import reverse, resolve
from django.utils import translation
class APIUserURLTestCase(TestCase):
def test_user_registration(self):
view_name = "user_registration"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/user_registration/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_user_activation(self):
activation_uuid = str(uuid.uuid4())
view_name = "user_activation"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/user_activation/{activation_uuid}"
self.assertEqual(
reverse(view_name, kwargs={"activation_uuid": activation_uuid}), url
)
self.assertEqual(resolve(url).view_name, view_name)
class APIModelURLTestCase(TestCase):
def test_add_model(self):
view_name = "add_model"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/add_model/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_remove_model(self):
view_name = "remove_model"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/remove_model/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_duplicate_model(self):
view_name = "duplicate_model"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/duplicate_model/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_add_collaborator(self):
view_name = "add_collaborator"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/add_collaborator/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
    def test_add_model_comment(self):
view_name = "add_model_comment"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/add_model_comment/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
class APILocationURLTestCase(TestCase):
def test_update_location(self):
view_name = "update_location"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/update_location/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_delete_location(self):
view_name = "delete_location"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/delete_location/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
class APIParameterURLTestCase(TestCase):
def test_convert_to_timeseries(self):
view_name = "convert_to_timeseries"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/convert_to_timeseries/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_update_favorite(self):
view_name = "update_favorite"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/update_favorite/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
class APITechnologyURLTestCase(TestCase):
def test_add_technology(self):
view_name = "add_technology"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/add_technology/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_delete_technology(self):
view_name = "delete_technology"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/delete_technology/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_update_tech_params(self):
view_name = "update_tech_params"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/update_tech_params/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
class APILocTechURLTestCase(TestCase):
def test_add_loc_tech(self):
view_name = "add_loc_tech"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/add_loc_tech/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_delete_loc_tech(self):
view_name = "delete_loc_tech"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/delete_loc_tech/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_update_loc_tech_params(self):
view_name = "update_loc_tech_params"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/update_loc_tech_params/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
class APIScenarioURLTestCase(TestCase):
def test_add_scenario(self):
view_name = "add_scenario"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/add_scenario/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_toggle_scenario_loc_tech(self):
view_name = "toggle_scenario_loc_tech"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/toggle_scenario_loc_tech/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_update_scenario_params(self):
view_name = "update_scenario_params"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/update_scenario_params/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_delete_scenario(self):
view_name = "delete_scenario"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/delete_scenario/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
class APIFileURLTestCase(TestCase):
def test_upload_file(self):
view_name = "upload_file"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/upload_file/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_delete_file(self):
view_name = "delete_file"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/delete_file/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_upload_timeseries(self):
view_name = "upload_timeseries"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/upload_timeseries/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_delete_timeseries(self):
view_name = "delete_timeseries"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/delete_timeseries/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
class APIRunURLTestCase(TestCase):
def test_build(self):
view_name = "build"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/build/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_optimize(self):
view_name = "optimize"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/optimize/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_delete_run(self):
view_name = "delete_run"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/delete_run/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_update_run_description(self):
view_name = "update_run_description"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/update_run_description/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
def test_download(self):
view_name = "download"
for language_code, _ in settings.LANGUAGES:
with translation.override(language_code):
url = f"/{language_code}/api/download/"
self.assertEqual(reverse(view_name), url)
self.assertEqual(resolve(url).view_name, view_name)
| 2.65625 | 3 |
GPy/util/gpu_init.py | beckdaniel/GPy | 0 | 12760127 | """
The module for GPU (pycuda) initialization.
Global variables: gpu_initialized, gpu_device, gpu_context, MPI_enabled
"""
gpu_initialized = False
gpu_device = None
gpu_context = None
MPI_enabled = False
try:
from mpi4py import MPI
MPI_enabled = True
except:
pass
def initGPU():
    global gpu_initialized, gpu_device, gpu_context
    try:
if MPI_enabled and MPI.COMM_WORLD.size>1:
from .parallel import get_id_within_node
gpuid = get_id_within_node()
import pycuda.driver
pycuda.driver.init()
if gpuid>=pycuda.driver.Device.count():
print('['+MPI.Get_processor_name()+'] more processes than the GPU numbers!')
raise
gpu_device = pycuda.driver.Device(gpuid)
gpu_context = gpu_device.make_context()
gpu_initialized = True
else:
import pycuda.autoinit
gpu_initialized = True
except:
pass
def closeGPU():
if gpu_context is not None:
gpu_context.detach()
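# Typical usage sketch (an assumption -- not documented in the original module):
#
#   from GPy.util import gpu_init
#   gpu_init.initGPU()    # bind this process to a GPU (one per rank under MPI)
#   ...                   # run GPU-backed computations
#   gpu_init.closeGPU()   # detach the CUDA context at shutdown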
| 2.5 | 2 |
djay/application.py | aleontiev/django-cli | 24 | 12760128 | from __future__ import absolute_import
import os
from .addon import Addon
from .generator import Generator
from .blueprint import Blueprint
from .dependency import DependencyManager, Dependency
from .blueprint import get_core_blueprints
from .utils.system import (
get_directories,
get_last_touched,
find_nearest,
touch,
stdout as _stdout,
)
from .utils import style
from .config import Config
from .runtime import Runtime
from redbaron import RedBaron
class Application(object):
def __init__(self, stdout=None, directory=None):
self.stdout = stdout or _stdout
current = os.getcwd()
nearest_setup_file = find_nearest(current, "setup.py")
self.directory = directory or (
os.path.dirname(nearest_setup_file) if nearest_setup_file else current
)
self.config = Config(self.directory)
self.setup_file = os.path.join(self.directory, "setup.py")
self.requirements_file = os.path.join(
self.directory, self.config.get("requirements")
)
self.dev_requirements_file = os.path.join(
self.directory, self.config.get("devRequirements")
)
self.local_requirements_file = os.path.join(
self.directory, self.config.get("localRequirements")
)
self.runtime = Runtime(self.config.get("runtime"))
def __str__(self):
return self.__unicode__()
def __unicode__(self):
return "%s (%s)" % (self.name, self.directory)
@property
def exists(self):
if not hasattr(self, "_exists"):
self._exists = os.path.exists(self.setup_file)
return self._exists
@staticmethod
def parse_application_name(setup_filename):
"""Parse a setup.py file for the name.
Returns:
name, or None
"""
with open(setup_filename, "rt") as setup_file:
            fst = RedBaron(setup_file.read())
            name = None
for node in fst:
if node.type == "atomtrailers" and str(node.name) == "setup":
for call in node.call:
if str(call.name) == "name":
value = call.value
if hasattr(value, "to_python"):
value = value.to_python()
name = str(value)
break
if name:
break
return name
def _get_name(self):
name = self.config.get("name")
if name:
return name
if self.exists:
try:
name = Application.parse_application_name(self.setup_file)
except Exception:
name = "unknown"
self.config.set("name", name)
self.config.save()
return name
@property
def name(self):
if not hasattr(self, "_name"):
self._name = self._get_name()
return self._name
@property
def addons(self):
if not hasattr(self, "_addons"):
self._addons = {a.name: a for a in self.get_addons()}
return self._addons
def get_addons(self):
self.build()
addons = []
for directory in get_directories(
self.environment.package_directory,
filter=lambda x: x.endswith("/blueprints"),
):
parent_directory = "/".join(directory.split("/")[0:-1])
name = os.path.basename(parent_directory)
addons.append(Addon(name, parent_directory))
return addons
def refresh(self):
if hasattr(self, "_name"):
del self._name
if hasattr(self, "_blueprints"):
del self._blueprints
if hasattr(self, "_addons"):
del self._addons
if hasattr(self, "_exists"):
del self._exists
@property
def blueprints(self):
if not hasattr(self, "_blueprints"):
self._blueprints = {}
for b in self.get_blueprints():
# add by full name, e.g. dj.model
self._blueprints[b.full_name] = b
if not b.addon or b.name not in self._blueprints:
# for blueprints other that init or core,
# add them to the global namespace
self._blueprints[b.name] = b
return self._blueprints
def get_blueprints(self):
addons = self.addons.values()
blueprints = [a.blueprints.values() for a in addons]
return get_core_blueprints() + [x for s in blueprints for x in s]
@property
def requirements_last_modified(self):
return get_last_touched(self.requirements_file)
@property
def dev_requirements_last_modified(self):
return get_last_touched(self.dev_requirements_file)
@property
def local_requirements_last_modified(self):
return get_last_touched(self.local_requirements_file)
@property
def setup_last_modified(self):
# timestamp of last setup.py change
return get_last_touched(self.setup_file)
@property
def environment(self):
if not hasattr(self, "_environment"):
self._environment = self.runtime.create_environment(
self.config.environment_path
)
return self._environment
def _get_build_token(self, key):
return os.path.join(self.environment.virtual_directory, "build.%s" % key)
def _build(self, key, last_modified, cmd, verbose=True):
token = self._get_build_token(key)
last_built = get_last_touched(token)
if not last_built or last_built < last_modified:
self.stdout.write(style.format_command("Building", key))
result = self.execute(cmd, verbose=False, capture=True)
if "pip" in cmd:
deps = []
for line in result.split("\n"):
splits = line.split(" ")
if line.startswith("Successfully installed"):
dep = splits[2]
dep = "==".join(dep.rsplit("-", 1))
dep = Dependency(dep)
deps.append((dep, style.green("+ ")))
elif line.startswith("Requirement already satisfied: "):
dep = splits[3]
dep = Dependency(dep)
deps.append((dep, style.yellow(". ")))
elif "Uninstalling" in line:
index = line.index("Uninstalling")
dep = line[index:].split(" ")[1]
                        dep = dep[:-1]
dep = "==".join(dep.rsplit("-", 1))
dep = Dependency(dep)
deps.append((dep, style.red("- ")))
for dep, prefix in sorted(deps, key=lambda x: str(x[0])):
self.stdout.write(prefix + dep.to_stdout())
touch(token)
def build(self):
"""Builds the app in the app's environment.
Only builds if the build is out-of-date and is non-empty.
Builds in 3 stages: requirements, dev requirements, and app.
pip is used to install requirements, and setup.py is used to
install the app itself.
Raises:
ValidationError if the app fails to build.
"""
if self.exists:
self._build(
"requirements",
self.requirements_last_modified,
"pip install -U -r %s" % self.requirements_file,
)
try:
self._build(
"requirements (dev)",
self.dev_requirements_last_modified,
"pip install -U -r %s" % self.dev_requirements_file,
)
except Exception as e:
if "No such file" not in str(e):
raise e
self.stdout.write(style.yellow("Could not find dev requirements"))
try:
self._build(
"requirements (local)",
self.local_requirements_last_modified,
"pip install -U -r %s" % self.local_requirements_file,
)
except Exception as e:
if "No such file" not in str(e):
raise e
self.stdout.write(style.yellow("Could not find local requirements"))
self._build(
"application",
self.setup_last_modified,
"python %s develop" % self.setup_file,
)
def execute(self, command, **kwargs):
return self.environment.execute(command, **kwargs)
def run(self, command, **kwargs):
self.build()
self.stdout.write(style.format_command("Running", command))
return self.execute(command, **kwargs)
def generate(self, blueprint, context, interactive=True):
"""Generate a blueprint within this application."""
if not isinstance(blueprint, Blueprint):
bp = self.blueprints.get(blueprint)
if not bp:
raise ValueError("%s is not a valid blueprint" % blueprint)
blueprint = bp
self.stdout.write(style.format_command("Generating", blueprint.full_name))
generator = Generator(self, blueprint, context, interactive=interactive)
result = generator.generate()
if blueprint.name == "init":
# try re-setting the name
self.refresh()
return result
def get_dependency_manager(self, dev=False):
return DependencyManager(
os.path.join(
self.directory,
self.dev_requirements_file if dev else self.requirements_file,
)
)
def add(self, addon, dev=False, interactive=True):
"""Add a new dependency and install it."""
dependencies = self.get_dependency_manager(dev=dev)
other_dependencies = self.get_dependency_manager(dev=not dev)
existing = dependencies.get(addon)
self.stdout.write(style.format_command("Adding", addon))
dependencies.add(addon)
try:
# try running the build
self.build()
self.refresh()
# remove version of this in other requirements file
other_dependencies.remove(addon, warn=False)
# run new addon constructor
constructor_name = "%s.init" % Dependency(addon).module_name
constructor = self.blueprints.get(constructor_name)
if constructor:
context = constructor.load_context().main([], standalone_mode=False)
self.generate(constructor, context, interactive=interactive)
except Exception as e:
# restore original settings
self.stdout.write(style.red(str(e)))
self.stdout.write(style.yellow("Could not find %s" % addon))
dependencies.remove(addon)
if existing:
dependencies.add(existing)
return
def remove(self, addon, dev=False):
"""Remove a dependency and uninstall it."""
dependencies = self.get_dependency_manager(dev=dev)
other_dependencies = self.get_dependency_manager(dev=not dev)
self.stdout.write(style.format_command("Removing", addon))
removed = dependencies.remove(addon, warn=False)
if not removed:
removed = other_dependencies.remove(addon, warn=False)
if removed:
self.build()
else:
exception = "%s is not installed." % Dependency(addon).to_stdout()
self.stdout.write(style.red(exception))
def info(self):
output = []
dev_requirements = self.get_dependency_manager(dev=True).dependencies
requirements = self.get_dependency_manager(dev=False).dependencies
app = self.to_stdout()
if self.exists:
output.append(style.blue("Application:\n %s" % app))
if requirements:
output.append(style.blue("Requirements:"))
for _, dep in sorted(requirements.items(), key=lambda x: x[0].lower()):
output.append(" " + dep.to_stdout())
if dev_requirements:
output.append(style.blue("Requirements (dev):"))
for _, dep in sorted(
dev_requirements.items(), key=lambda x: x[0].lower()
):
output.append(" " + dep.to_stdout())
else:
output.append(
style.yellow("%s, try running %s." % (app, style.white("dj init")))
)
return "\n".join(output)
def to_stdout(self):
return (
"%s %s %s"
% (
style.white(self.name),
style.gray("@"),
style.green(self.runtime.version),
)
if self.name
else style.yellow("No application")
)
# singleton application instance
current_application = None
def get_current_application():
global current_application
if not current_application:
current_application = Application()
return current_application
def set_current_application(application):
global current_application
current_application = application
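# Usage sketch (not part of the original module): driving the Application
# singleton above from a script. Only methods defined in this file are used;
# the addon name "djangorestframework" is a hypothetical example.
#
# app = get_current_application()
# app.build()                                    # build environment + requirements
# app.add("djangorestframework", dev=True, interactive=False)
# print(app.info())                              # summary of the app and its requirements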
| 2.046875 | 2 |
integration_tests/samples/issues/issue_522.py | priya1puresoftware/python-slack-sdk | 2,486 | 12760129 | # export SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN=<KEY>
# python3 integration_tests/samples/issues/issue_522.py
import asyncio
import logging
import os
import sys
from slack_sdk.rtm import RTMClient
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger(__name__)
token = os.environ["SLACK_SDK_TEST_CLASSIC_APP_BOT_TOKEN"]
async def sleepy_count(name, sleep_for):
for i in range(10):
await asyncio.sleep(sleep_for)
LOGGER.debug(f"{name} - slept {i + 1} times.")
async def slack_client_and_sleeps():
# real-time-messaging Slack client
client = RTMClient(token=token, run_async=True)
sleepy_count_task = asyncio.create_task(sleepy_count("first counter", 1))
sleepy_count_task2 = asyncio.create_task(sleepy_count("second counter", 3))
await asyncio.gather(client.start(), sleepy_count_task, sleepy_count_task2)
async def slack_client():
# real-time-messaging Slack client
client = RTMClient(token=token, run_async=True)
await asyncio.gather(client.start())
async def sleeps():
sleepy_count_task = asyncio.create_task(sleepy_count("first counter", 1))
sleepy_count_task2 = asyncio.create_task(sleepy_count("second counter", 3))
await asyncio.gather(sleepy_count_task, sleepy_count_task2)
if __name__ == "__main__":
LOGGER.info(f"Try: kill -2 {os.getpid()} or ctrl+c")
if len(sys.argv) > 1:
option = sys.argv[1]
if option == "1":
# sigint closes program correctly
asyncio.run(slack_client())
elif option == "2":
# sigint closes program correctly
asyncio.run(sleeps())
elif option == "3":
# sigint doesn't actually close properly
asyncio.run(slack_client_and_sleeps())
else:
# sigint doesn't actually close properly
asyncio.run(slack_client_and_sleeps())
| 2.328125 | 2 |
examples/nips17_adversarial_competition/eval_infra/code/eval_lib/submissions.py | iamgroot42/cleverhans | 21 | 12760130 | <gh_stars>10-100
"""Classes and functions to manage submissions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from io import StringIO
import os
from six import iteritems
# Cloud Storage directories
ATTACK_SUBDIR = 'submissions/nontargeted'
TARGETED_ATTACK_SUBDIR = 'submissions/targeted'
DEFENSE_SUBDIR = 'submissions/defense'
# Cloud Datastore entity keys
ATTACKS_ENTITY_KEY = [u'SubmissionType', u'Attacks']
TARGET_ATTACKS_ENTITY_KEY = [u'SubmissionType', u'TargetedAttacks']
DEFENSES_ENTITY_KEY = [u'SubmissionType', u'Defenses']
KIND_SUBMISSION = u'Submission'
# Cloud Datastore ID patterns
ATTACK_ID_PATTERN = u'SUBA{:03}'
TARGETED_ATTACK_ID_PATTERN = u'SUBT{:03}'
DEFENSE_ID_PATTERN = u'SUBD{:03}'
# Constants for __str__
TO_STR_MAX_SUBMISSIONS = 5
ALLOWED_EXTENSIONS = ['.zip', '.tar', '.tar.gz']
def participant_from_submission_path(submission_path):
"""Parses type of participant based on submission filename.
Args:
submission_path: path to the submission in Google Cloud Storage
Returns:
    dict with one element. Element key corresponds to the type of participant
(team, baseline), element value is ID of the participant.
Raises:
    ValueError: if the participant can't be determined from the submission path.
"""
basename = os.path.basename(submission_path)
file_ext = None
for e in ALLOWED_EXTENSIONS:
if basename.endswith(e):
file_ext = e
break
if not file_ext:
raise ValueError('Invalid submission path: ' + submission_path)
basename = basename[:-len(file_ext)]
if basename.isdigit():
return {'team_id': int(basename)}
if basename.startswith('baseline_'):
return {'baseline_id': basename[len('baseline_'):]}
raise ValueError('Invalid submission path: ' + submission_path)
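# Worked example (not in the original source) of how the parser above maps a
# blob path to a participant dict; the concrete paths and the 'fgsm' baseline
# name are illustrative only:
#
#   participant_from_submission_path('round1/submissions/defense/1234.zip')
#   # -> {'team_id': 1234}
#   participant_from_submission_path('submissions/nontargeted/baseline_fgsm.tar.gz')
#   # -> {'baseline_id': 'fgsm'}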
SubmissionDescriptor = namedtuple('SubmissionDescriptor',
['path', 'participant_id'])
class CompetitionSubmissions(object):
"""Class which holds information about all submissions.
All submissions are stored in 3 dictionaries, one for targeted attacks,
one for non-targeted attacks and one for defenses.
  All submissions are identified using an internal competition ID,
  which looks like 'SUB????'. Additionally each submission has an external
  identifier, which could be the name of a baseline or a Kaggle ID.
  The external ID is only used when the list of submissions is formed and when
  the scoreboard is built. Internal submission IDs are used for all actual
evaluation. Thus all identifiers are internal IDs unless otherwise noted.
"""
def __init__(self, datastore_client, storage_client, round_name):
"""Initializes CompetitionSubmissions.
Args:
datastore_client: instance of CompetitionDatastoreClient
storage_client: instance of CompetitionStorageClient
round_name: name of the round
"""
self._datastore_client = datastore_client
self._storage_client = storage_client
self._round_name = round_name
# each of the variables is a dictionary,
# where key - submission ID
# value - SubmissionDescriptor namedtuple
self._attacks = None
self._targeted_attacks = None
self._defenses = None
def _load_submissions_from_datastore_dir(self, dir_suffix, id_pattern):
"""Loads list of submissions from the directory.
Args:
dir_suffix: suffix of the directory where submissions are stored,
        one of the following constants: ATTACK_SUBDIR, TARGETED_ATTACK_SUBDIR
or DEFENSE_SUBDIR.
id_pattern: pattern which is used to generate (internal) IDs
        for submissions. One of the following constants: ATTACK_ID_PATTERN,
TARGETED_ATTACK_ID_PATTERN or DEFENSE_ID_PATTERN.
Returns:
dictionary with all found submissions
"""
submissions = self._storage_client.list_blobs(
prefix=os.path.join(self._round_name, dir_suffix))
return {
id_pattern.format(idx): SubmissionDescriptor(
path=s, participant_id=participant_from_submission_path(s))
for idx, s in enumerate(submissions)
}
def init_from_storage_write_to_datastore(self):
"""Init list of sumibssions from Storage and saves them to Datastore.
Should be called only once (typically by master) during evaluation of
the competition.
"""
# Load submissions
self._attacks = self._load_submissions_from_datastore_dir(
ATTACK_SUBDIR, ATTACK_ID_PATTERN)
self._targeted_attacks = self._load_submissions_from_datastore_dir(
TARGETED_ATTACK_SUBDIR, TARGETED_ATTACK_ID_PATTERN)
self._defenses = self._load_submissions_from_datastore_dir(
DEFENSE_SUBDIR, DEFENSE_ID_PATTERN)
self._write_to_datastore()
def _write_to_datastore(self):
"""Writes all submissions to datastore."""
# Populate datastore
roots_and_submissions = zip([ATTACKS_ENTITY_KEY,
TARGET_ATTACKS_ENTITY_KEY,
DEFENSES_ENTITY_KEY],
[self._attacks,
self._targeted_attacks,
self._defenses])
client = self._datastore_client
with client.no_transact_batch() as batch:
for root_key, submissions in roots_and_submissions:
batch.put(client.entity(client.key(*root_key)))
for k, v in iteritems(submissions):
entity = client.entity(client.key(
*(root_key + [KIND_SUBMISSION, k])))
entity['submission_path'] = v.path
entity.update(participant_from_submission_path(v.path))
batch.put(entity)
def init_from_datastore(self):
"""Init list of submission from Datastore.
Should be called by each worker during initialization.
"""
self._attacks = {}
self._targeted_attacks = {}
self._defenses = {}
for entity in self._datastore_client.query_fetch(kind=KIND_SUBMISSION):
submission_id = entity.key.flat_path[-1]
submission_path = entity['submission_path']
participant_id = {k: entity[k]
for k in ['team_id', 'baseline_id']
if k in entity}
submission_descr = SubmissionDescriptor(path=submission_path,
participant_id=participant_id)
if list(entity.key.flat_path[0:2]) == ATTACKS_ENTITY_KEY:
self._attacks[submission_id] = submission_descr
elif list(entity.key.flat_path[0:2]) == TARGET_ATTACKS_ENTITY_KEY:
self._targeted_attacks[submission_id] = submission_descr
elif list(entity.key.flat_path[0:2]) == DEFENSES_ENTITY_KEY:
self._defenses[submission_id] = submission_descr
@property
def attacks(self):
"""Dictionary with all non-targeted attacks."""
return self._attacks
@property
def targeted_attacks(self):
"""Dictionary with all targeted attacks."""
return self._targeted_attacks
@property
def defenses(self):
"""Dictionary with all defenses."""
return self._defenses
def get_all_attack_ids(self):
"""Returns IDs of all attacks (targeted and non-targeted)."""
return list(self.attacks.keys()) + list(self.targeted_attacks.keys())
def find_by_id(self, submission_id):
"""Finds submission by ID.
Args:
submission_id: ID of the submission
Returns:
SubmissionDescriptor with information about submission or None if
submission is not found.
"""
return self._attacks.get(
submission_id,
self._defenses.get(
submission_id,
self._targeted_attacks.get(submission_id, None)))
def get_external_id(self, submission_id):
"""Returns human readable submission external ID.
Args:
submission_id: internal submission ID.
Returns:
human readable ID.
"""
submission = self.find_by_id(submission_id)
if not submission:
return None
if 'team_id' in submission.participant_id:
return submission.participant_id['team_id']
elif 'baseline_id' in submission.participant_id:
return 'baseline_' + submission.participant_id['baseline_id']
else:
return ''
def __str__(self):
"""Returns human readable representation, useful for debugging purposes."""
buf = StringIO()
title_values = zip([u'Attacks', u'Targeted Attacks', u'Defenses'],
[self._attacks, self._targeted_attacks, self._defenses])
for idx, (title, values) in enumerate(title_values):
if idx >= TO_STR_MAX_SUBMISSIONS:
buf.write('...\n')
break
buf.write(title)
buf.write(u':\n')
for k, v in iteritems(values):
buf.write(u'{0} -- {1} {2}\n'.format(k, v.path,
str(v.participant_id)))
buf.write(u'\n')
return buf.getvalue()
| 2.34375 | 2 |
leetcode/342.py | windniw/just-for-fun | 1 | 12760131 | """
link: https://leetcode.com/problems/power-of-four
problem: determine whether num is a power of 4, required to run in O(1)
solution: bit manipulation
"""
class Solution:
def isPowerOfFour(self, num: int) -> bool:
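        # num > 0 rules out zero and negatives; num & (num - 1) == 0 keeps only
        # numbers with a single set bit (powers of two); num & 0xaaaaaaaa == 0
        # forces that bit into an even position of a 32-bit word (1, 4, 16, ...),
        # i.e. an even power of two, which is exactly a power of four.
        # Example: 16 = 0b10000 -> 16 & 15 == 0 and 16 & 0xaaaaaaaa == 0.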
return num > 0 and num & (num - 1) == 0 and num & 0xaaaaaaaa == 0
| 3.59375 | 4 |
openff/evaluator/tests/test_utils/test_openmm.py | lilyminium/openff-evaluator | 0 | 12760132 | <reponame>lilyminium/openff-evaluator
from random import randint, random
import mdtraj
import numpy as np
import pytest
from openff.toolkit.topology import Molecule, Topology
from openff.toolkit.typing.engines.smirnoff import ForceField, vdWHandler
from openff.toolkit.typing.engines.smirnoff.parameters import (
ChargeIncrementModelHandler,
ElectrostaticsHandler,
LibraryChargeHandler,
)
from simtk import unit as simtk_unit
from openff.evaluator import unit
from openff.evaluator.backends import ComputeResources
from openff.evaluator.forcefield import ParameterGradientKey
from openff.evaluator.protocols.openmm import _compute_gradients
from openff.evaluator.thermodynamics import ThermodynamicState
from openff.evaluator.utils import get_data_filename
from openff.evaluator.utils.observables import ObservableArray, ObservableFrame
from openff.evaluator.utils.openmm import (
openmm_quantity_to_pint,
pint_quantity_to_openmm,
system_subset,
)
def test_daltons():
openmm_quantity = random() * simtk_unit.dalton
openmm_raw_value = openmm_quantity.value_in_unit(simtk_unit.gram / simtk_unit.mole)
pint_quantity = openmm_quantity_to_pint(openmm_quantity)
pint_raw_value = pint_quantity.to(unit.gram / unit.mole).magnitude
assert np.allclose(openmm_raw_value, pint_raw_value)
@pytest.mark.parametrize(
"openmm_unit",
[
simtk_unit.dalton,
simtk_unit.kilojoules_per_mole,
simtk_unit.angstrom,
simtk_unit.kelvin,
simtk_unit.atmosphere,
simtk_unit.gram,
simtk_unit.liter,
simtk_unit.gram / simtk_unit.liter,
],
)
@pytest.mark.parametrize(
"value",
[random(), randint(1, 10), [random(), random()], np.array([random(), random()])],
)
def test_openmm_to_pint(openmm_unit, value):
openmm_quantity = value * openmm_unit
openmm_raw_value = openmm_quantity.value_in_unit(openmm_unit)
pint_quantity = openmm_quantity_to_pint(openmm_quantity)
pint_raw_value = pint_quantity.magnitude
assert np.allclose(openmm_raw_value, pint_raw_value)
@pytest.mark.parametrize(
"pint_unit",
[
unit.dalton,
unit.kilojoules / unit.mole,
unit.angstrom,
unit.kelvin,
unit.atmosphere,
unit.gram,
unit.liter,
unit.gram / unit.liter,
],
)
@pytest.mark.parametrize(
"value",
[random(), randint(1, 10), [random(), random()], np.array([random(), random()])],
)
def test_pint_to_openmm(pint_unit, value):
pint_quantity = value * pint_unit
pint_raw_value = pint_quantity.magnitude
openmm_quantity = pint_quantity_to_openmm(pint_quantity)
openmm_raw_value = openmm_quantity.value_in_unit(openmm_quantity.unit)
assert np.allclose(openmm_raw_value, pint_raw_value)
def test_constants():
assert np.isclose(
simtk_unit.AVOGADRO_CONSTANT_NA.value_in_unit((1.0 / simtk_unit.mole).unit),
(1.0 * unit.avogadro_constant).to((1.0 / unit.mole).units).magnitude,
)
assert np.isclose(
simtk_unit.BOLTZMANN_CONSTANT_kB.value_in_unit(
simtk_unit.joule / simtk_unit.kelvin
),
(1.0 * unit.boltzmann_constant).to(unit.joule / unit.kelvin).magnitude,
)
assert np.isclose(
simtk_unit.MOLAR_GAS_CONSTANT_R.value_in_unit(
simtk_unit.joule / simtk_unit.kelvin / simtk_unit.mole
),
(1.0 * unit.molar_gas_constant)
.to(unit.joule / unit.kelvin / unit.mole)
.magnitude,
)
assert np.isclose(
simtk_unit.SPEED_OF_LIGHT_C.value_in_unit(
simtk_unit.meter / simtk_unit.seconds
),
(1.0 * unit.speed_of_light).to(unit.meter / unit.seconds).magnitude,
)
def hydrogen_chloride_force_field(
library_charge: bool, charge_increment: bool
) -> ForceField:
"""Returns a SMIRNOFF force field which is able to parameterize hydrogen chloride."""
# Create the FF
force_field = ForceField()
# Add a Vdw handler.
vdw_handler = vdWHandler(version=0.3)
vdw_handler.method = "cutoff"
vdw_handler.cutoff = 6.0 * simtk_unit.angstrom
vdw_handler.scale14 = 1.0
vdw_handler.add_parameter(
{
"smirks": "[#1:1]",
"epsilon": 0.0 * simtk_unit.kilojoules_per_mole,
"sigma": 1.0 * simtk_unit.angstrom,
}
)
vdw_handler.add_parameter(
{
"smirks": "[#17:1]",
"epsilon": 2.0 * simtk_unit.kilojoules_per_mole,
"sigma": 2.0 * simtk_unit.angstrom,
}
)
force_field.register_parameter_handler(vdw_handler)
# Add an electrostatic, a library charge and a charge increment handler.
electrostatics_handler = ElectrostaticsHandler(version=0.3)
electrostatics_handler.cutoff = 6.0 * simtk_unit.angstrom
electrostatics_handler.method = "PME"
force_field.register_parameter_handler(electrostatics_handler)
if library_charge:
library_charge_handler = LibraryChargeHandler(version=0.3)
library_charge_handler.add_parameter(
parameter_kwargs={
"smirks": "[#1:1]",
"charge1": 1.0 * simtk_unit.elementary_charge,
}
)
library_charge_handler.add_parameter(
parameter_kwargs={
"smirks": "[#17:1]",
"charge1": -1.0 * simtk_unit.elementary_charge,
}
)
force_field.register_parameter_handler(library_charge_handler)
if charge_increment:
charge_increment_handler = ChargeIncrementModelHandler(version=0.3)
charge_increment_handler.add_parameter(
parameter_kwargs={
"smirks": "[#1:1]-[#17:2]",
"charge_increment1": -1.0 * simtk_unit.elementary_charge,
"charge_increment2": 1.0 * simtk_unit.elementary_charge,
}
)
force_field.register_parameter_handler(charge_increment_handler)
return force_field
def test_system_subset_vdw():
# Create a dummy topology
topology = Molecule.from_smiles("Cl").to_topology()
# Create the system subset.
system, parameter_value = system_subset(
parameter_key=ParameterGradientKey("vdW", "[#1:1]", "epsilon"),
force_field=hydrogen_chloride_force_field(True, True),
topology=topology,
scale_amount=0.5,
)
assert system.getNumForces() == 1
assert system.getNumParticles() == 2
charge_0, sigma_0, epsilon_0 = system.getForce(0).getParticleParameters(0)
charge_1, sigma_1, epsilon_1 = system.getForce(0).getParticleParameters(1)
assert np.isclose(charge_0.value_in_unit(simtk_unit.elementary_charge), 0.0)
assert np.isclose(charge_1.value_in_unit(simtk_unit.elementary_charge), 0.0)
assert np.isclose(sigma_0.value_in_unit(simtk_unit.angstrom), 2.0)
assert np.isclose(sigma_1.value_in_unit(simtk_unit.angstrom), 1.0)
assert np.isclose(epsilon_0.value_in_unit(simtk_unit.kilojoules_per_mole), 2.0)
assert np.isclose(epsilon_1.value_in_unit(simtk_unit.kilojoules_per_mole), 0.5)
def test_system_subset_library_charge():
force_field = hydrogen_chloride_force_field(True, False)
# Ensure a zero charge after perturbation.
force_field.get_parameter_handler("LibraryCharges").parameters["[#1:1]"].charge1 = (
1.5 * simtk_unit.elementary_charge
)
# Create a dummy topology
topology = Molecule.from_smiles("Cl").to_topology()
# Create the system subset.
system, parameter_value = system_subset(
parameter_key=ParameterGradientKey("LibraryCharges", "[#17:1]", "charge1"),
force_field=force_field,
topology=topology,
scale_amount=0.5,
)
assert system.getNumForces() == 1
assert system.getNumParticles() == 2
charge_0, sigma_0, epsilon_0 = system.getForce(0).getParticleParameters(0)
charge_1, sigma_1, epsilon_1 = system.getForce(0).getParticleParameters(1)
assert np.isclose(charge_0.value_in_unit(simtk_unit.elementary_charge), -1.5)
assert np.isclose(charge_1.value_in_unit(simtk_unit.elementary_charge), 1.5)
assert np.isclose(sigma_0.value_in_unit(simtk_unit.angstrom), 10.0)
assert np.isclose(sigma_1.value_in_unit(simtk_unit.angstrom), 10.0)
assert np.isclose(epsilon_0.value_in_unit(simtk_unit.kilojoules_per_mole), 0.0)
assert np.isclose(epsilon_1.value_in_unit(simtk_unit.kilojoules_per_mole), 0.0)
def test_system_subset_charge_increment():
pytest.skip(
"This test will fail until the SMIRNOFF charge increment handler allows "
"N - 1 charges to be specified."
)
# Create a dummy topology
topology = Molecule.from_smiles("Cl").to_topology()
# Create the system subset.
system, parameter_value = system_subset(
parameter_key=ParameterGradientKey(
"ChargeIncrementModel", "[#1:1]-[#17:2]", "charge_increment1"
),
force_field=hydrogen_chloride_force_field(False, True),
topology=topology,
scale_amount=0.5,
)
assert system.getNumForces() == 1
assert system.getNumParticles() == 2
charge_0, sigma_0, epsilon_0 = system.getForce(0).getParticleParameters(0)
charge_1, sigma_1, epsilon_1 = system.getForce(0).getParticleParameters(1)
assert not np.isclose(charge_0.value_in_unit(simtk_unit.elementary_charge), -1.0)
assert np.isclose(charge_1.value_in_unit(simtk_unit.elementary_charge), 1.0)
assert np.isclose(sigma_0.value_in_unit(simtk_unit.angstrom), 10.0)
assert np.isclose(sigma_1.value_in_unit(simtk_unit.angstrom), 10.0)
assert np.isclose(epsilon_0.value_in_unit(simtk_unit.kilojoules_per_mole), 0.0)
assert np.isclose(epsilon_1.value_in_unit(simtk_unit.kilojoules_per_mole), 0.0)
@pytest.mark.parametrize("smirks, all_zeros", [("[#6X4:1]", True), ("[#8:1]", False)])
def test_compute_gradients(tmpdir, smirks, all_zeros):
# Load a short trajectory.
coordinate_path = get_data_filename("test/trajectories/water.pdb")
trajectory_path = get_data_filename("test/trajectories/water.dcd")
trajectory = mdtraj.load_dcd(trajectory_path, coordinate_path)
observables = ObservableFrame(
{
"PotentialEnergy": ObservableArray(
np.zeros(len(trajectory)) * unit.kilojoule / unit.mole
)
}
)
_compute_gradients(
[ParameterGradientKey("vdW", smirks, "epsilon")],
observables,
ForceField("openff-1.2.0.offxml"),
ThermodynamicState(298.15 * unit.kelvin, 1.0 * unit.atmosphere),
Topology.from_mdtraj(trajectory.topology, [Molecule.from_smiles("O")]),
trajectory,
ComputeResources(),
True,
)
assert len(observables["PotentialEnergy"].gradients[0].value) == len(trajectory)
if all_zeros:
assert np.allclose(
observables["PotentialEnergy"].gradients[0].value,
0.0 * unit.kilojoule / unit.kilocalorie,
)
else:
assert not np.allclose(
observables["PotentialEnergy"].gradients[0].value,
0.0 * unit.kilojoule / unit.kilocalorie,
)
| 2.046875 | 2 |
todolists/tests/test_models.py | VanirLab/VOS | 0 | 12760133 | <reponame>VanirLab/VOS
from django.contrib.auth.models import User
from django.test import TestCase
from main.models import Package
from todolists.models import Todolist, TodolistPackage
class TestTodolist(TestCase):
fixtures = ['main/fixtures/arches.json', 'main/fixtures/repos.json',
'main/fixtures/package.json']
def setUp(self):
self.user = User.objects.create(username="joeuser", first_name="Joe",
last_name="User", email="<EMAIL>")
self.todolist = Todolist.objects.create(name='Boost rebuild',
description='Boost 1.66 rebuid',
creator=self.user,
raw='linux')
def tearDown(self):
self.todolist.delete()
self.user.delete()
def test_stripped_description(self):
self.todolist.description = 'Boost rebuild '
desc = self.todolist.stripped_description
self.assertFalse(desc.endswith(' '))
def test_get_absolute_url(self):
self.assertIn('/todo/', self.todolist.get_absolute_url())
def test_get_full_url(self):
url = self.todolist.get_full_url()
self.assertIn('https://example.com/todo/', url)
def test_packages(self):
pkg = Package.objects.first()
todopkg = TodolistPackage.objects.create(pkg=pkg, pkgname=pkg.pkgname,
pkgbase=pkg.pkgbase, arch=pkg.arch,
repo=pkg.repo, user=self.user,
todolist=self.todolist)
pkgs = self.todolist.packages()
self.assertEqual(len(pkgs), 1)
self.assertEqual(pkgs[0], todopkg)
| 2.1875 | 2 |
www/src/Lib/zlib.py | stefanhoelzl/brython | 52 | 12760134 | <filename>www/src/Lib/zlib.py
# FIXME: real (de)compression is not implemented yet; for now just pass the data back unchanged.
def compress(data):
return data
def decompress(data):
return data
| 1.820313 | 2 |
alshamelah_api/apps/categories/views.py | devna-dev/durar-backend | 0 | 12760135 | <filename>alshamelah_api/apps/categories/views.py
from rest_framework import viewsets
from rest_framework_extensions.mixins import NestedViewSetMixin
from .models import Category, SubCategory
from .serializers import CategorySerializer, SubCategorySerializer
class CategoryViewSet(viewsets.ModelViewSet):
queryset = Category.objects.all()
serializer_class = CategorySerializer
class SubCategoryViewSet(NestedViewSetMixin, viewsets.ModelViewSet):
queryset = SubCategory.objects.all()
serializer_class = SubCategorySerializer
category_query = 'category'
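# Usage sketch (not part of the original file): one way to expose the nested
# SubCategoryViewSet through drf-extensions' router. The URL prefixes and
# basenames below are illustrative assumptions.
#
# from rest_framework_extensions.routers import ExtendedDefaultRouter
#
# router = ExtendedDefaultRouter()
# router.register(r'categories', CategoryViewSet, basename='category') \
#     .register(r'subcategories', SubCategoryViewSet,
#               basename='category-subcategories',
#               parents_query_lookups=['category'])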
| 1.6875 | 2 |
test/test_db.py | AleRiccardi/NextEws | 0 | 12760136 | <reponame>AleRiccardi/NextEws
import os
import tempfile
import pytest
import sqlite3
import pandas as pd
from nextews import db
# def test_get_size_author(app):
# with app.app_context():
# authors = db.get_authors_df()
# print(authors)
# assert authors.shape[0] != 0
| 2.28125 | 2 |
src/awkward/_v2/contents/content.py | henryiii/awkward-1.0 | 2 | 12760137 | <gh_stars>1-10
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
from __future__ import absolute_import
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import numbers
import awkward as ak
np = ak.nplike.NumpyMetadata.instance()
class Content(object):
def __getitem__(self, where):
if isinstance(where, numbers.Integral):
return self._getitem_at(where)
elif isinstance(where, slice) and where.step is None:
return self._getitem_range(where)
elif isinstance(where, slice):
raise NotImplementedError("needs _getitem_next")
elif isinstance(where, str):
return self._getitem_field(where)
elif where is np.newaxis:
raise NotImplementedError("needs _getitem_next")
elif where is Ellipsis:
raise NotImplementedError("needs _getitem_next")
elif isinstance(where, tuple):
raise NotImplementedError("needs _getitem_next")
elif isinstance(where, ak.highlevel.Array):
raise NotImplementedError("needs _getitem_next")
elif isinstance(where, Content):
raise NotImplementedError("needs _getitem_next")
elif isinstance(where, Iterable) and all(isinstance(x, str) for x in where):
return self._getitem_fields(where)
elif isinstance(where, Iterable):
raise NotImplementedError("needs _getitem_next")
else:
raise TypeError(
"only integers, slices (`:`), ellipsis (`...`), np.newaxis (`None`), "
"integer/boolean arrays (possibly with variable-length nested "
"lists or missing values), field name (str) or names (non-tuple "
"iterable of str) are valid indices for slicing, not\n\n "
+ repr(where)
)
| 2.15625 | 2 |
pyaz/cognitiveservices/account/__init__.py | py-az-cli/py-az-cli | 0 | 12760138 | '''
Manage Azure Cognitive Services accounts.
'''
from ... pyaz_utils import _call_az
from . import commitment_plan, deployment, identity, keys, network_rule
def create(kind, location, name, resource_group, sku, api_properties=None, assign_identity=None, custom_domain=None, encryption=None, storage=None, tags=None, yes=None):
'''
Manage Azure Cognitive Services accounts.
Required Parameters:
- kind -- the API name of cognitive services account
- location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
- name -- cognitive service account name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
- sku -- Name of the Sku of cognitive services account
Optional Parameters:
- api_properties -- Api properties in JSON format or a=b c=d format. Some cognitive services (i.e. QnA Maker) require extra api properties to create the account.
- assign_identity -- Generate and assign an Azure Active Directory Identity for this account.
- custom_domain -- User domain assigned to the account. Name is the CNAME source.
- encryption -- The encryption properties for this resource, in JSON format.
- storage -- The storage accounts for this resource, in JSON array format.
- tags -- space-separated tags: key[=value] [key[=value] ...]. Use '' to clear existing tags.
- yes -- Do not prompt for terms confirmation
'''
return _call_az("az cognitiveservices account create", locals())
def delete(name, resource_group):
'''
Manage Azure Cognitive Services accounts.
Required Parameters:
- name -- cognitive service account name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az cognitiveservices account delete", locals())
def show(name, resource_group):
'''
Manage Azure Cognitive Services accounts.
Required Parameters:
- name -- cognitive service account name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az cognitiveservices account show", locals())
def update(name, resource_group, api_properties=None, custom_domain=None, encryption=None, sku=None, storage=None, tags=None):
'''
Manage Azure Cognitive Services accounts.
Required Parameters:
- name -- cognitive service account name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
Optional Parameters:
- api_properties -- Api properties in JSON format or a=b c=d format. Some cognitive services (i.e. QnA Maker) require extra api properties to create the account.
- custom_domain -- User domain assigned to the account. Name is the CNAME source.
- encryption -- The encryption properties for this resource, in JSON format.
- sku -- Name of the Sku of cognitive services account
- storage -- The storage accounts for this resource, in JSON array format.
- tags -- space-separated tags: key[=value] [key[=value] ...]. Use '' to clear existing tags.
'''
return _call_az("az cognitiveservices account update", locals())
def list(resource_group=None):
'''
Manage Azure Cognitive Services accounts.
Optional Parameters:
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az cognitiveservices account list", locals())
def show_deleted(location, name, resource_group):
'''
Show a soft-deleted Azure Cognitive Services account.
Required Parameters:
- location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
- name -- cognitive service account name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az cognitiveservices account show-deleted", locals())
def list_deleted():
'''
List soft-deleted Azure Cognitive Services accounts.
'''
return _call_az("az cognitiveservices account list-deleted", locals())
def purge(location, name, resource_group):
'''
Purge a soft-deleted Azure Cognitive Services account.
Required Parameters:
- location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
- name -- cognitive service account name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az cognitiveservices account purge", locals())
def recover(location, name, resource_group):
'''
Recover a soft-deleted Azure Cognitive Services account.
Required Parameters:
- location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
- name -- cognitive service account name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az cognitiveservices account recover", locals())
def list_skus(kind=None, location=None, name=None, resource_group=None):
'''
Manage Azure Cognitive Services accounts.
Optional Parameters:
- kind -- the API name of cognitive services account
- location -- Location. Values from: `az account list-locations`. You can configure the default location using `az configure --defaults location=<location>`.
- name -- cognitive service account name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az cognitiveservices account list-skus", locals())
def list_usage(name, resource_group):
'''
Required Parameters:
- name -- cognitive service account name
- resource_group -- Name of resource group. You can configure the default group using `az configure --defaults group=<name>`
'''
return _call_az("az cognitiveservices account list-usage", locals())
def list_kinds():
return _call_az("az cognitiveservices account list-kinds", locals())
| 2.734375 | 3 |
upwork/notification.py | AAYBS/jn-upwork | 8 | 12760139 | import os
from datetime import datetime, timedelta
import configparser
import smtplib
import upwork
class Job(object):
def __init__(self, job_info):
self.job_info = job_info
def __str__(self):
job_info = "New job: %s \nType: %s" %(self.job_info['title'],
self.job_info['job_type'])
job_info += "\nBudget : %s $ \nCreated on: %s " % (
self.job_info['budget'], self.job_info['date_created'])
job_info += "Informations: %s \nLink: %s" % (self.job_info['category2'],
self.job_info['url'])
return job_info
class Config(object):
def __init__(self, source_path='', content=''):
config = configparser.ConfigParser()
if len(source_path) > 0:
script_dir = os.path.dirname(__file__)
abs_file_path = os.path.join(script_dir, source_path)
config.read_file(open(abs_file_path))
elif len(content) > 0:
config.read_string(content)
else:
raise Exception("Specify a configuration file path, or content.")
self.config = config
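# Illustrative sketch (not in the original script) of the configuration this
# module expects. Section and key names are taken from the code in this file
# (upwork.api_key, upwork.job_skill, email.mail_from, email.mail_to); the
# api_secret key is an assumption.
#
# example_ini = """
# [upwork]
# api_key = your-public-key
# api_secret = your-secret-key
# job_skill = python
#
# [email]
# mail_from = <EMAIL>
# mail_to = <EMAIL>
# """
# cfg = Config(content=example_ini).config
# print(cfg['upwork']['job_skill'])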
class UpworkClient(object):
def __init__(self, public_key, secret_key):
if (len(public_key) > 0) & (len(secret_key) > 0):
self.public_key = public_key
self.secret_key = secret_key
else:
raise Exception("No Authentication key\n" +\
"Go to https://developers.upwork.com/?lang=python#getting-started")
def __client(self):
'''
Authenticate to Upwork API
:return: upwork client obj
'''
try:
upwork_client = upwork.Client(
self.public_key, self.secret_key)
verifier = upwork_client.auth.get_authorize_url()
oauth_access_token, oauth_access_token_secret = \
upwork_client.auth.get_access_token(verifier)
client = upwork.Client(
self.public_key, self.secret_key,
oauth_access_token=oauth_access_token,
oauth_access_token_secret=oauth_access_token_secret)
except Exception as e:
print("Error: unable to authenticate ", e)
raise
return client
@classmethod
def __send_mail(self, message):
config = configparser.ConfigParser()
config.read("configuration.ini")
sender = config['email']['mail_from']
receivers = config['email']['mail_to']
try:
smtpObj = smtplib.SMTP('localhost')
smtpObj.sendmail(sender, receivers, message)
print("Successfully sent email")
except Exception as e:
print("Error: unable to send email " + e.message)
def search_jobs(self, job_query):
'''
Call search job with specific search job query
:param job_query:
:return: list of jobs
'''
try:
upwork = self.__client()
upwork_jobs = \
upwork.provider_v2.search_jobs(job_query, page_size=20)
except Exception as e:
print("Error: unable to connect {e!s}")
raise
jobs = []
current_time = datetime.now() - timedelta(hours=1)
# Get only few parameters instead of a whole payload
for job in upwork_jobs:
created = datetime.strptime(job['date_created'],
"%Y-%m-%dT%H:%M:%S+0000")
            # keep only jobs posted within the last hour; skip older ones
            if created >= current_time:
jobs.append(Job(job))
        self.__send_mail("\n\n".join(str(job) for job in jobs))
return jobs
if __name__ == "__main__":
    config = Config("configuration.ini").config
    # Define local parameters
    api_key = config['upwork']['api_key']
    api_secret = config['upwork']['api_secret']
job_skill = config['upwork']['job_skill']
upwork = UpworkClient(api_key, api_secret)
job_query = dict(
skills=[job_skill],
budget='[100 TO 100000]',
duration=['week', 'month', 'ongoing']
)
upwork.search_jobs(job_query)
| 2.65625 | 3 |
src/dotlock/graph.py | alexbecker/dotlock | 43 | 12760140 | <filename>src/dotlock/graph.py<gh_stars>10-100
from typing import Iterable
from dotlock.resolve import Requirement
def graph_resolution(requirements: Iterable[Requirement], offset=0):
for requirement in requirements:
for candidate in requirement.candidates.values():
if candidate.live:
print(offset * ' ' + f'{requirement.info}: {candidate.info.version} [{candidate.info.package_type.name}]')
graph_resolution(candidate.requirements.values(), offset + 2)
| 2.234375 | 2 |
1-dars.py | thejorabek/python | 1 | 12760141 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 1 09:26:03 2022
@author: thejorabek
"""
'''son=10
print(son,type(son))
son=3.14
print(son,type(son))
son='<NAME>'
print(son,type(son))
son=True
print(son,type(son))
son=int()
print(son,type(son))
print("Assalom",123,3.14,True,sep='salom')
print(1,2,3,4,5,6,7,8,9,10,sep='+',end='')
print(' =',1+2+3+4+5+6+7+8+9+10)
print(float(son))
print(str(son))
print(bool(son))
son=1
print(bool(son))'''
'''a=int(input('a='))
b=int(input('b='))
c=a
a=b
b=c
print("a =",a)
print("b =",b)'''
'''a=int(input('a='))
b=int(input('b='))
c=int(input('c='))
d=b
b=a
a=c
c=d
print('a=',a)
print('b=',b)
print('c=',c)'''
'''a=int(input('a='))
b=int(input('b='))
c=int(input('c='))
d=a
a=b
b=c
c=d
print('a=',a)
print('b=',b)
print('c=',c)'''
'''son=int(input("Sonni kiriting: "))
son+=1
print("son=",son,type(son))
fson=float(input("Haqiqiy sonni kiriting: "))
print("fson=",fson,type(fson))
bson=bool(input())
print("bson=",bson)
text=input("Textni kiriting: ")
print(text,type(text))
text='<NAME>'
print(text)
text="<NAME>"
print(text)
text="""Salom "<NAME>" bolalar
'O'rdak' Hello Foundation
Ch\tao \nBRO"""
print(text)'''
'''text="Salom"
print(len(text))
print(text[0],text[1],text[2],text[3],text[4])
print(text[-1],text[-2],text[-3],text[-4],text[-5],sep="")
print(*text)
text="<NAME>"
print(*text[0:len(text):2],sep=',') # go from index 0 to the end in steps of 2
print(*text[::-1]) # print the string in reverse
print(text[:5]) # print from the start up to index 5
print(text[6:]) # print from index 6 to the end
# [start : end : step]
# start - starting index, end - ending index, step - increment or decrement step'''
| 3.609375 | 4 |
NVIDIA/benchmarks/transformer/implementations/pytorch/fairseq/data/data_utils.py | goswamig/training_results_v0.7 | 48 | 12760142 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import contextlib
import itertools
import math
import os
import statistics
import time
import numpy as np
import torch
from . import FairseqDataset
import fairseq.data.batch_C_v0p5
import fairseq.data.batch_C_v0p5_better
import fairseq.data.batch_C_v0p6
import sys
def infer_language_pair(path):
"""Infer language pair from filename: <split>.<lang1>-<lang2>.(...).idx"""
src, dst = None, None
print('Infer language pair from filename...')
for filename in os.listdir(path):
print('filename:', filename)
parts = filename.split('.')
if len(parts) >= 3 and len(parts[1].split('-')) == 2:
return parts[1].split('-')
return src, dst
class ShardedIterator(object):
"""A sharded wrapper around an iterable (padded to length)."""
def __init__(self, iterable, num_shards, shard_id, fill_value=None):
if shard_id < 0 or shard_id >= num_shards:
raise ValueError('shard_id must be between 0 and num_shards')
self._sharded_len = len(iterable) // num_shards
if len(iterable) % num_shards > 0:
self._sharded_len += 1
self.itr = itertools.zip_longest(
range(self._sharded_len),
itertools.islice(iterable, shard_id, len(iterable), num_shards),
fillvalue=fill_value,
)
def __len__(self):
return self._sharded_len
def __iter__(self):
return self
def __next__(self):
return next(self.itr)[1]
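# Usage sketch (not in the original file): sharding a small iterable across two
# workers; the trailing shard is padded with fill_value.
#
# >>> list(ShardedIterator(range(5), num_shards=2, shard_id=0, fill_value=-1))
# [0, 2, 4]
# >>> list(ShardedIterator(range(5), num_shards=2, shard_id=1, fill_value=-1))
# [1, 3, -1]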
class CountingIterator(object):
"""Wrapper around an iterable that maintains the iteration count."""
def __init__(self, iterable):
self.iterable = iterable
self.count = 0
self.itr = iter(self)
def __len__(self):
return len(self.iterable)
def __iter__(self):
for x in self.iterable:
self.count += 1
yield x
def __next__(self):
return next(self.itr)
def has_next(self):
return self.count < len(self)
def skip(self, num_to_skip):
next(itertools.islice(self.itr, num_to_skip, num_to_skip), None)
return self
def collate_tokens(values, pad_idx, eos_idx, left_pad, move_eos_to_beginning=False, n_seq_per_batch_multiple=8, seq_len_multiple=1):
""" Convert a list of 1d tensors into a padded 2d tensor.
Args:
values: Python list where each element is a PyT 1d tensor
pad_idx: The index into the translation dictionary for the pad token (typically refer to 'dict.pad()')
eos_idx: The index into the translation dictionary for the eos token (typically refer to 'dict.eos()')
left_pad: Bool, left- or right-padding (true: left, false: right)
move_eos_to_beginning: Reverse order of sequence of tokens (true: reverse, false:leave in original order)
n_seq_per_batch_multiple: The number of sequences per batch to round down to
seq_len_multiple: The number of tokens per sequence to round up to
"""
size_of_seq_dim = max(v.size(0) for v in values) # Unpadded size
n_seq_in_batch = len(values)
if n_seq_per_batch_multiple % seq_len_multiple == 0:
n_seq_multiple = n_seq_per_batch_multiple / seq_len_multiple
else:
n_seq_multiple = n_seq_per_batch_multiple
if n_seq_in_batch < n_seq_multiple or n_seq_in_batch % n_seq_multiple > 0:
seq_len_multiple = n_seq_per_batch_multiple
size_of_seq_dim = (size_of_seq_dim + seq_len_multiple - 1) // seq_len_multiple * seq_len_multiple # Padded seq len, rounded up to next multiple
padded_2d_tensor = values[0].new(len(values), size_of_seq_dim).fill_(pad_idx)
def copy_tensor(src, dst):
assert dst.numel() == src.numel()
if move_eos_to_beginning:
assert src[-1] == eos_idx
dst[0] = eos_idx
dst[1:] = src[:-1]
else:
dst.copy_(src)
if left_pad:
for idx, val in enumerate(values):
copy_tensor(val, padded_2d_tensor[idx][size_of_seq_dim - len(val):])
else:
for idx, val in enumerate(values):
copy_tensor(val, padded_2d_tensor[idx][:len(val)])
return padded_2d_tensor
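# Usage sketch (not in the original file): right-padding two sequences to a
# common length with collate_tokens; token ids and pad/eos indices are
# illustrative.
#
# >>> import torch
# >>> vals = [torch.tensor([5, 6, 2]), torch.tensor([7, 2])]
# >>> collate_tokens(vals, pad_idx=1, eos_idx=2, left_pad=False,
# ...                n_seq_per_batch_multiple=1, seq_len_multiple=1)
# tensor([[5, 6, 2],
#         [7, 2, 1]])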
class EpochBatchIterator(object):
"""Iterate over a FairseqDataset and yield batches bucketed by size.
Batches may contain sequences of different lengths. This iterator can be
reused across multiple epochs with the next_epoch_itr() method.
Args:
dataset: a FairseqDataset
max_tokens: max number of tokens in each batch
max_sentences: max number of sentences in each batch
max_positions: max sentence length supported by the model
ignore_invalid_inputs: don't raise Exception for sentences that are too long
required_batch_size_multiple: require batch size to be a multiple of N
seeds: seeds for random number generator for reproducibility (1 seed for
each training epoch)
num_shards: shard the data iterator into N shards
shard_id: which shard of the data iterator to return
"""
def __init__(
self, dataset, dataloader_num_workers=1, dataloader_pin_memory=False, max_tokens=None, max_sentences=None, max_positions=None,
ignore_invalid_inputs=False, required_batch_size_multiple=1, seeds=[1],
num_shards=1, shard_id=0, epoch=0, bucket_growth_factor=1.1, seq_len_multiple=1,
batching_scheme='v0p5', batch_multiple_strategy='multiple_of_sequences',
):
assert isinstance(dataset, FairseqDataset)
self.dataset = dataset
self.max_tokens = max_tokens if max_tokens is not None else float('Inf')
self.max_sentences = max_sentences if max_sentences is not None else float('Inf')
self.dataloader_num_workers = dataloader_num_workers
self.dataloader_pin_memory = dataloader_pin_memory
assert len(max_positions) == 2, "Max positions contains source and target lengths!"
max_src_pos,max_tgt_pos = max_positions
self.max_positions = max_positions
self.max_positions_num = min(max_src_pos, max_tgt_pos)
self.ignore_invalid_inputs = ignore_invalid_inputs
self.bsz_mult = required_batch_size_multiple
self.seeds = seeds
self.num_shards = num_shards
self.shard_id = shard_id
self.seq_len_multiple = seq_len_multiple
self.batching_scheme = batching_scheme
self.batch_multiple_strategy = batch_multiple_strategy
self.epoch = epoch
self._cur_epoch_itr = None
self._next_epoch_itr = None
with numpy_seed(self.seeds[0]):
import time
start = time.time()
indices = self.dataset.ordered_indices(self.seeds[self.epoch])
#need integer, rather than float('Inf') values
max_sentences = max_sentences if max_sentences is not None else sys.maxsize
max_tokens = max_tokens if max_tokens is not None else sys.maxsize
if self.batching_scheme == 'v0p5' :
batches = fairseq.data.batch_C_v0p5.make_batches_v0p5(self.dataset.src_sizes, self.dataset.tgt_sizes, indices, max_tokens, max_sentences, self.bsz_mult, self.max_positions_num)
elif self.batching_scheme == 'v0p5_better' :
print('self.dataset.src_sizes', self.dataset.src_sizes.size)
print('self.dataset.tgt_sizes', self.dataset.tgt_sizes.size)
batches = fairseq.data.batch_C_v0p5_better.make_batches_v0p5_better(self.dataset.src_sizes, self.dataset.tgt_sizes, indices, max_tokens, max_sentences, self.max_positions_num, self.bsz_mult, self.seq_len_multiple)
elif self.batching_scheme == 'v0p6':
batch_strategy = 2
if self.batch_multiple_strategy == 'mult_of_sequences':
batch_strategy = 0
elif self.batch_multiple_strategy == 'pad_sequence_to_mult':
batch_strategy = 1
elif self.batch_multiple_strategy == 'dynamic':
batch_strategy = 2
else:
assert False, "Unknown batch multiple strategy!"
bucket_specify_min_boundary = 8
use_efficient_last_pack = False
#batch_strategy = 2
batches = fairseq.data.batch_C_v0p6.make_batches_v0p6(self.dataset.src_sizes,
self.dataset.tgt_sizes,
indices,
max_tokens,
max_sentences,
self.bsz_mult,
self.max_positions_num,
bucket_specify_min_boundary,
bucket_growth_factor,
batch_strategy,
use_efficient_last_pack)
else : # reference
def roundup(x, multiple):
return (x + multiple - 1) // multiple * multiple
def rounddown(x, multiple):
return x // multiple * multiple
def create_bucket_bounds_lists(max_allowable_seq_length, bucket_specify_min_boundary, bucket_specify_growth_scale):
bucket_boundaries = []
x = bucket_specify_min_boundary
while x < max_allowable_seq_length:
bucket_boundaries.append(x)
x = max(x + 1, int(x * bucket_specify_growth_scale))
if use_efficient_last_pack:
buckets_min_list = [0] + [i+1 for i in bucket_boundaries]
buckets_max_list = bucket_boundaries + [max_allowable_seq_length]
else:
buckets_min_list = [0] + bucket_boundaries
buckets_max_list = bucket_boundaries + [max_allowable_seq_length + 1]
return buckets_min_list, buckets_max_list
def create_seq_to_bucket_id_list_and_n_seq_per_batch(n_tok_per_seq, max_allowable_seq_length, max_sentences, pad_seq_per_batch_to_multiple_of, pad_tok_per_seq_to_multiple_of, bucket_specify_min_boundary, bucket_specify_growth_scale):
bucket_interval_min, bucket_interval_max = create_bucket_bounds_lists(max_allowable_seq_length, bucket_specify_min_boundary, bucket_specify_growth_scale)
if do_seq_len_padding_to_multiple:
n_seq_per_batch = [max_tokens // roundup(x, pad_tok_per_seq_to_multiple_of) for x in bucket_interval_max]
elif do_batch_size_rounding_down_to_multiple:
n_seq_per_batch = [rounddown(max_tokens // x, pad_seq_per_batch_to_multiple_of) for x in bucket_interval_max]
elif do_dynamic_batch_size_choice:
n_seq_per_batch_based_on_seq_len = [max_tokens // roundup(x, pad_tok_per_seq_to_multiple_of) for x in bucket_interval_max]
n_seq_per_batch_based_on_n_seq = [rounddown(max_tokens // x, pad_seq_per_batch_to_multiple_of) for x in bucket_interval_max]
n_seq_per_batch = [max(a,b) for a, b in zip(n_seq_per_batch_based_on_seq_len, n_seq_per_batch_based_on_n_seq)]
else:
n_seq_per_batch = [max_tokens // x for x in bucket_interval_max]
n_seq_per_batch = [min(max_sentences, i) if max_sentences is not None else i for i in n_seq_per_batch]
for a, b, c in zip(bucket_interval_min, bucket_interval_max, n_seq_per_batch):
print('bucket:', a, b, c)
token_length_2_bucket_id = {}
for x in range(max_allowable_seq_length+1):
for bucket_id, payload in enumerate(zip(bucket_interval_min, bucket_interval_max)):
bmin, bmax = payload
if (bmin <= x and x <= bmax and use_efficient_last_pack) or (bmin <= x and x < bmax):
token_length_2_bucket_id[x] = bucket_id
break
return ([token_length_2_bucket_id[x] if x <= max_allowable_seq_length else -1 for x in n_tok_per_seq], n_seq_per_batch, len(bucket_interval_min))
# Make adjustments to tuneable parameters here
pad_seq_per_batch_to_multiple_of = self.bsz_mult
pad_tok_per_seq_to_multiple_of = self.bsz_mult
max_allowable_seq_length = self.max_positions_num
bucket_specify_min_boundary = 8
bucket_specify_growth_scale = bucket_growth_factor ##1.035
do_seq_len_padding_to_multiple = False
do_batch_size_rounding_down_to_multiple = False
do_dynamic_batch_size_choice = True
use_efficient_last_pack = False
batches = []
src_token_counts = []
dst_token_counts = []
seq_counts = []
padded_token_counts = []
batch_max_padded_seq_len = 0
batch_seq_count = 0
batches.append([])
src_batch_token_count = 0
dst_batch_token_count = 0
curr_batch_padded_token_count = 0
batch_n_seq = 0
bucket_id = 0
longest_in_batch = []
print('### max_tokens:', max_tokens)
print('### max_sentences:', max_sentences)
pairwise_max_seq_len = [max(a,b) for a, b in zip(dataset.src_sizes, dataset.tgt_sizes)]
bucket_ids, n_seq_per_batch, n_buckets = create_seq_to_bucket_id_list_and_n_seq_per_batch(pairwise_max_seq_len, max_allowable_seq_length, max_sentences, pad_seq_per_batch_to_multiple_of, pad_tok_per_seq_to_multiple_of, bucket_specify_min_boundary, bucket_specify_growth_scale)
buckets = []
for i in range(n_buckets):
buckets.append([])
n_rejected_sequences = 0
for idx, bidx in enumerate(bucket_ids):
if bidx >= 0:
buckets[bidx].append(idx)
else:
n_rejected_sequences += 1
# Remove empty buckets (causes blow-up in eval code).
buckets = [i for i in buckets if len(i) > 0]
print(n_rejected_sequences, 'were omitted due to containing over 256 tokens.')
batch_seq_count = 0
#count = 0
seq_len_tracker = 0
for bucket, nspb in zip(buckets, n_seq_per_batch):
for item in bucket:
if batch_n_seq < nspb:
batches[-1].append(item)
src_batch_token_count += dataset.src_sizes[item]
dst_batch_token_count += dataset.tgt_sizes[item]
seq_len_tracker = max(seq_len_tracker, dst_batch_token_count)
batch_n_seq += 1
else:
batches.append([item])
src_token_counts.append(src_batch_token_count)
dst_token_counts.append(dst_batch_token_count)
src_batch_token_count = dataset.src_sizes[item]
dst_batch_token_count = dataset.tgt_sizes[item]
seq_counts.append(batch_n_seq)
batch_n_seq = 1
batches.append([])
batch_n_seq = 0
seq_counts.append(batch_n_seq)
src_batch_token_count = 0
dst_batch_token_count = 0
src_token_counts.append(src_batch_token_count)
dst_token_counts.append(dst_batch_token_count)
seq_cnt2 = []
for batch in batches:
seq_len_tracker = 0
nseqbucket = 0
for item in batch:
a = dataset.src_sizes[item]
b = dataset.tgt_sizes[item]
seq_len_tracker = max(seq_len_tracker, max(a, b))
nseqbucket += 1
longest_in_batch.append(seq_len_tracker)
seq_cnt2.append(nseqbucket)
# In the unlucky case, remove a newly created but empty last batch
if not batches[-1]:
del batches[-1]
del seq_counts[-1]
del src_token_counts[-1]
del dst_token_counts[-1]
tmp_batches = batches
batches = []
for b in tmp_batches:
if b:
batches.append(b)
#padded_token_counts = src_token_counts
#padded_token_counts = [x*0 for x in src_token_counts] # Setting to zero until this is actually implemented
#print('split dataset length:', len(dataset.src))
#print('mean src tokens per batch =', statistics.mean(src_token_counts), statistics.mean(padded_token_counts))
#print('median src tokens per batch =', statistics.median(src_token_counts), statistics.median(padded_token_counts))
#print('stdev src tokens per batch =', statistics.stdev(src_token_counts), statistics.stdev(padded_token_counts))
#print('min src tokens per batch =', min(src_token_counts), min(padded_token_counts))
#print('max src tokens per batch =', max(src_token_counts), max(padded_token_counts))
#print('mean tgt tokens per batch =', statistics.mean(dst_token_counts), statistics.mean(padded_token_counts))
#print('median tgt tokens per batch =', statistics.median(dst_token_counts), statistics.mean(padded_token_counts))
#print('stdev tgt tokens per batch =', statistics.stdev(dst_token_counts), statistics.stdev(padded_token_counts))
#print('min tgt tokens per batch =', min(dst_token_counts), min(padded_token_counts))
#print('max tgt tokens per batch =', max(dst_token_counts), max(padded_token_counts))
#print('mean seq per batch =', statistics.mean(seq_counts), statistics.mean(padded_token_counts))
#print('median seq per batch =', statistics.median(seq_counts), statistics.median(padded_token_counts))
#print('stdev seq per batch =', statistics.stdev(seq_counts), statistics.stdev(padded_token_counts))
#print('min seq per batch =', min(seq_counts), min(padded_token_counts))
#print('max seq per batch =', max(seq_counts), max(padded_token_counts))
#print('pad inc: mean tgt tokens per batch =', statistics.mean(np.array(seq_cnt2) * np.array(longest_in_batch)), longest_in_batch[:3], seq_cnt2[:3])
#print('pad inc: median tgt tokens per batch =', statistics.median(np.array(seq_cnt2) * np.array(longest_in_batch)), longest_in_batch[:3], seq_cnt2[:3])
self.frozen_batches = tuple(batches)
# self.frozen_batches = tuple(self._batch_generator())
print("generated %d batches in %fs" % (len(batches), time.time() - start))
def __len__(self):
return len(self.frozen_batches)
def next_epoch_itr(self, shuffle=True):
"""Shuffle batches and return a new iterator over the dataset."""
if self._next_epoch_itr is not None:
self._cur_epoch_itr = self._next_epoch_itr
self._next_epoch_itr = None
else:
self.epoch += 1
self._cur_epoch_itr = self._get_iterator_for_epoch(self.epoch, shuffle)
return self._cur_epoch_itr
def end_of_epoch(self):
return not self._cur_epoch_itr.has_next()
@property
def iterations_in_epoch(self):
if self._cur_epoch_itr is not None:
return self._cur_epoch_itr.count
elif self._next_epoch_itr is not None:
return self._next_epoch_itr.count
return 0
def state_dict(self):
return {
'epoch': self.epoch,
'iterations_in_epoch': self.iterations_in_epoch,
}
def load_state_dict(self, state_dict):
self.epoch = state_dict['epoch']
itr_pos = state_dict.get('iterations_in_epoch', 0)
if itr_pos > 0:
# fast-forward epoch iterator
itr = self._get_iterator_for_epoch(self.epoch, state_dict.get('shuffle', True))
if itr_pos < len(itr):
self._next_epoch_itr = itr.skip(itr_pos)
def _get_iterator_for_epoch(self, epoch, shuffle):
if shuffle:
# set seed based on the seed and epoch number so that we get
# reproducible results when resuming from checkpoints
with numpy_seed(self.seeds[epoch]):
batches = list(self.frozen_batches) # copy
np.random.shuffle(batches)
else:
batches = self.frozen_batches
return CountingIterator(torch.utils.data.DataLoader(
self.dataset,
collate_fn=self.dataset.collater,
num_workers=self.dataloader_num_workers,
pin_memory=self.dataloader_pin_memory,
batch_sampler=ShardedIterator(batches, self.num_shards, self.shard_id, fill_value=[]),
))
def _batch_generator(self):
batch = []
def is_batch_full(num_tokens):
if len(batch) == 0:
return False
if len(batch) == self.max_sentences:
return True
if num_tokens > self.max_tokens:
return True
return False
sample_len = 0
sample_lens = []
ignored = []
for idx in self.dataset.ordered_indices(self.seeds[self.epoch]):
if not self.dataset.valid_size(idx, self.max_positions):
if self.ignore_invalid_inputs:
ignored.append(idx)
continue
raise Exception((
'Size of sample #{} is invalid, max_positions={}, skip this example with --skip-invalid-size-inputs-valid-test'
).format(idx, self.max_positions))
sample_lens.append(self.dataset.num_tokens(idx))
sample_len = max(sample_len, sample_lens[-1])
num_tokens = (len(batch) + 1) * sample_len
if is_batch_full(num_tokens):
mod_len = max(self.bsz_mult * (len(batch) // self.bsz_mult), len(batch) % self.bsz_mult,)
yield batch[:mod_len]
batch = batch[mod_len:]
sample_lens = sample_lens[mod_len:]
sample_len = max(sample_lens) if len(sample_lens) > 0 else 0
batch.append(idx)
if len(batch) > 0:
yield batch
if len(ignored) > 0:
print((
'| WARNING: {} samples have invalid sizes and will be skipped, max_positions={}, first few sample ids={}'
).format(len(ignored), self.max_positions, ignored[:10]))
@contextlib.contextmanager
def numpy_seed(seed):
"""Context manager which seeds the NumPy PRNG with the specified seed and restores the state afterward"""
if seed is None:
yield
return
state = np.random.get_state()
np.random.seed(seed)
try:
yield
finally:
np.random.set_state(state)
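# Illustrative sketch (not part of the original module): numpy_seed() makes a block of
# NumPy randomness reproducible without disturbing the global RNG state. The helper
# below only demonstrates the pattern and is never called by the iterator code.
def _numpy_seed_example():
    with numpy_seed(42):
        a = np.random.rand(3)  # deterministic: same three values every run
    with numpy_seed(42):
        b = np.random.rand(3)  # identical to `a`
    return np.allclose(a, b)   # True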
| 2.015625 | 2 |
src/mcedit2/panels/pending_imports.py | elcarrion06/mcedit2 | 673 | 12760143 | <reponame>elcarrion06/mcedit2<filename>src/mcedit2/panels/pending_imports.py<gh_stars>100-1000
"""
pending_imports
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from PySide import QtGui
import logging
log = logging.getLogger(__name__)
class PendingImportsWidget(QtGui.QWidget):
def __init__(self):
super(PendingImportsWidget, self).__init__()
self.importsListWidget = QtGui.QListView()
self.importsListModel = QtGui.QStandardItemModel()
self.importsListWidget.setModel(self.importsListModel)
self.importsListWidget.clicked.connect(self.listClicked)
self.importsListWidget.doubleClicked.connect(self.listDoubleClicked)
| 1.921875 | 2 |
code/evaluate.py | SunYanCN/squad-transformer | 43 | 12760144 | """Official evaluation script for v1.1 of the SQuAD dataset.
From the SQuAD website: https://rajpurkar.github.io/SQuAD-explorer/
"""
from collections import Counter
import string
import re
import argparse
import json
import sys
def normalize_answer(s):
"""Lower text and remove punctuation, articles and extra whitespace."""
def remove_articles(text):
return re.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
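# Illustrative sketch (not part of the official script): a quick sanity check of
# f1_score() on a toy pair. Normalization drops the article "the", so the prediction
# tokens are ["cat", "sat"] and the ground truth tokens are ["cat", "sat", "down"]:
# precision = 2/2, recall = 2/3, and F1 = 2 * (1.0 * 2/3) / (1.0 + 2/3) = 0.8.
def _f1_score_example():
    return f1_score('the cat sat', 'cat sat down')  # expected: 0.8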
def exact_match_score(prediction, ground_truth):
return (normalize_answer(prediction) == normalize_answer(ground_truth))
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
scores_for_ground_truths = []
for ground_truth in ground_truths:
score = metric_fn(prediction, ground_truth)
scores_for_ground_truths.append(score)
return max(scores_for_ground_truths)
def evaluate(dataset, predictions):
f1 = exact_match = total = 0
for article in dataset:
for paragraph in article['paragraphs']:
for qa in paragraph['qas']:
total += 1
if qa['id'] not in predictions:
message = 'Unanswered question ' + qa['id'] + \
' will receive score 0.'
print(message, file=sys.stderr)
continue
ground_truths = list(map(lambda x: x['text'], qa['answers']))
prediction = predictions[qa['id']]
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
def unofficial_eval(dataset, predictions):
"""Adapted from evaluate function above. Used during training
to produce local eval scores with the same formulae used for official eval.
"""
f1 = exact_match = total = 0
for example_id, prediction in predictions.items():
total += 1
ground_truths = dataset[example_id]['a']
exact_match += metric_max_over_ground_truths(
exact_match_score, prediction, ground_truths)
f1 += metric_max_over_ground_truths(
f1_score, prediction, ground_truths)
exact_match = 100.0 * exact_match / total
f1 = 100.0 * f1 / total
return {'exact_match': exact_match, 'f1': f1}
def get_pred_text(eval_dict, example_ids, pred_starts, pred_ends, probs, use_official_ids=False):
"""Get text of predicted answers for a number of predicted starts, ends, and the corresponding example_ids.
If use_official_ids is True, the returned dictionary maps from official UUIDs to predicted (answer text, prob) pairs.
Otherwise, maps from simple numeric IDs to (answer text, prob) pairs.
"""
pred_text = {}
for example_id, pred_start, pred_end, prob in zip(example_ids, pred_starts, pred_ends, probs):
context = eval_dict[str(example_id)]['c']
bounds = eval_dict[str(example_id)]['bounds']
start_idx = bounds[pred_start][0]
end_idx = bounds[pred_end][1]
if use_official_ids:
official_id = eval_dict[str(example_id)]['id']
pred_text[official_id] = (context[start_idx: end_idx], prob)
else:
pred_text[str(example_id)] = (context[start_idx: end_idx], prob)
return pred_text
if __name__ == '__main__':
expected_version = '1.1'
parser = argparse.ArgumentParser(
description='Evaluation for SQuAD ' + expected_version)
parser.add_argument('dataset_file', help='Dataset file')
parser.add_argument('prediction_file', help='Prediction File')
args = parser.parse_args()
with open(args.dataset_file) as dataset_file:
dataset_json = json.load(dataset_file)
if (dataset_json['version'] != expected_version):
print('Evaluation expects v-' + expected_version +
', but got dataset with v-' + dataset_json['version'],
file=sys.stderr)
dataset = dataset_json['data']
with open(args.prediction_file) as prediction_file:
predictions = json.load(prediction_file)
print(json.dumps(evaluate(dataset, predictions)))
| 2.75 | 3 |
test/attic/test_bag.py | strawlab/flyvr | 3 | 12760145 | #!/usr/bin/env python
import os
import roslib; roslib.load_manifest('freemovr_engine')
import rosbag
import sensor_msgs.msg
import geometry_msgs.msg
import pymvg
import numpy as np
def test_bag():
for i in range(1000):
bagout = rosbag.Bag('/tmp/testbag.bag', 'w')
topic = '/tf'
extrinsics = geometry_msgs.msg.Transform()
bagout.write(topic, extrinsics)
topic = '/camera_info'
intrinsics = sensor_msgs.msg.CameraInfo()
intrinsics.distortion_model = ','*i
intrinsics.K = list(np.random.rand(3,3).flatten())
bagout.write(topic, intrinsics)
bagout.close()
c = pymvg.CameraModel.load_camera_simple()
c.save_to_bagfile('/tmp/testbag.bag')
fname = '/home/stowers/.ros/camera_info/Basler_21220788.yaml'
assert os.path.exists(fname)
c = pymvg.CameraModel.load_camera_from_file(fname)
c.get_intrinsics_as_msg()
c.save_to_bagfile('/tmp/testbag.bag')
c = pymvg.CameraModel.load_camera_from_file('/tmp/testbag.bag')
c.get_intrinsics_as_msg()
| 2.078125 | 2 |
lhotse/recipes/ali_meeting.py | glynpu/lhotse | 0 | 12760146 | <reponame>glynpu/lhotse
"""
The AliMeeting Mandarin corpus, originally designed for ICASSP 2022 Multi-channel
Multi-party Meeting Transcription Challenge (M2MeT), is recorded from real meetings,
including far-field speech collected by an 8-channel microphone array as well as
near-field speech collected by each participants' headset microphone. The dataset
contains 118.75 hours of speech data in total, divided into 104.75 hours for training
(Train), 4 hours for evaluation (Eval) and 10 hours as test set (Test), according to
M2MeT challenge arrangement. Specifically, the Train, Eval and Test sets contain 212,
8 and 20 meeting sessions respectively, and each session consists of a 15 to 30-minute
discussion by 2-4 participants. AliMeeting covers a variety of aspects in real-world
meetings, including diverse meeting rooms, various number of meeting participants and
different speaker overlap ratios. High-quality transcriptions are provided as well.
The dataset can be used for tasks in meeting rich transcriptions, including speaker
diarization and multi-speaker automatic speech recognition.
More details and download link: https://openslr.org/119/
"""
import logging
import tarfile
from collections import defaultdict
from pathlib import Path
from typing import Dict, Optional, Union
from tqdm import tqdm
from lhotse import validate_recordings_and_supervisions, fix_manifests
from lhotse.audio import AudioSource, Recording, RecordingSet
from lhotse.supervision import SupervisionSegment, SupervisionSet
from lhotse.utils import Pathlike, is_module_available, urlretrieve_progress
def download_ali_meeting(
target_dir: Pathlike = ".",
force_download: Optional[bool] = False,
base_url: Optional[
str
] = "https://speech-lab-share-data.oss-cn-shanghai.aliyuncs.com/",
) -> Path:
"""
    Download and untar the dataset.
:param target_dir: Pathlike, the path of the dir to storage the dataset.
:param force_download: Bool, if True, download the tars no matter if the tars exist.
:param base_url: str, the url of the OpenSLR resources.
:return: the path to downloaded and extracted directory with data.
"""
url = f"{base_url}/AliMeeting/openlr/"
target_dir = Path(target_dir)
target_dir.mkdir(parents=True, exist_ok=True)
dataset_tar_names = [
"Train_Ali_far.tar.gz",
"Train_Ali_near.tar.gz",
"Eval_Ali.tar.gz",
"Test_Ali.tar.gz",
]
for tar_name in dataset_tar_names:
tar_path = target_dir / tar_name
if force_download or not tar_path.is_file():
urlretrieve_progress(
f"{url}/{tar_name}", filename=tar_path, desc=f"Downloading {tar_name}"
)
with tarfile.open(tar_path) as tar:
tar.extractall(path=target_dir)
return target_dir
def prepare_ali_meeting(
corpus_dir: Pathlike,
output_dir: Optional[Pathlike] = None,
mic: Optional[str] = "far",
) -> Dict[str, Dict[str, Union[RecordingSet, SupervisionSet]]]:
"""
Returns the manifests which consist of the Recordings and Supervisions
:param corpus_dir: Pathlike, the path of the data dir.
:param output_dir: Pathlike, the path where to write the manifests.
:param mic: str, "near" or "far", specifies whether to prepare the near-field or far-field data.
:return: a Dict whose key is the dataset part, and the value is Dicts with the keys 'recordings' and 'supervisions'.
"""
if not is_module_available("textgrid"):
raise ValueError(
"To prepare AliMeeting data, please 'pip install textgrid' first."
)
import textgrid
corpus_dir = Path(corpus_dir)
assert corpus_dir.is_dir(), f"No such directory: {corpus_dir}"
manifests = defaultdict(dict)
if output_dir is not None:
output_dir = Path(output_dir)
output_dir.mkdir(parents=True, exist_ok=True)
for part in ["Train", "Eval", "Test"]:
recordings = []
supervisions = []
# Eval and Test may further be inside another folder (since the "far" and "near" are grouped together)
if part == "Eval" or part == "Test":
corpus_dir = (
corpus_dir / f"{part}_Ali"
if (corpus_dir / f"{part}_Ali").is_dir()
else corpus_dir
)
wav_paths = corpus_dir / f"{part}_Ali_{mic}" / "audio_dir"
text_paths = corpus_dir / f"{part}_Ali_{mic}" / "textgrid_dir"
# For 'near' setting:
# - wav files have names like R0003_M0046_F_SPK0093.wav
# - textgrid files have names like R0003_M0046_F_SPK0093.TextGrid
# Speaker ID information is present in the file name itself
# For 'far' setting:
# - wav files have names like R0015_M0151_MS002.wav
        # - textgrid files have names like R0015_M0151.TextGrid
# Speaker ID information is present inside the TextGrid file
for text_path in tqdm(
list(text_paths.rglob("*.TextGrid")), desc=f"Preparing {part}"
):
session_id = text_path.stem
if mic == "near":
_, _, gender, spk_id = session_id.split("_")
spk_id = spk_id[3:] # SPK1953 -> 1953
try:
tg = textgrid.TextGrid.fromFile(str(text_path))
except ValueError:
logging.warning(
f"{session_id} has annotation issues. Skipping this recording."
)
continue
wav_path = list(wav_paths.rglob(f"{session_id}*.wav"))[0]
recording = Recording.from_file(wav_path, recording_id=session_id)
recordings.append(recording)
for tier in tg.tiers:
if mic == "far":
parts = tier.name.split("_")
if len(parts) == 4:
_, _, gender, spk_id = parts
elif len(parts) == 2:
gender, spk_id = parts
spk_id = spk_id[3:] # SPK1953 -> 1953
for i, interval in enumerate(tier.intervals):
if interval.mark != "":
start = interval.minTime
end = interval.maxTime
text = interval.mark
segment = SupervisionSegment(
id=f"{session_id}-{spk_id}-{i}",
recording_id=recording.id,
start=start,
duration=round(end - start, 4),
channel=0,
language="Chinese",
speaker=spk_id,
gender=gender,
text=text.strip(),
)
supervisions.append(segment)
recording_set, supervision_set = fix_manifests(
RecordingSet.from_recordings(recordings),
SupervisionSet.from_segments(supervisions),
)
# Fix manifests
validate_recordings_and_supervisions(recording_set, supervision_set)
if output_dir is not None:
supervision_set.to_file(output_dir / f"supervisions_{part.lower()}.jsonl")
recording_set.to_file(output_dir / f"recordings_{part.lower()}.jsonl")
manifests[part.lower()] = {
"recordings": recording_set,
"supervisions": supervision_set,
}
return manifests
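# Illustrative usage sketch (not part of the recipe; the paths below are hypothetical):
# prepare the far-field manifests and pick out the training recordings.
def _example_prepare_ali_meeting():
    manifests = prepare_ali_meeting(
        corpus_dir="/data/AliMeeting",            # wherever download_ali_meeting() extracted the data
        output_dir="/data/AliMeeting/manifests",  # per-split JSONL manifests are also written here
        mic="far",
    )
    return manifests["train"]["recordings"]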
| 1.773438 | 2 |
senk_poly_tools/senk_poly_tools.py | dualon/senk_poly_tools | 0 | 12760147 | <reponame>dualon/senk_poly_tools
""" Polygraph Tools, Semmelweis Egyetem Neurologiai Klinika
The code is an experimental parser and analyzer for the EDF files that
are results of various polygraph recordings.
author: <NAME>. 'dualon' Gyurko gyurko dot david at e-arc dot hu
The EDF IO was inspired by Boris Reuderink's EEGTools:
https://github.com/breuderink/eegtools/tree/master/eegtools
"""
import sys
import os
import argparse
import pprint
import datetime
import re
import numpy as np
from math import floor
import csv
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from edf_container import EdfContainer # senk_poly_tools.
class SenkPolyTools(object):
def __init__(self, sampling_freq = 0.5, export_raw = False, annot_label = 'EDF Annotations'):
""" Sampling frequency (in Hz) determines the sampling rate of the processed data. """
print("SenkPolyTools initiating...", end = " ")
self.results_base_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
'..',
'results'
))
self.annot_label = annot_label
self.export_nr_raw = (True if export_raw else False)
self.sampling_freq = self.filterSamplingFreq(sampling_freq)
print("OK")
def createEdfContainer(self, file_name):
""" Open an EDF(+) file and return an empty EdfContainer object with the file handler. """
print("Creating EDF(+) container... ", end="")
abs_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
'..',
'data',
file_name
))
if not os.path.isfile(abs_path):
raise FileNotFoundError("File '{}' not found!".format(abs_path))
e = EdfContainer(abs_path)
e.file_obj = open(abs_path, 'rb')
print("OK")
return e
def loadEdf(self, file_name):
e = self.createEdfContainer(file_name)
e = self.loadEdfHeaders(e)
e = self.loadEdfData(e)
return e
def loadEdfHeaders(self, e):
""" Load the headers of an EDF(+) file into an EdfContainer.
Existing headers are overwritten.
param e: (empty) EdfContainer object,
see SenkPolyTools.createEdfContainer()
returns EdfContainer with loaded headers
"""
print("Loading EDF(+) headers... ", end = "")
if not isinstance(e, EdfContainer):
raise Exception("The provided container is not an EdfContainer! Use the container returned by SenkPolyTools.openEdf().")
f = e.file_obj
f.seek(0)
e.version = f.read(8)
e.local_patient_id = f.read(80)
e.local_recording_id = f.read(80)
# parse timestamp to standard ISO datetime format
start_date = f.read(8)
start_time = f.read(8)
start_date = start_date.decode(encoding='ASCII').split('.')
start_time = start_time.decode(encoding='ASCII').split('.')
year = int(start_date[2])
e.date_time = datetime.datetime(
(2000+year if year<=84 else 1900+year),
int(start_date[1]),
int(start_date[0]),
int(start_time[0]),
int(start_time[1]),
int(start_time[2])
)
e.num_of_bytes_in_header = int(f.read(8))
edf_type = f.read(44)[:5]
e.edf_type = edf_type.decode(encoding='ASCII')
e.num_of_records = int(f.read(8))
e.record_duration = float(f.read(8))
e.num_of_signals = int(f.read(4))
nsr = range(e.num_of_signals)
#print(nsr)
e.labels = [f.read(16).strip().decode(encoding='ASCII') for _ in nsr]
e.transducer_types = [f.read(80).strip().decode(encoding='ASCII') for _ in nsr]
e.physical_dimension = [f.read(8).strip().decode(encoding='ASCII') for _ in nsr] # physical_dimensions: uV, cm/s, %, mV, ...
e.physical_min = np.asarray([float(f.read(8)) for _ in nsr])
e.physical_max = np.asarray([float(f.read(8)) for _ in nsr])
e.digital_min = np.asarray([float(f.read(8)) for _ in nsr])
e.digital_max = np.asarray([float(f.read(8)) for _ in nsr])
e.gain = (e.physical_max - e.physical_min) / (e.digital_max - e.digital_min) # numpy arrays
e.prefiltering = [f.read(80).strip() for _ in nsr]
e.num_of_samples_per_record = [int(f.read(8)) for _ in nsr]
e.sample_freq = [ns/e.record_duration for ns in e.num_of_samples_per_record] # in Hertz
# reserved bytes for each signal
f.read(32 * e.num_of_signals)
if f.tell() != e.num_of_bytes_in_header:
raise ValueError("The number of bytes in the header does not match the file object cursor. Header length mismatch during reading?");
print("OK")
return e
def loadEdfData(self, e):
""" Load the data from an EDF(+) file based on the already loaded headers
in an EdfContainer object.
param e: EdfContainer object with already loaded headers,
see SenkPolyTools.loadEdfHeaders()
returns EdfContainer with loaded data
"""
print("Loading EDF(+) data... ", end = "")
if e.file_obj is None:
raise AttributeError("EdfContainer.file_obj is missing, use SenkPolyTools.createEdfContainer() to create a file stream.")
err_msg = "EdfContainer.{} is missing, call SenkPolyTools.loadEdfHeaders()."
if e.num_of_records is None:
raise AttributeError(err_msg.format('num_of_records'))
if e.num_of_samples_per_record is None:
raise AttributeError(err_msg.format('num_of_samples_per_record'))
if e.labels is None:
raise AttributeError(err_msg.format('labels'))
if e.digital_min is None:
raise AttributeError(err_msg.format('digital_min'))
if e.physical_min is None:
raise AttributeError(err_msg.format('physical_min'))
if e.gain is None:
raise AttributeError(err_msg.format('gain'))
data = []
for i in range(e.num_of_records):
# 'k' is the index in the data list
# it is equal to j until the annotations channel is reached,
# k == j-1 after the annotation channel
k = 0
for j, curr_num_of_samples in enumerate(e.num_of_samples_per_record):
buff = e.file_obj.read(curr_num_of_samples * 2)
if len(buff) != curr_num_of_samples * 2:
                    raise EOFError("Unexpected end of EDF file!")
if e.labels[j] == self.annot_label:
e.annotations.extend(self.parseEdfAnnotation(buff))
else:
# if there is no entry for the channel data, create one
if len(data) == j:
data.append([])
# 2-byte little endian integer per channel
dig = np.fromstring(buff, '<i2').astype(np.float32)
phys = (dig - e.digital_min[j]) * e.gain[j] + e.physical_min[j]
# @TODO: consider numpy.r_
phys = phys.tolist()
data[k].extend(phys) # note the 'k' index
k = k+1
e.data_labels = [l for l in e.labels if l != self.annot_label]
# @TODO: more elegant numpy conversion
for chn_idx, chn_data in enumerate(data):
data[chn_idx] = np.array(chn_data)
e.data = np.array(data) # e.data is a numpy 2D matrix
del data
print("OK")
return e
def parseEdfAnnotation(self, annot):
""" Parse annotations of an EDF+ file.
        The EDF+ annotations are unicode encoded bytes. The onset and duration are separated by \x15 (single byte with value 21); these are followed by one or more annotation strings, each preceded by \x14 (single byte with value 20).
See the EDF+ specification for details:
http://www.edfplus.info/specs/edfplus.html#edfplusannotations
See also SenkPolyTools.loadEdfData()
param annot: bytes, the annotations in a record
returns: list of dicts, each dict is like {'onset': <float>, 'duration': <float>, 'annotation': <string>}
Example
-------
>>> ann = SenkPolyTools.parseEdfAnnotation(bytes)
>>> ann
... [{'onset': '+0', 'duration': None, 'annotation': ''}, {'onset': '+94.8038', 'duration': '0.0000', 'annotation': '1'}]
"""
exp = '(?P<onset>[+\-]\d+(?:\.\d*)?)' + \
'(?:\x15(?P<duration>\d+(?:\.\d*)?))?' + \
'(\x14(?P<annotation>[^\x00]*))?' + \
'(?:\x14\x00)'
return [m.groupdict() for m in re.finditer(exp, annot.decode('utf-8'))]
def smoothByAvg(self, x, wl = 3):
""" Smooth a curve with moving average.
A series of overlapping, wl length sections are created from the data (these are the moving windows), and they are convoluted against ones.
Smoothing by moving average is a basic smoothing function for non-periodic functions (as TCD data is typically not periodic, for example; see FFT for periodic data).
param x numpy array containing the data (typically a channel)
param wl int, the length of the moving window
returns a numpy array of the smoothed data
"""
if len(x) < wl:
raise Exception("SenkPolyTools.smoothByAvg: data length < wl")
s = np.r_[ x[wl-1:0:-1], x, x[-1:-wl:-1] ]
w = np.ones(wl,'d')
c = np.convolve(w/w.sum(), s, mode='valid')
# the smoothed array will have the length of (the original array + wl -1),
# but the first wl-1 points are not averaged
# -> cut wl/2 points at the start of the results
#return c[floor(wl/2):]
return c
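        # Illustrative sketch of the moving-average behaviour (values rounded):
        # >>> spt = SenkPolyTools()
        # >>> spt.smoothByAvg(np.array([1., 2., 3., 4., 5.]), wl=3)
        # array([2.    , 1.6667, 2.    , 3.    , 4.    , 4.6667, 4.6667])
        # Note the output is len(x) + wl - 1 samples long, as described above.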
def findExtrema(self, x, hwl = 150):
""" Find the local extrema (minima and maxima) in a running window of a vector.
The method takes a running window and finds one local minimum and local maximum in it. The window serves as a combined lookback and lookahead frame, the currently examined value (x[i]) is always at the middle between the two frames. The window itself can be different length:
1. It starts at index 0, its length is hwl (==only the lookahead frame),
|[*---]-------|
2. as it moves, more and more values can be included in the lookback frame,
|[--*---]-----|
3. when the current index is equal or greater than the half window length (i>=hwl), but there are more indices remaining than the lookahead frame (len(x)>i+hwl), it's a full window,
|-[---*---]---|
4. the lookahead frame starts to shrink as the end of the data is approached,
|------[---*-]|
5. and finally it stops when the last value is examined
|-------[---*]|
If the currently examined value is the smallest/largest in the window,
it is added to the output, if not, the window moves on.
The result is a tuple of 2 lists, each list containing (index, value) tuples. Every time a local minimum or maximum was found, it is added to the respective list with its index.
The first value is used from plateaus.
Note that scipy.signal.argrelextrema() does *not* always find the smallest local minima, therefore a custom method was created.
Parameters
----------
param x: 1D numpy array of data
param hwl: integer, the half length of the running window
returns: a (minima, maxima) tuple, both minima and maxima are lists of (index, value) tuples
Example
-------
>>> x
... array([1.0, .6, .8, .5, .7, .8, 1.0, 1.0, 1.0, .8, .6, .2, .5, .3, .4, .5, .8, 1.0, .7])
>>> minima, maxima = findExtrema(x, 3)
>>> minima
... [(3, .5), (11, .2)]
>>> maxima
... [(0, 1.0), (6, 1.0), (17, 1.0)]
"""
x_len = x.shape[0]
if x_len < hwl:
msg = "SenkPolyTools.findExtrema: data length ({}) < hwl ({})".format(x_len, hwl)
raise Exception(msg)
minima = []
maxima = []
for i in range(x_len):
ws = i-hwl if i-hwl>=0 else 0 # running window start
we = i+hwl if i+hwl<=x_len else x_len # running window end
w = x[ws:we]
wi = i-ws
wmin_i = np.argmin(w)
if wi == wmin_i:
minima.append( (i, x[i]) )
wmax_i = np.argmax(w)
if wi == wmax_i:
maxima.append( (i, x[i]) )
return (minima, maxima)
def indexDists(self, ind):
""" Get the arithmetic distance among indices in a list.
Example
-------
Find out the RR distances from an ECG channel (let's say that channel 0 is the ECG):
>>> minima, maxima = SenkPolyTools.findExtrema(EdfContainer.data[0])
>>> rr_indices, rr_amplitudes = zip(*maxima)
>>> dists = SenkPolyTools.indexDists(rr_indices)
>>> heart_rate = [60/(dist/EdfContainer.sample_freq[0]) for dist in dists]
"""
prev_idx_val = 0
dist = []
for idx in ind:
dist.append(idx-prev_idx_val)
prev_idx_val = idx
return dist
def downsample(self, x, orig_freq, freq):
""" Downsample signal data.
The downsampling ratio is calculated as the floor(original frequency/new freqency), both frequency values should be in the same dimension (preferably Hertz).
Note that this is a simple downsampling, not decimation, therefore high frequency data may cause aliasing.
param x: numpy array or list, the signal data
param orig_freq: float, the original frequency
param freq: float, the new frequency
returns: a numpy array of the downsampled data
"""
stepping = floor(orig_freq/freq)
        return np.array(x[::stepping])  # keep every stepping-th sample
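        # Illustrative sketch: with step-slicing, going from 500 Hz to 250 Hz keeps
        # every 2nd sample.
        # >>> spt.downsample(list(range(10)), 500, 250)
        # array([0, 2, 4, 6, 8])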
def interpolate(self, timed_y, sample_x):
""" Interpolate linearly sparsely sampled data to get more frequently sampled output.
The method takes a list of values (channel data) that are sparser than the resampling frequency, and calculates the intermediate y values at each 'sample_x' coordinate. The calculation is a linear interpolation (weighted average).
param timed_y: list of tuples, each tuple is (x_coordinate, y_value)
param sample_x: list of integers, each integer is an x_coordinate
returns: list of tuples, (sample_x_i, interpolated_y)
Example
-------
Find the systolic blood pressure values (the maxima of the arterial blood pressure) in data sampled at 500 Hz. The time positions of the maxima will be less frequent and irregular, therefore interpolate them to constant 0.5 Hz.
>>> _, abp_maxes = spt.findExtrema(channel_data)
        >>> # 500 Hz / 0.5 Hz = one sample every 1000 indices
        >>> resampling_x = [ix for ix in range(0, len(channel_data), 1000)]
        >>> abp_interp_maxes = spt.interpolate(abp_maxes, resampling_x)
        >>> abp_interp_maxes
        ... [(0, 0.0), (1000, 121.0), (2000, 125.0), (3000, 117.0)]
"""
interpolated_y = []
tyi = iter(timed_y)
prev_tx, prev_ty = (0, 0.0)
next_tx, next_ty = next(tyi)
for sx in sample_x:
if sx >= next_tx:
try:
prev_tx = next_tx
prev_ty = next_ty
next_tx, next_ty = next(tyi)
except StopIteration:
break;
iy = ((next_ty - prev_ty) * (sx - prev_tx)) / (next_tx - prev_tx) + prev_ty
interpolated_y.append( (sx, iy) )
return interpolated_y
def getSamplingFreq(self):
if self.sampling_freq is None:
raise ValueError("Sampling frequency has not been set yet! [ getSamplingFreq() ]")
return self.sampling_freq
def filterSamplingFreq(self, f):
""" Get or set the global sampling frequency parameter. """
if not f:
f = 0.5
if not isinstance(f, float):
f = float(f)
if f <= 0.0:
raise ValueError("Sampling frequency must be larger than zero! [setSamplingFreq({})]".format(f))
return f
def calcSamplingRateFromFreq(self, chn_freq, new_freq):
""" Calculate the resampling rate from frequency.
The channel data is recorded at a given 'chn_freq' frequency, but the user may want to specify a 'new_freq' sampling frequency. This method returns the index stepping for the new sampling.
For example, if
chn_freq = 500 (data was recorded at 500 Hz)
new_freq = 0.5 (user wants to sample the data at 0.5 Hz)
then
resampling rate = 1000
because 0.5 Hz means 1 measurement per 2 seconds, and since the original data contains 500 records (measurements) per second, we have to take every 1000th record to achieve 0.5 Hz.
See also
--------
        SenkPolyTools.filterSamplingFreq(), SenkPolyTools.getSamplingFreq()
"""
if new_freq > chn_freq:
raise ValueError("Requested sampling frequency can't be higher than the original channel record frequency! [calcSamplingRateFromFreq({}, {})]".format(chn_freq, new_freq))
if chn_freq == 0:
raise ValueError("Channel record frequency and sampling frequency can't be zero! [calcSamplingRateFromFreq({}, {})]".format(chn_freq, new_freq))
return int(round(chn_freq/new_freq))
def exportAnnotations(self, edfc):
print("Annotations... ", end='')
annot_fname = os.path.join(self.results_base_path, "{}_annotations.txt".format(edfc.file_basename))
with open(annot_fname, 'w', newline='') as annfp:
csvw = csv.writer(annfp, dialect='excel', delimiter=';')
csvw.writerow(['Onset', 'Duration', 'Annotation(s)'])
for ann in edfc.annotations:
if ann['annotation']:
csvw.writerow([
ann['onset'],
ann['duration'],
ann['annotation']
])
print("OK")
def processSenkEdf(self, edfc):
""" Process an EDF file specific to the SE Neurology Clinic.
This is a high level method which takes an EdfContainer object and processes and exports the ECG, TCD, ABP and CO2 channels.
The output is a bunch of text files in the /results directory.
"""
for chn_i, chn_data in enumerate(edfc.data):
chn_n = edfc.data_labels[chn_i]
if 'EKG' in chn_n or 'ECG' in chn_n:
spt.exportSenkEcg(edfc, chn_i)
elif 'TCD' in chn_n:
spt.exportSenkTcd(edfc, chn_i)
elif 'Tonometry' in chn_n: # ABP
spt.exportSenkAbp(edfc, chn_i)
elif 'CO2' in chn_n:
spt.exportSenkCo2(edfc, chn_i)
else:
print("Channel '{}'... skipped".format(chn_n))
def exportSenkEcg(self, edfc, chn_num):
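        # Derive the heart rate from the ECG channel: lightly smooth the signal,
        # detect the R peaks as local maxima, convert the R-R index distances to
        # beats per minute and write them to <file>_<channel>_heart_rate.txt
        # (optionally also dumping the noise-reduced raw trace).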
chn_n = edfc.data_labels[chn_num]
data = edfc.data[chn_num]
ecg_freq = edfc.sample_freq[chn_num]
print("Channel '{}'... ".format(chn_n), end="")
sm_data = spt.smoothByAvg(data, 8)
# export data after minimal noise reduction
if self.export_nr_raw:
fname = os.path.join(self.results_base_path, "{}_{}_nr_raw.txt".format(edfc.file_basename, chn_n.replace(' ', '')))
with open(fname, "w", newline='') as fp:
csvw = csv.writer(fp, dialect='excel', delimiter=';')
csvw.writerow(['Time (sec)', 'Data Value'])
for sm_d_i, dv in enumerate(sm_data):
csvw.writerow([sm_d_i/ecg_freq, dv])
# find minima, maxima
_, ecg_maxima = spt.findExtrema(sm_data)
ecg_max_ind, __ = zip(*ecg_maxima)
ecg_dists = spt.indexDists(ecg_max_ind)
hr = [60/(dist/ecg_freq) for dist in ecg_dists]
times = [curr_ecg_idx/ecg_freq for curr_ecg_idx in ecg_max_ind]
#times.insert(0, 0.0)
fname = os.path.join(self.results_base_path, "{}_{}_heart_rate.txt".format(edfc.file_basename, chn_n.replace(' ', '')))
with open(fname, "w", newline='') as fp:
csvw = csv.writer(fp, dialect='excel', delimiter=';')
csvw.writerow(["Time (sec)", "Heart Rate", "R-R Distance"])
for ecg_t, ecg_hr, rr_dist in zip(times, hr, ecg_dists):
csvw.writerow([ecg_t, ecg_hr, rr_dist])
print("OK")
def exportSenkTcd(self, edfc, chn_num):
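        # Process the TCD (transcranial Doppler) channel: smooth the velocity curve,
        # detect the envelope minima/maxima, resample them at the configured sampling
        # frequency, apply a moving average, and export (max + 2*min)/3 together with
        # the resistance index to <file>_<channel>_0.5hz.txt.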
chn_n = edfc.data_labels[chn_num]
data = edfc.data[chn_num]
tcd_freq = edfc.sample_freq[chn_num]
print("Channel '{}'... ".format(chn_n), end="")
sm_data = spt.smoothByAvg(data, 30)
# export data after minimal noise reduction
if self.export_nr_raw:
fname = os.path.join(self.results_base_path, "{}_{}_nr_raw.txt".format(edfc.file_basename, chn_n.replace(' ', '')))
with open(fname, "w", newline='') as fp:
csvw = csv.writer(fp, dialect='excel', delimiter=';')
csvw.writerow(['Time (sec)', 'Data Value'])
for sm_d_i, dv in enumerate(sm_data):
csvw.writerow([sm_d_i/tcd_freq, dv])
# find minima, maxima
tcd_mins, tcd_maxes = spt.findExtrema(sm_data)
# 0.5 Hz sampling (after interpolation)
sm_data_len = len(sm_data)
tcd_sampling = self.calcSamplingRateFromFreq(tcd_freq, self.getSamplingFreq())
interp_x = [ix for ix in range(0, sm_data_len, tcd_sampling)]
tcd_interp_mins = spt.interpolate(tcd_mins, interp_x)
tcd_interp_maxes = spt.interpolate(tcd_maxes, interp_x)
# 5 seconds moving average on interpolated minima
_, tcd_imins_vals = zip(*tcd_interp_mins)
tcd_imins_vals = np.array(tcd_imins_vals)
tcd_imins_avg = spt.smoothByAvg(tcd_imins_vals, 10)
# 5 seconds moving average on interpolated maxima
_, tcd_imaxes_vals = zip(*tcd_interp_maxes)
tcd_imaxes_vals = np.array(tcd_imaxes_vals)
tcd_immaxes_avg = spt.smoothByAvg(tcd_imaxes_vals, 10)
fname = os.path.join(self.results_base_path, "{}_{}_0.5hz.txt".format(edfc.file_basename, chn_n.replace(' ', '')))
with open(fname, "w", newline='') as fp:
csvw = csv.writer(fp, dialect='excel', delimiter=';')
csvw.writerow([
"Time (sec)",
"TCD Maxima",
"TCD Minima",
"(Max + 2*Min)/3",
"TCD Maxima 5sec Moving Avg",
"TCD Minima 5sec Moving Avg",
"Moving Avg (Max + 2*Min)/3",
"Resistance Index of Moving Avg"
])
for tcd_mn, tcd_mx, tcd_imn_a, tcd_imx_a in zip(tcd_interp_mins, tcd_interp_maxes, tcd_imins_avg, tcd_immaxes_avg):
csvw.writerow([
tcd_mn[0]/tcd_freq,
tcd_mx[1],
tcd_mn[1],
(tcd_mx[1] + 2*tcd_mn[1])/3,
tcd_imx_a,
tcd_imn_a,
(tcd_imx_a + 2*tcd_imn_a)/3,
(tcd_imx_a - tcd_imn_a)/tcd_imx_a
])
print("OK")
def exportSenkAbp(self, edfc, chn_num):
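        # Process the arterial blood pressure (tonometry) channel: smooth the curve,
        # detect systolic maxima and diastolic minima, derive a heart rate from the
        # peak distances, resample and average everything, and export the values
        # (including (syst + 2*diast)/3) to <file>_<channel>_0.5hz.txt.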
chn_n = edfc.data_labels[chn_num]
data = edfc.data[chn_num]
abp_freq = edfc.sample_freq[chn_num]
print("Channel '{}'... ".format(chn_n), end="")
sm_data = spt.smoothByAvg(data, 8)
# export data after minimal noise reduction
if self.export_nr_raw:
fname = os.path.join(self.results_base_path, "{}_{}_nr_raw.txt".format(edfc.file_basename, chn_n.replace(' ', '')))
with open(fname, "w", newline='') as fp:
csvw = csv.writer(fp, dialect='excel', delimiter=';')
csvw.writerow(['Time (sec)', 'Data Value'])
for sm_d_i, dv in enumerate(sm_data):
csvw.writerow([sm_d_i/abp_freq, dv])
# find minima, maxima
abp_mins, abp_maxes = spt.findExtrema(sm_data)
# sampling settings
sm_data_len = len(sm_data)
abp_sampling = self.calcSamplingRateFromFreq(abp_freq, self.getSamplingFreq())
interp_x = [ix for ix in range(0, sm_data_len, abp_sampling)]
# heart rate from ABP for every half second, because ECG channel is often missing...
abp_max_ind, __ = zip(*abp_maxes)
abp_max_dists = spt.indexDists(abp_max_ind)
hr = [60/(dist/abp_freq) for dist in abp_max_dists]
abp_hr_interp = spt.interpolate(zip(abp_max_ind, hr), interp_x)
# Half Hertz sampling of interpolated minima and maxima
abp_interp_mins = spt.interpolate(abp_mins, interp_x)
abp_interp_maxes = spt.interpolate(abp_maxes, interp_x)
# 5 seconds moving average on interpolated minima
_, abp_imins_vals = zip(*abp_interp_mins)
abp_imins_vals = np.array(abp_imins_vals)
abp_imins_avg = spt.smoothByAvg(abp_imins_vals, 10)
# 5 seconds moving average on interpolated maxima
_, abp_imaxes_vals = zip(*abp_interp_maxes)
abp_imaxes_vals = np.array(abp_imaxes_vals)
abp_imaxes_avg = spt.smoothByAvg(abp_imaxes_vals, 10)
fname = os.path.join(self.results_base_path, "{}_{}_0.5hz.txt".format(edfc.file_basename, chn_n.replace(' ', '')))
with open(fname, "w", newline='') as fp:
csvw = csv.writer(fp, dialect='excel', delimiter=';')
csvw.writerow([
"Time (sec)",
"ABP Systole",
"ABP Diastole",
"(Syst + 2*Diast)/3",
"ABP Systole 5sec Moving Avg",
"ABP Diastole 5sec Moving Avg",
"Moving Avg (Syst + 2*Diast)/3",
"Heart Rate (Derived)"
])
for abp_mn, abp_mx, abp_imn_a, abp_imx_a, abp_hr in zip(abp_interp_mins, abp_interp_maxes, abp_imins_avg, abp_imaxes_avg, abp_hr_interp):
csvw.writerow([
abp_mn[0]/abp_freq,
abp_mx[1],
abp_mn[1],
(abp_mx[1] + 2*abp_mn[1])/3,
abp_imx_a,
abp_imn_a,
(abp_imx_a + 2*abp_imn_a)/3,
abp_hr[1]
])
print("OK")
def exportSenkCo2(self, edfc, chn_num):
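        # Process the CO2 (capnography) channel: smooth the curve, detect the
        # respiratory peaks with a wide search window, derive the respiratory rate,
        # resample and average the peak values, and export them (corrected for the
        # 7-second channel lag) to <file>_<channel>_0.5hz.txt.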
chn_n = edfc.data_labels[chn_num]
data = edfc.data[chn_num]
co2_freq = edfc.sample_freq[chn_num]
print("Channel '{}'... ".format(chn_n), end="")
sm_data = spt.smoothByAvg(data, 8)
# export data after minimal noise reduction
if self.export_nr_raw:
fname = os.path.join(self.results_base_path, "{}_{}_nr_raw.txt".format(edfc.file_basename, chn_n.replace(' ', '')))
with open(fname, "w", newline='') as fp:
csvw = csv.writer(fp, dialect='excel', delimiter=';')
csvw.writerow(['Time (sec)', 'Data Value'])
for sm_d_i, dv in enumerate(sm_data):
csvw.writerow([sm_d_i/co2_freq, dv])
# 0.5 Hz sampling parameters
sm_data_len = len(sm_data)
co2_sampling = self.calcSamplingRateFromFreq(co2_freq, self.getSamplingFreq())
interp_x = [ix for ix in range(0, sm_data_len, co2_sampling)]
# respiratory peaks
_, co2_maxima = spt.findExtrema(sm_data, hwl=2000)
# respiratory rate
co2_max_ind, __ = zip(*co2_maxima)
co2_dists = spt.indexDists(co2_max_ind)
rr = [60/(dist/co2_freq) for dist in co2_dists]
co2_rr_interp = spt.interpolate(zip(co2_max_ind, rr), interp_x)
# .5 Hz sampling of respiratory peaks
co2_interp_maxes = spt.interpolate(co2_maxima, interp_x)
# 5 seconds moving average on interpolated maxima
_, co2_imaxes_vals = zip(*co2_interp_maxes)
co2_imaxes_vals = np.array(co2_imaxes_vals)
co2_immaxes_avg = spt.smoothByAvg(co2_imaxes_vals, 10)
fname = os.path.join(self.results_base_path, "{}_{}_0.5hz.txt".format(edfc.file_basename, chn_n.replace(' ', '')))
with open(fname, "w", newline='') as fp:
csvw = csv.writer(fp, dialect='excel', delimiter=';')
csvw.writerow([
"Time (sec)",
"CO2 Maxima",
"CO2 Maxima 5sec Moving Avg",
"Respiratory Rate"
])
for co2_mx, co2_imx_a, curr_rr in zip(co2_interp_maxes, co2_immaxes_avg, co2_rr_interp):
csvw.writerow([
(co2_mx[0]/co2_freq) - 7, # respiratory channel lag: 7 seconds
co2_mx[1],
co2_imx_a,
curr_rr[1]
])
print("OK")
if __name__ == '__main__':
    argp = argparse.ArgumentParser(description="Process a polygraph EDF(+) recording and export the ECG, TCD, ABP and CO2 channel data.")
argp.add_argument(
'-edf',
required = True,
help="The name of the input EDF file (only the file name, and it must be in the ./data/ folder)")
argp.add_argument(
'-s', '--sampling_frequency',
help="Optional parameter to specify the sampling frequency (in Hz) at which the processed/interpolated data will be exported.\nThe value should be a number larger than zero (float or integer).\nDefault sampling frequency = 0.5")
argp.add_argument(
'-r', '--raw',
action='store_true',
help="Optional flag to export the raw data with minimal noise reduction (processed data will also be exported).")
args = argp.parse_args()
spt = SenkPolyTools(args.sampling_frequency, args.raw)
edfc = spt.loadEdf(args.edf)
spt.exportAnnotations(edfc)
spt.processSenkEdf(edfc) | 1.984375 | 2 |
resample/__init__.py | labscript-suite-temp-2-archive/dihm-runviewer--forked-from--labscript_suite-runviewer | 0 | 12760148 | import os
import platform
import shutil
import importlib
# This sub-package is a proxy for the extensension file that abstracts the platform.
# Here we check what platform we are on and we import the appropriate extension from
# yet another sub-package, of which there is one for each supported platform.
# Then we pull out the resample function from that extension into this packages namespace
# so that importers see it here without having to know what platform we are on.
# Importers can simply do: 'from runviewer.resample import resample' to get the resample function.
if __name__ == '__main__':
    raise RuntimeError('Due to funny import rules, this file can\'t be run as __main__. ' +
                       'Please do \'import runviewer.resample\' from elsewhere to run it.')
arch = platform.architecture()
if arch == ('32bit', 'WindowsPE'):
plat_name = 'win32'
file_name = 'resample.pyd'
elif arch == ('64bit', 'WindowsPE'):
plat_name = 'win64'
file_name = 'resample.pyd'
elif arch == ('32bit', 'ELF'):
plat_name = 'unix32'
file_name = 'resample.so'
elif arch == ('64bit', 'ELF'):
plat_name = 'unix64'
file_name = 'resample.so'
else:
raise RuntimeError('Unsupported platform, please report a bug')
module = importlib.import_module('runviewer.resample.%s.resample'%plat_name)
resample = module.resample | 2.78125 | 3 |
app.py | Anivarth/weather | 0 | 12760149 | from bottle import run, route, debug, template, static_file, get, request
import requests, time, sys, pyinfodb
#alternate open_weather_app_id = ######
# Static Routes
"""@get('/<filename:re:.*\.js>')
def javascripts(filename):
return static_file(filename, root='static/js')"""
@get('/<filename:re:.*\.css>')
def stylesheets(filename):
return static_file(filename, root='static/css')
@get('/images/<filename:re:.*\.(jpg|png|gif|ico)>')
def images(filename):
return static_file(filename, root='static/images')
"""
@get('/<filename:re:.*\.(eot|ttf|woff|svg)>')
def fonts(filename):
return static_file(filename, root='static/fonts')
"""
@route('/myip/')
@route('/myip')
def myip():
client_ip = request.headers['X-Real-IP']
return ['Your IP is: {}\n'.format(client_ip)]
@route('/credits/')
@route('/credits')
def credits():
return credits
@route('/')
@route('')
def weather():
#client_ip = request.headers['X-Real-IP'] #Not work on local environment
ip_lookup = pyinfodb.IPInfo('#######')
ip = ip_lookup.get_city()#client_ip) #add client_ip in the parenthesis in real
ip_address = ip['ipAddress']
city = ip['cityName']
country_code = ip['countryCode']
zipcode = ip['zipCode']
country = ip['countryName']
region = ip['regionName']
latitude = ip['latitude']
longitude = ip['longitude']
time_zone = ip['timeZone']
payload = {'lat':latitude,'lon':longitude, 'units':'metric','appid':'#####'}
weather_data = requests.get('http://api.openweathermap.org/data/2.5/weather',params=payload)
weather = weather_data.json()
humidity = weather['main']['humidity']
pressure = int(float(weather['main']['pressure'])*0.750062)
avg_temp = int(round(weather['main']['temp']))
max_temp = int(round(weather['main']['temp_max']))
min_temp = int(weather['main']['temp_min'])
description = weather['weather'][0]['description']
cloud_cover = weather['clouds']['all']
icon = weather['weather'][0]['icon']
wind_direction = float(weather['wind']['deg'])
    if wind_direction>348.74 or wind_direction<11.26:
wind_direction = "N"
elif wind_direction>11.25 and wind_direction<33.76:
wind_direction = 'NNE'
elif wind_direction>33.75 and wind_direction<56.26:
wind_direction = 'NE'
elif wind_direction>56.25 and wind_direction<78.76:
wind_direction = 'ENE'
elif wind_direction>78.75 and wind_direction<101.26:
wind_direction = 'E'
elif wind_direction>101.25 and wind_direction<123.76:
wind_direction = 'ESE'
elif wind_direction>123.75 and wind_direction<146.26:
wind_direction = 'SE'
elif wind_direction>146.25 and wind_direction<168.76:
wind_direction = 'SSE'
elif wind_direction>168.75 and wind_direction<191.26:
wind_direction = 'S'
elif wind_direction>191.25 and wind_direction<213.76:
wind_direction = 'SSW'
elif wind_direction>213.75 and wind_direction<236.26:
wind_direction = 'SW'
elif wind_direction>236.25 and wind_direction<258.76:
wind_direction = 'WSW'
elif wind_direction>258.75 and wind_direction<281.26:
wind_direction = 'W'
elif wind_direction>281.25 and wind_direction<303.76:
wind_direction = 'WNW'
elif wind_direction>303.75 and wind_direction<326.26:
wind_direction = 'NW'
elif wind_direction>326.25 and wind_direction<348.75:
wind_direction = 'NNW'
wind_speed = weather['wind']['speed']
local_time = time.time()
return template('index.tpl', city=city, region = region, country_code = country_code,\
avg_temp = avg_temp, description = description, min_temp = min_temp, max_temp=max_temp,\
wind_speed = wind_speed, wind_direction = wind_direction, cloud_cover = cloud_cover,\
time_zone = time_zone, icon=icon, humidity = humidity, pressure=pressure,\
country = country, zipcode = zipcode, local_time = local_time )
@route('/faq')
@route('/faq/')
def faq():
return template('faq.tpl')
debug(True)
run(reloader=True, host='localhost', port=8080)
| 2.453125 | 2 |
Docker-API-lotr/services/api/helpers/helpers.py | ehlui/flask-mongo-lotr | 0 | 12760150 | <gh_stars>0
from flask import current_app as app
import logging
import pymongo
import os
db_data = {
'host': os.getenv("MONGODB_SERVICE_NAME", "No exist"),
'port': int(os.getenv("MONGODB_PORT", "0000")),
'db_name': os.getenv("MONGODB_DATABASE", "No exist"),
'db_type': os.getenv("DATABASE_TYPE", "No exist")
}
def verbose_formatter():
return logging.Formatter(
'[%(asctime)s.%(msecs)d] ; %(levelname)s ; '
'[%(name)s.%(funcName)s:%(lineno)d] ; %(message)s',
datefmt='%d-%m-%Y_%H:%M:%S'
)
def configure_logging(app, path='logs/app.log'):
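    # Replace the Flask app logger's handlers with DEBUG-level console and file
    # handlers that share the verbose formatter, and disable propagation.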
del app.logger.handlers[:]
loggers = [app.logger, ]
handlers = []
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(verbose_formatter())
handlers.append(console_handler)
file_handler = logging.FileHandler(path)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(verbose_formatter())
handlers.append(file_handler)
for l in loggers:
for handler in handlers:
l.addHandler(handler)
l.propagate = False
l.setLevel(logging.DEBUG)
def handle_pag_args(limit, offset):
args_parsed = {'limit': 1, 'offset': 1}
try:
limit_parsed = int(limit)
offset_parsed = int(offset)
if offset_parsed < 0 or limit_parsed < 0:
raise ArithmeticError('Offset and limit cannot be smaller than 0')
args_parsed['limit'] = limit_parsed
args_parsed['offset'] = offset_parsed
except (ValueError, ArithmeticError) as e:
app.logger.error(f' ; msg={e}')
return args_parsed
def handle_pagination(limit_arg, offset_arg, endpoint):
pagination = {'prev_url': '', 'next_url': ''}
arg_dict = handle_pag_args(limit_arg, offset_arg)
offset_limit_diff = arg_dict["offset"] - arg_dict["limit"]
prev_args = f'limit={arg_dict["limit"]}&offset={offset_limit_diff}'
next_args = f'limit={arg_dict["limit"]}&offset={arg_dict["offset"] + arg_dict["limit"]}'
pagination['prev_url'] = f'{endpoint}?{prev_args}'
pagination['next_url'] = f'{endpoint}?{next_args}'
if offset_limit_diff < 0:
pagination['prev_url'] = ''
return pagination
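# Illustrative sketch (the endpoint below is hypothetical): handle_pagination() turns the
# raw limit/offset query arguments into ready-made previous/next page URLs.
def _pagination_example():
    urls = handle_pagination('10', '20', '/api/characters')
    # urls == {'prev_url': '/api/characters?limit=10&offset=10',
    #          'next_url': '/api/characters?limit=10&offset=30'}
    return urls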
def build_pagination(limit_arg, offset_arg, endpoint, element, db_table):
arg_dict = handle_pag_args(limit_arg, offset_arg)
pag_urls = handle_pagination(
arg_dict['limit'], arg_dict['offset'], endpoint
)
limit = arg_dict['limit']
offset = arg_dict['offset']
max_elements = db_table.find().count()
if offset > max_elements:
offset = max_elements - 1
start_id = db_table.find().sort(element, pymongo.ASCENDING)
last_id = start_id[offset][element]
pagination_search = {element: {'$gte': last_id}}
return {
'limit': limit,
'offset': offset,
'prev_url': pag_urls["prev_url"],
'next_url': pag_urls["next_url"],
'pagination_search': pagination_search
}
| 2.40625 | 2 |