content stringlengths 5 1.05M |
|---|
# -*- coding: utf-8 -*-
from .primaries import MSDS_DISPLAY_PRIMARIES_CRT
__all__ = [
'MSDS_DISPLAY_PRIMARIES_CRT',
]
|
from googleads import ad_manager
from dfp.client import get_client
def create_line_items(line_items):
    """
    Create line items in DFP.

    Args:
      line_items (arr): an array of objects, each a line item configuration
    Returns:
      an array: an array of created line item IDs
    """
    service = get_client().GetService('LineItemService', version='v201908')
    created = service.createLineItems(line_items)
    # Collect the IDs that DFP assigned to the newly created line items.
    return [item['id'] for item in created]
def create_line_item_config(name, order_id, placement_ids, ad_unit_ids, cpm_micro_amount, sizes, hb_bidder_key_id,
                            hb_pb_key_id, hb_bidder_value_id, hb_pb_value_id, currency_code='USD'):
    """
    Creates a line item config object.

    Args:
        name (str): the name of the line item
        order_id (int): the ID of the order in DFP
        placement_ids (arr): an array of DFP placement IDs to target, or None
        ad_unit_ids (arr): an array of DFP ad unit IDs to target, or None
        cpm_micro_amount (int): the currency value (in micro amounts) of the
            line item
        sizes (arr): an array of objects, each containing 'width' and 'height'
            keys, to set the creative sizes this line item will serve
        hb_bidder_key_id (int): the DFP ID of the `hb_bidder` targeting key
        hb_pb_key_id (int): the DFP ID of the `hb_pb` targeting key
        hb_bidder_value_id (int): the DFP ID of the `hb_bidder` targeting value
        hb_pb_value_id (int): the DFP ID of the `hb_pb` targeting value
        currency_code (str): the currency code (e.g. 'USD' or 'EUR')
    Returns:
        an object: the line item config
    """
    # Set up sizes: one creative placeholder per requested size.
    creative_placeholders = [{'size': size} for size in sizes]
    # Create key/value targeting for Prebid.
    # https://github.com/googleads/googleads-python-lib/blob/master/examples/dfp/v201802/line_item_service/target_custom_criteria.py
    hb_bidder_criteria = {
        'xsi_type': 'CustomCriteria',
        'keyId': hb_bidder_key_id,
        'valueIds': [hb_bidder_value_id],
        'operator': 'IS'
    }
    hb_pb_criteria = {
        'xsi_type': 'CustomCriteria',
        'keyId': hb_pb_key_id,
        'valueIds': [hb_pb_value_id],
        'operator': 'IS'
    }
    # The custom criteria will resemble:
    # (hb_bidder_criteria.key == hb_bidder_criteria.value AND
    #  hb_pb_criteria.key == hb_pb_criteria.value)
    top_set = {
        'xsi_type': 'CustomCriteriaSet',
        'logicalOperator': 'AND',
        'children': [hb_bidder_criteria, hb_pb_criteria]
    }
    # https://developers.google.com/doubleclick-publishers/docs/reference/v201802/LineItemService.LineItem
    line_item_config = {
        'name': name,
        'orderId': order_id,
        # https://developers.google.com/doubleclick-publishers/docs/reference/v201802/LineItemService.Targeting
        'targeting': {
            'inventoryTargeting': {},
            'customTargeting': top_set,
        },
        'startDateTimeType': 'IMMEDIATELY',
        'unlimitedEndDateTime': True,
        'lineItemType': 'PRICE_PRIORITY',
        'costType': 'CPM',
        'costPerUnit': {
            'currencyCode': currency_code,
            'microAmount': cpm_micro_amount
        },
        'creativeRotationType': 'EVEN',
        'primaryGoal': {
            'goalType': 'NONE'
        },
        'creativePlaceholders': creative_placeholders,
    }
    # Both placement and ad-unit targeting are optional.
    if placement_ids is not None:
        line_item_config['targeting']['inventoryTargeting']['targetedPlacementIds'] = placement_ids
    if ad_unit_ids is not None:
        # Renamed loop variable: `id` shadowed the builtin.
        line_item_config['targeting']['inventoryTargeting']['targetedAdUnits'] = [
            {'adUnitId': ad_unit_id} for ad_unit_id in ad_unit_ids
        ]
    return line_item_config
|
from server import app, DBSession
from flask import Blueprint, request, session, send_file, make_response, jsonify
from utils import captcha, cmparePswd, invalid, invalidate
from flask_jwt_extended import jwt_required, jwt_optional, create_access_token, get_jwt_identity, get_raw_jwt
import io
from model import Storehouse, User, Product, Order
import datetime
bp = Blueprint('product',__name__)
# Only a storehouse operator may list the products of the storehouse they
# manage; every other role gets a 401.
@bp.route("/all", methods=['GET'])
@jwt_required
def allProduct():
    """Return brief info for every product in the current operator's storehouse."""
    current_user = get_jwt_identity()
    sess = DBSession()
    user = sess.query(User).filter_by(id=current_user).first()
    if user.isOperator:
        storehouse = sess.query(Storehouse).filter_by(operator_id=current_user).first()
        if storehouse:
            products = sess.query(Product).filter_by(storehouse_id=storehouse.id).all()
            all_products = [product.brief() for product in products]
            return jsonify(products=all_products), 200
    # Not an operator, or an operator with no storehouse assigned.
    return jsonify({"msg": "No Permission"}), 401
# Archived products are visible to managers only; everyone else (including
# anonymous callers, thanks to @jwt_optional) may view non-archived products.
@bp.route("/detail", methods=['POST'])
@jwt_optional
def productDetail():
    """Return detailed info for one product, enforcing the archive rule."""
    sess = DBSession()
    current_user = get_jwt_identity()
    if not request.is_json:
        return jsonify({"msg": "Missing JSON in request"}), 400
    product_id = request.json.get('id')
    if not product_id:
        return jsonify({"msg": "Missing id parameter"}), 400
    product = sess.query(Product).filter_by(id=product_id).first()
    if not product:
        return jsonify({"msg": "Bad productId"}), 401
    if product.archived:
        if current_user:
            user = sess.query(User).filter_by(id=current_user).first()
            if user and user.isManager:
                return jsonify(product.detailed()), 200
        # Archived and the caller is anonymous or not a manager.
        return jsonify({"msg": "No Permission"}), 401
    else:
        return jsonify(product.detailed()), 200
# Manager-side endpoint: create a new product.
@bp.route("/create", methods=['POST'])
@jwt_required
def createProduct():
    """Create a product from JSON {title, category, storehouse_id, dictdata}.

    Managers only; returns the new product's id on success.
    """
    sess = DBSession()
    current_user = get_jwt_identity()
    user = sess.query(User).filter_by(id=current_user).first()
    #manager = sess.query(User).filter_by(id=current_user,isManager=True).first()
    if not user.isManager:
        return jsonify({"msg": "No Permission"}), 401
    if not request.is_json:
        return jsonify({"msg": "Missing JSON in request"}), 400
    title = request.json.get('title')
    if not title:
        return jsonify({"msg": "Missing title parameter"}), 400
    category = request.json.get('category')
    if not category:
        return jsonify({"msg": "Missing category parameter"}), 400
    storehouse_id = request.json.get('storehouse_id')
    if not storehouse_id:
        return jsonify({"msg": "Missing storehouse_id parameter"}), 400
    dictdata = request.json.get('dictdata')
    if not dictdata:
        return jsonify({"msg": "Missing dictdata parameter"}), 400
    product = Product(title,category,storehouse_id)
    # Remaining optional fields are applied as a bulk dict update.
    product.update(dictdata)
    sess.add(product)
    sess.commit()
    return jsonify(result=True, productId=product.id)
# Manager-side endpoint: update an existing product's fields.
@bp.route("/update", methods=['POST'])
@jwt_required
def updateProduct():
    """Apply a dict of field updates to an existing product (managers only).

    Expects JSON: {"product_id": <int>, "dictdata": <dict of fields>}.
    Returns {"result": true} on success.
    """
    sess = DBSession()
    current_user = get_jwt_identity()
    user = sess.query(User).filter_by(id=current_user).first()
    if not user.isManager:
        return jsonify({"msg": "No Permission"}), 401
    if not request.is_json:
        return jsonify({"msg": "Missing JSON in request"}), 400
    product_id = request.json.get('product_id')
    if not product_id:
        return jsonify({"msg": "Missing product_id parameter"}), 400
    dictdata = request.json.get('dictdata')
    if not dictdata:
        return jsonify({"msg": "Missing dictdata parameter"}), 400
    product = sess.query(Product).filter_by(id=product_id).first()
    # Guard against an unknown id: previously product.update(...) would raise
    # AttributeError on None and surface as a 500. Mirrors productDetail's
    # "Bad productId" response.
    if not product:
        return jsonify({"msg": "Bad productId"}), 401
    product.update(dictdata)
    sess.commit()
    return jsonify(result=True), 200
'''
# 经理端查看销售统计,不知道放哪儿先放这儿了
# Tested by Pytest
@bp.route("/statistics", methods=['POST'])
@jwt_required
def statistics():
sess = DBSession()
current_user = get_jwt_identity()
manager = sess.query(User).filter_by(id=current_user,isManager=True).first()
if not manager:
return jsonify({"msg": "Bad manager_id"}), 401
if not request.is_json:
return jsonify({"msg": "Missing JSON in request"}), 400
storehouse_id = request.json.get('storehouse_id')
if not storehouse_id:
return jsonify({"msg": "Missing storehouse_id parameter"}), 400
storehouse = sess.query(Storehouse).filter_by(id=storehouse_id).first()
if not storehouse:
return jsonify({"msg": "Bad storehouseId"}), 401
nowTime = datetime.datetime.now()
virtual_orders = sess.query(Order).filter_by(storehouse_id=storehouse_id,virtual=True,cancelled=False).all()
if not virtual_orders:
return jsonify({"msg": "No order record"}), 401
product_count={}
for virorder in virtual_orders:
orders = sess.query(Order).filter_by(storehouse_id=storehouse_id,belonging_id=virorder.id,virtual=False).all()
for order in orders:
if(order.createTime.__rsub__(nowTime).days<=10):
if(order.product_id in product_count):
product_count[order.product_id] = product_count[order.product_id] + order.count
else:
product_count[order.product_id] = order.count
# 按字典集合中,每一个元组的第二个元素排列。
productId_count=sorted(product_count.items(),key=lambda x:x[1],reverse=True)
title_count=[]
for _id_count in productId_count:
product = sess.query(Product).filter_by(id=_id_count[0]).first()
name_count.append([product.name,_id_count[1]])
return jsonify(name_count=name_count), 200
'''
|
import unittest
from probrnn import graphs
import numpy as np
import os
# Silence TensorFlow's C++ logging (level 3 = errors only) before any graph
# construction happens in the tests below.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Shared hyper-parameters for all graph smoke tests.
params = {
    "LEARNING_RATE": 0.0001,
    "N_HIDDEN": 64,
    "N_BINS": 2,
    "BATCH_SIZE": 30,
    "WINDOW_LENGTH": 23,
}
class TestNADE(unittest.TestCase):
    """Smoke tests for the probrnn NADE graph wrapper."""

    def test_initialize(self):
        # Construction + initialization should not raise.
        graph = graphs.NADE(params)
        graph.initialize()

    def test_get_stats(self):
        graph = graphs.NADE(params)
        graph.initialize()
        # Called twice — presumably to verify stats collection is repeatable;
        # TODO confirm intent.
        d = graph.get_stats()
        d = graph.get_stats()
        self.assertTrue(len(graph.summary.keys()) > 5)
class TestTimeSeriesPrediction(unittest.TestCase):
    """Smoke tests for the TimeSeriesPrediction graph with random input."""

    def test_train_step(self):
        graph = graphs.TimeSeriesPrediction(params)
        graph.initialize()
        # Input laid out as (window, batch, bins) per the params dict.
        x = np.random.randn(params["WINDOW_LENGTH"], params["BATCH_SIZE"], params["N_BINS"])
        graph.train_step((x, None))

    def test_test_error(self):
        graph = graphs.TimeSeriesPrediction(params)
        graph.initialize()
        x = np.random.randn(params["WINDOW_LENGTH"], params["BATCH_SIZE"], params["N_BINS"])
        graph.test_error((x, None))
# Run the suite when executed directly.
if __name__ == "__main__":
    unittest.main()
|
S = input()
T = input()
a = []
for i in range(len(S)):
if S[i] != T[i]:
a.append(i)
if len(a) == 0 or (len(a) == 2 and a[0] + 1 == a[1] and S[a[0]] == T[a[1]] and S[a[1]] == T[a[0]]):
print('Yes')
else:
print('No')
|
# -*- coding: utf-8 -*-
import attr
import typing
from attrs_mate import AttrsClass
# --- SQS
@attr.s
class SQSRecord(AttrsClass):
    """One record of an SQS-triggered Lambda event; field names mirror the
    event's JSON keys."""
    messageId: str = attr.ib()
    receiptHandle: str = attr.ib()
    body: str = attr.ib()  # raw message body
    attributes: dict = attr.ib()
    messageAttributes: dict = attr.ib()
    md5OfBody: str = attr.ib()
    eventSource: str = attr.ib()
    eventSourceARN: str = attr.ib()
    awsRegion: str = attr.ib()
@attr.s
class SQSEvent(AttrsClass):
    """Top-level SQS event: a list of SQSRecord objects."""
    Records: typing.List[SQSRecord] = SQSRecord.ib_list_of_nested()
# --- S3Put
@attr.s
class Bucket(AttrsClass):
    """S3 bucket descriptor within an S3 event record."""
    name: str = attr.ib()
    ownerIdentity: dict = attr.ib()
    arn: str = attr.ib()
@attr.s
class Object(AttrsClass):
    """S3 object descriptor within an S3 event record."""
    key: str = attr.ib()
    size: int = attr.ib()
    eTag: str = attr.ib()
    sequencer: str = attr.ib()
@attr.s
class S3(AttrsClass):
    """The 's3' sub-document of an S3 event record."""
    s3SchemaVersion: str = attr.ib()
    configurationId: str = attr.ib()
    bucket: Bucket = Bucket.ib_nested()
    # Field name shadows the builtin `object` on purpose: it matches the
    # event's JSON key.
    object: Object = Object.ib_nested()
@attr.s
class S3PutEvent(AttrsClass):
    """One S3 Put event record; field names mirror the event's JSON keys."""
    eventVersion: str = attr.ib()
    eventSource: str = attr.ib()
    awsRegion: str = attr.ib()
    eventTime: str = attr.ib()
    eventName: str = attr.ib()
    userIdentity: dict = attr.ib()
    requestParameters: dict = attr.ib()
    responseElements: dict = attr.ib()
    s3: S3 = S3.ib_nested()
|
import socket

# Create a TCP client socket.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Connect to (host IP, port); any int port works as long as it is not in use.
sock.connect(("192.168.99.1", 9999))

# Compose and send the message.
test_msg = "abcd"  # test case 1
sock.send(test_msg.encode())

# Receive the reply (up to 1024 bytes).
data_size = 1024
data = sock.recv(data_size)
# print(data.decode())

# Close the connection.
sock.close()
from flask import request
from flask_restful import Resource
from werkzeug.exceptions import NotFound
from bookshelf.books.model import Book
from bookshelf.books.repository import BookRepository
class BookResource(Resource):
    """REST resource exposing a single book as a JSON:API-style document."""

    def __init__(self, book_repository: BookRepository):
        self._book_repository = book_repository

    def get(self, book_id: str):
        """Fetch one book by id; raises 404 when the repository has none."""
        book = self._book_repository.get_book(book_id)
        if book is None:
            raise NotFound
        return {
            "data": {
                "type": "Book",
                "id": book.id,
                "attributes": {
                    "title": book.title
                }
            }
        }

    def put(self, book_id: str):
        """Create or replace a book from the request payload; returns 201.

        NOTE(review): the stored id comes from the payload, not from the
        URL's book_id — confirm this mismatch is intended.
        """
        book_data = request.json
        book = Book(id=book_data['data']['id'], title=book_data['data']['attributes']['title'])
        self._book_repository.save_book(book)
        return {
            "data": {
                "type": "Book",
                "id": book.id,
                "attributes": {
                    "title": book.title
                }
            }
        }, 201
|
import graphene
from ..core.fields import FilterInputConnectionField
from ..translations.mutations import PageTranslate
from .bulk_mutations import PageBulkDelete, PageBulkPublish, PageTypeBulkDelete
from .filters import PageFilterInput, PageTypeFilterInput
from .mutations.attributes import (
PageAttributeAssign,
PageAttributeUnassign,
PageTypeReorderAttributes,
)
from .mutations.pages import (
PageCreate,
PageDelete,
PageTypeCreate,
PageTypeDelete,
PageTypeUpdate,
PageUpdate,
)
from .resolvers import (
resolve_page,
resolve_page_type,
resolve_page_types,
resolve_pages,
)
from .sorters import PageSortingInput, PageTypeSortingInput
from .types import Page, PageType
class PageQueries(graphene.ObjectType):
    """GraphQL query fields for pages and page types.

    Resolvers delegate to the functions imported from .resolvers.
    """
    page = graphene.Field(
        Page,
        id=graphene.Argument(graphene.ID, description="ID of the page."),
        slug=graphene.String(description="The slug of the page."),
        description="Look up a page by ID or slug.",
    )
    pages = FilterInputConnectionField(
        Page,
        sort_by=PageSortingInput(description="Sort pages."),
        filter=PageFilterInput(description="Filtering options for pages."),
        description="List of the shop's pages.",
    )
    page_type = graphene.Field(
        PageType,
        id=graphene.Argument(
            graphene.ID, description="ID of the page type.", required=True
        ),
        description="Look up a page type by ID.",
    )
    page_types = FilterInputConnectionField(
        PageType,
        sort_by=PageTypeSortingInput(description="Sort page types."),
        filter=PageTypeFilterInput(description="Filtering options for page types."),
        description="List of the page types.",
    )

    def resolve_page(self, info, id=None, slug=None):
        # Lookup by either ID or slug; both optional.
        return resolve_page(info, id, slug)

    def resolve_pages(self, info, **kwargs):
        return resolve_pages(info, **kwargs)

    def resolve_page_type(self, info, id):
        return resolve_page_type(info, id)

    def resolve_page_types(self, info, **kwargs):
        return resolve_page_types(info, **kwargs)
class PageMutations(graphene.ObjectType):
    """GraphQL mutation fields for pages, page types and their attributes."""
    # page mutations
    page_create = PageCreate.Field()
    page_delete = PageDelete.Field()
    page_bulk_delete = PageBulkDelete.Field()
    page_bulk_publish = PageBulkPublish.Field()
    page_update = PageUpdate.Field()
    page_translate = PageTranslate.Field()
    # page type mutations
    page_type_create = PageTypeCreate.Field()
    page_type_update = PageTypeUpdate.Field()
    page_type_delete = PageTypeDelete.Field()
    page_type_bulk_delete = PageTypeBulkDelete.Field()
    # attributes mutations
    page_attribute_assign = PageAttributeAssign.Field()
    page_attribute_unassign = PageAttributeUnassign.Field()
    page_type_reorder_attributes = PageTypeReorderAttributes.Field()
|
import difflib as dl
def generateHTMLDiff(srcname, mutantname, src_dir="../src/", mutants_dir="../mutants/"):
    """Build an HTML side-by-side diff between a source file and its mutant.

    Args:
        srcname: file name of the original source (e.g. "foo.cpp").
        mutantname: mutant identifier; the mutated file is expected at
            <mutants_dir><mutantname>/src/<srcname with "_<mutantname>" inserted>.
        src_dir: directory holding the original sources (default preserves the
            historical hard-coded relative path).
        mutants_dir: root directory holding the mutants (ditto).

    Returns:
        str: an HTML <table> fragment produced by difflib.HtmlDiff.
    """
    with open(src_dir + srcname) as f:
        original_file = f.read()
    mutated_name = srcname.replace(".cpp", "_" + mutantname + ".cpp")
    with open(mutants_dir + mutantname + "/src/" + mutated_name) as f:
        mutated_file = f.read()
    hd = dl.HtmlDiff()
    return hd.make_table(original_file.split("\n"), mutated_file.split("\n"),
                         fromdesc='Source', todesc=mutantname,
                         context=False, numlines=0)
|
import json
import numpy as np
import cv2
import io
from django.core import serializers
from django.http import HttpResponse, JsonResponse
from django.shortcuts import render, redirect, get_object_or_404
import base64
import os
import dlib
from imutils import face_utils
import math
from .detection import *
from .geometry import *
from .extractor import *
# Create your views here.
def extract(request):
    """Django view: detect a face in an uploaded image, align it, and return
    base64 <img> HTML for the aligned face plus each extracted facial part.

    Expects a POST with an uploaded 'file'. Responds with JSON:
        {'image': <img html>, 'features': {part: <img html>, ...}} on success,
        {'error': ...} when no face is detected or the method is not POST.
    """
    if request.method == 'POST':
        myfile = request.FILES['file'].file.read()
        # np.fromstring is deprecated for binary input; frombuffer is the
        # supported (zero-copy) equivalent.
        nparr = np.frombuffer(myfile, np.uint8)
        input_image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        originalImage = input_image
        # Detect eye landmarks first so the face can be aligned.
        eyePoints = facial_landmarks(originalImage, eyeOnlyMode=True)
        if eyePoints is not None:
            # Align face and re-detect the full landmark set on the result.
            image = align_face(originalImage, eyePoints)
            improved_landmarks = facial_landmarks(image, allowEnhancement=True)
            ret, jpeg = cv2.imencode('.jpg', image)
            image_bytes = jpeg.tobytes()
            output_image_html = "<img class='demo-img' src='data:image/png;base64," + base64.b64encode(image_bytes).decode() + "'/>"
            # Extract each requested facial part as its own inline image.
            options = ['all']
            features = face_parts_imgs(image, improved_landmarks, options)
            feature_images = {}
            for key in features:
                ret, jpeg = cv2.imencode('.jpg', features[key])
                image_bytes = jpeg.tobytes()
                html_class = 'extractor-img'
                output_html = f"<img class='{html_class}' src='data:image/png;base64,{base64.b64encode(image_bytes).decode()}'/>"
                feature_images[key] = output_html
            return JsonResponse({'image': output_image_html, 'features': feature_images})
        else:
            return JsonResponse({'error': 'No faces detected!'})
    # Non-POST requests previously fell through and returned None, which
    # Django rejects with a 500; answer explicitly instead.
    return JsonResponse({'error': 'POST an image file to this endpoint.'}, status=405)
#%%
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import phd.viz
colors, palette = phd.viz.phd_style()

# Load in the datasets
data_a = pd.read_csv('../../data/ch6_induction_si/figS1_partA.csv')
data_b = pd.read_csv('../../data/ch6_induction_si/figs1_partB.csv')
data_O2 = pd.read_csv('../../data/ch2_induction/RazoMejia_2018.csv', comment='#')
# NOTE(review): doubling presumably converts per-dimer repressor counts to
# per-cell copy numbers — confirm against the dataset's documentation.
data_O2['repressors'] *= 2
# Keep only the O2 operator at 260 repressors.
data_O2 = data_O2[(data_O2['operator'] == 'O2') & (data_O2['repressors'] == 260)]

# Define necessary parameters.
ka = data_a[data_a.parameter == 'logKA']  # best-fit log K_A vs allosteric energy
ki = data_a[data_a.parameter == 'logKI']  # best-fit log K_I vs allosteric energy
c_range = np.logspace(-8, -2, 500)  # inducer concentrations (plotted later as µM)
R = 260
ep_r = -13.9
_colors = sns.color_palette('magma', n_colors=len(data_b)+1)
def foldchange(c, R, ep_ai, ep_r, ep_a, ep_i):
    """Fold-change in gene expression for the MWC induction model.

    c is the inducer concentration, R the repressor copy number, ep_ai the
    allosteric energy difference, ep_r the repressor-operator binding energy,
    and ep_a / ep_i set the active/inactive concentration scales (same units
    as c — they are used as c/ep_a, c/ep_i below).
    """
    active = (1 + c / ep_a) ** 2
    inactive = (1 + c / ep_i) ** 2
    # Weight of the active state relative to the full allosteric partition sum.
    mwc_term = active * (1 + np.exp(-ep_ai)) / (active + np.exp(-ep_ai) * inactive)
    return 1 / (1 + mwc_term * (R / 4.6E6) * np.exp(-ep_r))
# Define the figure axis and labels.
fig, ax = plt.subplots(1, 2, figsize=(6, 2.25))
phd.viz.despine(ax)
_ = ax[0].set_xlabel(r'allosteric parameter $\Delta\varepsilon_{AI}\,(k_BT)$')
_ = ax[0].set_ylabel(r'best-fit parameter value')
_ = ax[1].set_xscale('log')
_ = ax[1].set_xlabel('IPTG [µM]')
_ = ax[1].set_ylabel('fold-change')

# Plots for panel (A)
_ = ax[0].plot(ka.ep, ka.bestfit, '-', color=colors['orange'],
               label=r'$\mathrm{log}\, \frac{K_A}{1\mathrm{M}}$')
# BUG FIX: this curve plots the K_I fit; its label previously repeated K_A.
_ = ax[0].plot(ki.ep, ki.bestfit, '-', color=colors['purple'],
               label=r'$\mathrm{log}\, \frac{K_I}{1\mathrm{M}}$')

# Plot the theory curves for panel (B), one per allosteric energy in data_b.
# (Note: `ka`/`ki` are deliberately rebound from DataFrames to floats here —
# panel A is already drawn.)
for i in range(len(data_b)):
    ep_ai = data_b.iloc[i]['ep_ai']
    ka = np.exp(data_b.iloc[i]['log_ka'])
    ki = np.exp(data_b.iloc[i]['log_ki'])
    fc = foldchange(c_range, R, ep_ai, ep_r, ka, ki)
    _ = ax[1].plot(c_range * 1E6, fc, label=ep_ai, color=_colors[i])

# Plot the measured data: mean ± SEM per IPTG concentration.
grouped = data_O2.groupby(['IPTG_uM']).fold_change_A
for group, data in grouped:
    mean_fc = np.mean(data)
    mean_sem = np.std(data) / np.sqrt(len(data))
    _ = ax[1].errorbar(group, mean_fc, mean_sem,
                       linestyle='none', color=colors['orange'],
                       fmt='o', markeredgecolor='white', markeredgewidth=0.5,
                       label='__nolegend__', ms=4.5)

# Add the legends and labels.
_ = ax[0].legend(loc='lower left')
leg = ax[1].legend(loc='upper left', title=r"""allosteric parameter
$\Delta\varepsilon_{AI}$ $(k_BT)$""", bbox_to_anchor=(1, 1),
                   fontsize=6)
leg.get_title().set_fontsize(6)
fig.text(0, 0.95, '(A)', fontsize=8)
fig.text(0.45, 0.95, '(B)', fontsize=8)

# Format and save the figure.
plt.tight_layout()
plt.savefig('../figs/ch6_figS1.pdf', bbox_inches='tight')
plt.savefig('../figs/ch6_figS1.png', bbox_inches='tight')
# %%
|
# Read the WIDTH (larg) and HEIGHT (altu) of a wall in metres, compute its
# area (WIDTH x HEIGHT) and the amount of paint needed, given that one can
# of paint covers two square metres.
def paint_needed(larg, altu):
    """Return (area, cans) for a larg x altu wall; one can covers 2 m²."""
    area = larg * altu
    return area, area / 2


if __name__ == "__main__":
    larg = float(input("qual a largura da parede? "))
    altu = float(input("qual a altura da parede? "))
    area, tottinta = paint_needed(larg, altu)
    # BUG FIX: ':.2' formats to 2 *significant figures* (e.g. 12.5 -> '1.2e+01');
    # ':.2f' gives the intended 2 decimal places. Also, the quantity is cans
    # (galões), not litres, per the spec comment above.
    print(f"A parede em questao mede {larg:.2f} metro(s) de largura e {altu:.2f} metro(s) de altura, \npossui uma area de {area:.2f} m²", end=" e ")
    print(f"para pintá-la será necessária a compra de {tottinta} galão(ões) de tinta")
|
import pytest
from ephios.core.signup import SignupStats
@pytest.mark.django_db
def test_signup_stats_addition(django_app):
    """SignupStats '+' combines the four counters field-wise.

    NOTE(review): from the expectations below, None in the fourth field
    makes the sum None, while None in the third field is skipped (3 + None
    == 3) — confirm against SignupStats.__add__.
    """
    a = SignupStats(4, 2, 3, None)
    b = SignupStats(5, 2, 3, 5)
    c = SignupStats(3, 2, None, 2)
    assert a + b == SignupStats(9, 4, 6, None)
    assert b + c == SignupStats(8, 4, 3, 7)
@pytest.mark.django_db
def test_signup_conflicting_shifts(django_app, volunteer, event, conflicting_event):
    """A volunteer already committed elsewhere cannot sign up for a shift
    that conflicts (fixtures provide the conflicting events)."""
    assert not conflicting_event.shifts.first().signup_method.can_sign_up(
        volunteer.as_participant()
    )
|
from typing import Dict, List
from icolos.core.containers.perturbation_map import Edge
from icolos.core.workflow_steps.pmx.base import StepPMXBase
from pydantic import BaseModel
from icolos.core.workflow_steps.step import _LE
from icolos.utils.enums.program_parameters import StepPMXEnum
from icolos.utils.execute_external.pmx import PMXExecutor
from icolos.utils.general.parallelization import SubtaskContainer
import numpy as np
import glob
import pandas as pd
import os
_PSE = StepPMXEnum()
class StepPMXRunAnalysis(StepPMXBase, BaseModel):
    """
    Analyses map, returns both a summary and a full results dataframe, written to top level of work_dir
    """

    # Per-edge ddG summary (one row per edge).
    results_summary: pd.DataFrame = None
    # Per-run dG values, indexed by "<edge>_<branch>_<replica>" row labels.
    results_all: pd.DataFrame = None

    class Config:
        # pandas DataFrames are not pydantic-native types.
        arbitrary_types_allowed = True

    def __init__(self, **data):
        super().__init__(**data)
        self._initialize_backend(executor=PMXExecutor)
        self.results_summary = pd.DataFrame()
        self.results_all = pd.DataFrame()

    def execute(self):
        """Run `pmx analyse` for every edge in parallel, then aggregate the
        per-edge results into the summary dataframes."""
        edges = [e.get_edge_id() for e in self.get_edges()]
        # One edge per sublist so each parallel task handles a single edge.
        self.execution.parallelization.max_length_sublists = 1
        self._subtask_container = SubtaskContainer(
            max_tries=self.execution.failure_policy.n_tries
        )
        self._subtask_container.load_data(edges)
        self._execute_pmx_step_parallel(
            run_func=self.run_analysis,
            step_id="pmx_run_analysis",
            result_checker=self._check_result,
        )
        self.analysis_summary(edges)

    def _run_analysis_script(
        self, analysispath, stateApath, stateBpath, bVerbose=False
    ):
        """Invoke `pmx analyse` on the forward/backward transition xvg files,
        writing integ0/integ1.dat, a work plot and results.txt into
        analysispath. Optionally echoes the ANALYSIS section of the output.
        """
        fA = " ".join(glob.glob("{0}/*xvg".format(stateApath)))
        fB = " ".join(glob.glob("{0}/*xvg".format(stateBpath)))
        oA = "{0}/integ0.dat".format(analysispath)
        oB = "{0}/integ1.dat".format(analysispath)
        wplot = "{0}/wplot.png".format(analysispath)
        o = "{0}/results.txt".format(analysispath)
        # NOTE(review): `args` is assembled from the step settings but never
        # added to the command below — confirm whether flags should be passed.
        args = " ".join(self.settings.arguments.flags)
        # Temperature (298 K) and bootstrap count (100) are hard-coded here.
        cmd = "$PMX analyse --quiet -fA {0} -fB {1} -o {2} -oA {3} -oB {4} -w {5} -t {6} -b {7}".format(
            fA, fB, o, oA, oB, wplot, 298, 100
        )
        # subprocess complains that the command is too long
        os.system(cmd)
        if bVerbose == True:
            fp = open(o, "r")
            lines = fp.readlines()
            fp.close()
            bPrint = False
            # Echo everything from the "ANALYSIS" marker onwards.
            for l in lines:
                if "ANALYSIS" in l:
                    bPrint = True
                if bPrint == True:
                    print(l, end="")

    def _read_neq_results(self, fname):
        """Parse a pmx results.txt file into a flat list of values.

        Values are appended in the order the matching lines appear in the
        file (frame counts and the BAR dG / error lines).
        """
        fp = open(fname, "r")
        lines = fp.readlines()
        fp.close()
        out = []
        for l in lines:
            l = l.rstrip()
            foo = l.split()
            if "BAR: dG" in l:
                out.append(float(foo[-2]))
            elif "BAR: Std Err (bootstrap)" in l:
                out.append(float(foo[-2]))
            elif "BAR: Std Err (analytical)" in l:
                out.append(float(foo[-2]))
            elif "0->1" in l:
                out.append(int(foo[-1]))
            elif "1->0" in l:
                out.append(int(foo[-1]))
        return out

    def _fill_resultsAll(self, res, edge, wp, r):
        """Write one parsed results list into results_all under the
        "<edge>_<wp>_<r>" row label.

        NOTE(review): the index positions assume results.txt lists the frame
        counts first, then dG and the two error lines — confirm against the
        pmx output format.
        """
        try:
            rowName = "{0}_{1}_{2}".format(edge, wp, r)
            self.results_all.loc[rowName, "val"] = res[2]
            self.results_all.loc[rowName, "err_analyt"] = res[3]
            self.results_all.loc[rowName, "err_boot"] = res[4]
            self.results_all.loc[rowName, "framesA"] = res[0]
            self.results_all.loc[rowName, "framesB"] = res[1]
        except IndexError:
            self._logger.log(
                f"Index Error encountered whilst parsing results to summary file for job {edge}/{wp}/{r}",
                _LE.WARNING,
            )

    def _summarize_results(self, edges):
        """Aggregate per-replica dG values into per-branch rows of
        results_all, then compute the per-edge ddG (complex - ligand) rows of
        results_summary."""
        # Number of samples drawn per replica when bootstrapping the error.
        bootnum = 1000
        for edge in edges:
            for wp in self.therm_cycle_branches:
                dg = []
                erra = []
                errb = []
                distra = []
                distrb = []
                for r in range(1, self.get_perturbation_map().replicas + 1):
                    rowName = "{0}_{1}_{2}".format(edge, wp, r)
                    dg.append(self.results_all.loc[rowName, "val"])
                    erra.append(self.results_all.loc[rowName, "err_analyt"])
                    errb.append(self.results_all.loc[rowName, "err_boot"])
                    # Sample normal distributions around each replica's dG to
                    # propagate both error estimates across replicas.
                    distra.append(
                        np.random.normal(
                            self.results_all.loc[rowName, "val"],
                            self.results_all.loc[rowName, "err_analyt"],
                            size=bootnum,
                        )
                    )
                    distrb.append(
                        np.random.normal(
                            self.results_all.loc[rowName, "val"],
                            self.results_all.loc[rowName, "err_boot"],
                            size=bootnum,
                        )
                    )
                rowName = "{0}_{1}".format(edge, wp)
                distra = np.array(distra).flatten()
                distrb = np.array(distrb).flatten()
                if self.get_perturbation_map().replicas == 1:
                    # Single replica: take its values directly.
                    self.results_all.loc[rowName, "val"] = dg[0]
                    self.results_all.loc[rowName, "err_analyt"] = erra[0]
                    self.results_all.loc[rowName, "err_boot"] = errb[0]
                else:
                    self.results_all.loc[rowName, "val"] = np.mean(dg)
                    self.results_all.loc[rowName, "err_analyt"] = np.sqrt(
                        np.var(distra) / float(self.get_perturbation_map().replicas)
                    )
                    self.results_all.loc[rowName, "err_boot"] = np.sqrt(
                        np.var(distrb) / float(self.get_perturbation_map().replicas)
                    )
            #### also collect self.results_summary
            rowNameWater = "{0}_{1}".format(edge, "ligand")
            rowNameProtein = "{0}_{1}".format(edge, "complex")
            # ddG = dG(complex) - dG(ligand) for this edge.
            dg = (
                self.results_all.loc[rowNameProtein, "val"]
                - self.results_all.loc[rowNameWater, "val"]
            )
            # NOTE(review): independent errors normally combine in quadrature
            # with a '+'; the '-' here can yield NaN when the ligand error
            # exceeds the complex error — confirm intent.
            erra = np.sqrt(
                np.power(self.results_all.loc[rowNameProtein, "err_analyt"], 2.0)
                - np.power(self.results_all.loc[rowNameWater, "err_analyt"], 2.0)
            )
            errb = np.sqrt(
                np.power(self.results_all.loc[rowNameProtein, "err_boot"], 2.0)
                - np.power(self.results_all.loc[rowNameWater, "err_boot"], 2.0)
            )
            rowName = edge
            # Edge ids encode the two ligand hashes as "<lig1>_<lig2>".
            self.results_summary.loc[rowName, "lig1"] = edge.split("_")[0]
            self.results_summary.loc[rowName, "lig2"] = edge.split("_")[1]
            self.results_summary.loc[rowName, "val"] = dg
            self.results_summary.loc[rowName, "err_analyt"] = erra
            self.results_summary.loc[rowName, "err_boot"] = errb
        print(self.results_summary)

    def analysis_summary(self, edges):
        """Collect every per-run results.txt into the dataframes, compute
        ddGs, optionally join experimental values, and write both CSVs to
        the top level of work_dir."""
        for edge in edges:
            for r in range(1, self.get_perturbation_map().replicas + 1):
                for wp in self.therm_cycle_branches:
                    analysispath = "{0}/analyse{1}".format(
                        self._get_specific_path(
                            workPath=self.work_dir, edge=edge, wp=wp
                        ),
                        r,
                    )
                    resultsfile = "{0}/results.txt".format(analysispath)
                    res = self._read_neq_results(resultsfile)
                    self._fill_resultsAll(res, edge, wp, r)
        # the values have been collected now
        # let's calculate ddGs
        self._summarize_results(edges)
        try:
            if "exp_results" in self.settings.additional.keys() and os.path.isfile(
                self.settings.additional["exp_results"]
            ):
                # Ligand ids in the CSV may carry a file extension; strip it.
                exp_data = pd.read_csv(
                    self.settings.additional["exp_results"],
                    converters={"Ligand": lambda x: str(x).split(".")[0]},
                )
                print(exp_data)
                # compute the experimental ddG and append to resultsSummary
                node_data = self.get_perturbation_map().node_df
                self.results_summary["exp_ddG"] = self.results_summary.apply(
                    lambda x: np.array(
                        self.compute_exp_ddG(
                            x["lig1"], x["lig2"], node_data=node_data, exp_data=exp_data
                        )
                    ),
                    axis=1,
                )
        except Exception as e:
            # Experimental comparison is best-effort; never fail the step.
            self._logger.log(
                f"Failed to compute experimental results, error was: {e}", _LE.WARNING
            )
        # final write to disk
        self.results_summary.to_csv(os.path.join(self.work_dir, "resultsSummary.csv"))
        self.results_all.to_csv(os.path.join(self.work_dir, "resultsAll.csv"))

    def compute_exp_ddG(
        self, lig1: str, lig2: str, node_data: pd.DataFrame, exp_data: pd.DataFrame
    ) -> float:
        """
        Compute the ddG between two ligands from experimental data
        """
        # Map the perturbation-map hash ids to the node ids used in exp_data.
        lig1_id = (
            node_data.loc[node_data["hash_id"] == lig1]["node_id"]
            .to_list()[0]
            .replace(" ", "")
        )
        lig2_id = (
            node_data.loc[node_data["hash_id"] == lig2]["node_id"]
            .to_list()[0]
            .replace(" ", "")
        )
        lig1_dG = float(
            exp_data.loc[exp_data["Ligand"] == lig1_id]["Exp. ΔG"].tolist()[0]
        )
        lig2_dG = float(
            exp_data.loc[exp_data["Ligand"] == lig2_id]["Exp. ΔG"].tolist()[0]
        )
        return lig2_dG - lig1_dG

    def run_analysis(self, jobs: List[str], bVerbose=True):
        """Run the pmx analysis script for each edge in `jobs`, for both the
        ligand and complex branches of every replica.

        (The ligand and complex passes below are intentionally parallel in
        structure; only `wp` differs.)
        """
        for idx, edge in enumerate(jobs):
            for r in range(1, self.get_perturbation_map().replicas + 1):
                # ligand
                wp = "ligand"
                analysispath = "{0}/analyse{1}".format(
                    self._get_specific_path(workPath=self.work_dir, edge=edge, wp=wp),
                    r,
                )
                os.makedirs(analysispath, exist_ok=True)
                stateApath = self._get_specific_path(
                    workPath=self.work_dir,
                    edge=edge,
                    wp=wp,
                    state="stateA",
                    r=r,
                    sim="transitions",
                )
                stateBpath = self._get_specific_path(
                    workPath=self.work_dir,
                    edge=edge,
                    wp=wp,
                    state="stateB",
                    r=r,
                    sim="transitions",
                )
                self._run_analysis_script(
                    analysispath, stateApath, stateBpath, bVerbose=bVerbose
                )
                # protein
                wp = "complex"
                analysispath = "{0}/analyse{1}".format(
                    self._get_specific_path(workPath=self.work_dir, edge=edge, wp=wp),
                    r,
                )
                os.makedirs(analysispath, exist_ok=True)
                stateApath = self._get_specific_path(
                    workPath=self.work_dir,
                    edge=edge,
                    wp=wp,
                    state="stateA",
                    r=r,
                    sim="transitions",
                )
                stateBpath = self._get_specific_path(
                    workPath=self.work_dir,
                    edge=edge,
                    wp=wp,
                    state="stateB",
                    r=r,
                    sim="transitions",
                )
                self._run_analysis_script(
                    analysispath, stateApath, stateBpath, bVerbose=bVerbose
                )

    def _check_result(self, batch: List[List[str]]) -> List[List[bool]]:
        """
        Look in each hybridStrTop dir and check the output pdb files exist for the edges
        """
        # A job counts as successful when all four analysis outputs exist
        # for the complex branch's first analysis directory.
        output_files = ["integ0.dat", "integ1.dat", "results.txt", "wplot.png"]
        results = []
        for subjob in batch:
            subjob_results = []
            for job in subjob:
                subjob_results.append(
                    all(
                        [
                            os.path.isfile(
                                os.path.join(
                                    self.work_dir, job, "complex", "analyse1", f
                                )
                            )
                            for f in output_files
                        ]
                    )
                )
            results.append(subjob_results)
        return results
|
import sys
from django.db import connections
from django.db.utils import ConnectionDoesNotExist, IntegrityError
from django.core.management.base import BaseCommand
from django_comments.models import Comment
from django_comments_xtd.models import XtdComment
__all__ = ['Command']
class Command(BaseCommand):
    help = "Load the xtdcomment table with valid data from django_comments."

    def add_arguments(self, parser):
        # Optional list of DB connection aliases to populate.
        parser.add_argument('using', nargs='*', type=str)

    def populate_db(self, cursor):
        """Insert one XtdComment row per existing django_comments Comment.

        Each row reuses the comment's own id as thread/parent id, with
        level 0, order 1 and followup off.
        NOTE(review): the single-quoted column identifiers are accepted by
        SQLite but not by all backends — confirm the target database.
        """
        for comment in Comment.objects.all():
            sql = ("INSERT INTO %(table)s "
                   " ('comment_ptr_id', 'thread_id', 'parent_id',"
                   " 'level', 'order', 'followup') "
                   "VALUES (%(id)d, %(id)d, %(id)d, 0, 1, 0)")
            cursor.execute(sql % {'table': XtdComment._meta.db_table,
                                  'id': comment.id})

    def handle(self, *args, **options):
        """Populate each requested DB connection (default: 'default').

        Skips unknown connections; aborts with exit code 1 when the target
        table is not empty (IntegrityError).
        """
        total = 0
        using = options['using'] or ['default']
        for db_conn in using:
            try:
                self.populate_db(connections[db_conn].cursor())
                total += XtdComment.objects.using(db_conn).count()
            except ConnectionDoesNotExist:
                print("DB connection '%s' does not exist." % db_conn)
                continue
            except IntegrityError:
                if db_conn != 'default':
                    print("Table '%s' (in '%s' DB connection) must be empty."
                          % (XtdComment._meta.db_table, db_conn))
                else:
                    print("Table '%s' must be empty."
                          % XtdComment._meta.db_table)
                sys.exit(1)
        print("Added %d XtdComment object(s)." % total)
|
# -*- coding:utf-8 -*-
import logging
from logging import Formatter
from colorlog import ColoredFormatter
from mongolog.handlers import MongoHandler
# Module-level singleton: repeated get_logger() calls return the same logger.
logger = None


def get_logger(app,
               name,
               log_level=None,
               log_file=None,
               mongo_host=None,
               mongo_port=None,
               mongo_db=None,
               mongo_collection=None):
    """Build (once) and return the application logger.

    Args:
        app: the Flask application; its logger also receives the file handler.
        name: logger name passed to logging.getLogger.
        log_level: textual level ('debug'..'critical'); DEBUG when unset.
        log_file: optional path; adds file logging for this logger, the
            werkzeug access logger and the app logger.
        mongo_host/mongo_port/mongo_db/mongo_collection: when all four are
            given, also log to MongoDB.

    Returns:
        logging.Logger: the configured singleton logger.
    """
    global logger
    if logger: return logger
    # initialize flask access log
    werkzeug_logger = logging.getLogger('werkzeug')
    # initialize logging log_level
    if not log_level:
        log_level = logging.DEBUG
    else:
        log_level = get_log_level(log_level)
    # initialize logger
    logger = logging.getLogger(name)
    logger.setLevel(log_level)
    # initialize stream log (colored console output)
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(log_level)
    stream_formatter = ColoredFormatter(
        '%(log_color)s%(asctime)s %(white)s[pid:%(process)d] [%(levelname)s] (%(pathname)s:%(lineno)d) %(message)s',
        reset=True,
        log_colors={
            'DEBUG': 'green',
            'INFO': 'yellow',
            'WARNING': 'cyan',
            'ERROR': 'red',
            'CRITICAL':'red',
        })
    stream_handler.setFormatter(stream_formatter)
    logger.addHandler(stream_handler)
    # initialize file log
    if log_file:
        file_handler = logging.FileHandler(log_file)
        file_handler.setLevel(log_level)
        file_formatter = Formatter('%(asctime)s;%(levelname)s;%(pathname)s:%(lineno)d;%(message)s')
        file_handler.setFormatter(file_formatter)
        logger.addHandler(file_handler)
        werkzeug_logger.addHandler(file_handler)
        app.logger.addHandler(file_handler)
    # initialize remote log (only when the full Mongo config is supplied)
    if mongo_collection and mongo_db and mongo_host and mongo_port:
        mongo_handler = MongoHandler.to(db=mongo_db,
                                        collection=mongo_collection,
                                        host=mongo_host,
                                        port=mongo_port,
                                        level=log_level)
        logger.addHandler(mongo_handler)
    return logger
# Map each accepted level name onto the matching stdlib logging constant.
LOG_LEVEL = {
    name: getattr(logging, name.upper())
    for name in ('debug', 'info', 'warning', 'error', 'critical')
}
def get_log_level(level):
    """Return the logging constant for *level*; unknown names map to DEBUG."""
    return LOG_LEVEL.get(level, logging.DEBUG)
|
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import (QTimer, QCoreApplication, Qt, qsrand, QTime, QRectF, QPointF, pyqtSignal, QSize, QPoint, QSettings, QVariant)
from PyQt5.QtGui import (QBrush, QColor, QPainter, QPixmap, QFont, QPalette)
from PyQt5.QtWidgets import (QWidget, QApplication, QGraphicsScene, QGraphicsView, QLabel, QSplitter, QPushButton, QHBoxLayout,
QVBoxLayout, QTextEdit, QGridLayout, QGraphicsRectItem, QGraphicsTextItem, QSizePolicy, QListWidget, QListWidgetItem)
from AnimatedClock import AnimatedClock
from AnimatedCalendar import AnimatedCalendar
from StaticPhotos import StaticPhotos
from CaptionedPhotos import CaptionedPhotos
from WindowToolbar import WindowToolbar
from PhotoDeltas import PhotoDeltas
class MainWindow(QWidget):
    """Frameless top-level window: clock and calendar on the left, photos on
    the right, with window geometry and splitter positions persisted via
    QSettings between runs."""

    def __init__(self, appConfig, projectorControl, parent=None):
        # Network-share file holding user edits (ratings / corrections) to photos.
        self.PHOTO_DELTAS_FILE_NAME = "//MACALLAN/Photos/Photo Info/Deltas/PhotoDeltas.txt"
        QWidget.__init__(self, parent)
        self._projectorControl = projectorControl
        # File used for delta changes to photos
        self.photoDeltas = PhotoDeltas(self.PHOTO_DELTAS_FILE_NAME)
        # Frameless window with a black background
        self.setWindowFlags(Qt.FramelessWindowHint)
        self.setAutoFillBackground(True)
        palette = QPalette()
        palette.setColor(QPalette.Background, Qt.black)
        self.setPalette(palette)
        # Clock and calendar panes
        self.clock = AnimatedClock()
        self.calendar = AnimatedCalendar(calUpdateSecs=600, calServerUrl=appConfig["calServerUrl"])
        # StaticPhotos is the current display; AnimatedPhotos / CaptionedPhotos
        # are drop-in alternatives used previously.
        self.photos = StaticPhotos("//macallan/photos/PhotosMain/", ["jpg"], self.photoDeltas, picChangeMs=5000)
        # Toolbar
        self.windowToolbar = WindowToolbar(self.close, self)
        # Left pane of page: clock above calendar
        self.leftPane = QSplitter(Qt.Vertical)
        self.leftPane.addWidget(self.clock)
        self.leftPane.addWidget(self.calendar)
        # Right pane of page: toolbar above photos
        self.rightPane = QSplitter(Qt.Vertical)
        self.rightPane.addWidget(self.windowToolbar)
        self.rightPane.addWidget(self.photos)
        # Splitter between left and right panes
        self.horzSplitter = QSplitter(Qt.Horizontal)
        self.horzSplitter.addWidget(self.leftPane)
        self.horzSplitter.addWidget(self.rightPane)
        self.layout = QHBoxLayout(self)
        self.layout.addWidget(self.horzSplitter)
        self.setLayout(self.layout)
        # Restore window geometry and splitter positions from the last run.
        settings = QSettings("PhotoCalendar")
        settings.beginGroup("MainWindow")
        position = settings.value("Position", QVariant(QPoint(0, 0)))
        self.move(position)
        size = settings.value("Size", QVariant(QSize(1920, 1200)))
        self.resize(size)
        if settings.value("HorzSplitter") is not None:
            self.horzSplitter.restoreState(settings.value("HorzSplitter"))
        if settings.value("LeftPaneSplitter") is not None:
            self.leftPane.restoreState(settings.value("LeftPaneSplitter"))
        if settings.value("RightPaneSplitter") is not None:
            self.rightPane.restoreState(settings.value("RightPaneSplitter"))
        settings.endGroup()
        # BUG FIX: the original called self.photos.start() twice (before and
        # after a block of commented-out layout code), which could start
        # duplicate photo-rotation timers. Start it exactly once.
        self.photos.start()

    def closeEvent(self, event):
        """Persist geometry/splitter state, stop animated children, accept."""
        print("Main window close event")
        settings = QSettings("PhotoCalendar")
        settings.beginGroup("MainWindow")
        settings.setValue("Size", QVariant(self.size()))
        settings.setValue("Position", QVariant(self.pos()))
        settings.setValue("HorzSplitter", QVariant(self.horzSplitter.saveState()))
        settings.setValue("LeftPaneSplitter", QVariant(self.leftPane.saveState()))
        settings.setValue("RightPaneSplitter", QVariant(self.rightPane.saveState()))
        settings.endGroup()
        # Stop the sub-elements before the window goes away
        self.calendar.stop()
        self.clock.stop()
        self.photos.stop()
        # Accept the close event
        event.accept()

    def resizeEvent(self, evt=None):
        # Debug aid only: report the new window size.
        print("MainWindow size x,y", self.width(), self.height())

    def test(self):
        """Pass-through to the projector control's self-test."""
        self._projectorControl.test()

    def keyPressEvent(self, event):  # event is a QKeyEvent
        """Keyboard shortcuts: arrows navigate photos, 0-5 set the current
        photo's rating, D/L flag date/location errors."""
        key = event.key()
        if key == QtCore.Qt.Key_Left:
            self.photos.movePrev()
        elif key == QtCore.Qt.Key_Right:
            self.photos.moveNext()
        elif QtCore.Qt.Key_0 <= key <= QtCore.Qt.Key_5:
            # Qt digit key codes are the contiguous ASCII codes 0x30..0x39,
            # so the rating is simply the offset from Key_0.
            self.photoDeltas.setRating(self.photos, key - QtCore.Qt.Key_0)
            self.photos.reshow()
        elif key == QtCore.Qt.Key_D:
            self.photoDeltas.setDateError()
            self.photos.reshow()
        elif key == QtCore.Qt.Key_L:
            self.photoDeltas.setLocationError()
            self.photos.reshow()
|
#!/usr/bin/python
import sys, os, string
ROOT = os.path.dirname(os.path.abspath(__file__))
#sys.path.insert(0, os.path.join(ROOT, '..'))
#sys.path.append(ROOT+"/lib")
import markup, datetime
def get_classify_stats(ocf,cf,ck,out_dir,outf,outfo,taxa_level):
contigs_by_class = { }
origContigsByClass = { }
origClassifiedCount = 0
classifiedCount = 0
id_class = { }
id_class["0"] = "UNKNOWN"
orig_class_file = open(ocf)
class_file = open(cf)
class_key = open(ck)
#pass outdir as argument
out = open(out_dir + os.sep + outf, 'w')
orig_out = open(out_dir + os.sep + outfo, 'w')
# parse in key file
for line in class_key:
line = line.strip()
fields = line.split("\t")
# f1 is id, f2 is class name
if len(fields) != 2:
print "Error in file format\n"
else:
id_class[fields[0]] = fields[1]
# parse original file to identity ambiguous assignment (which is one more than max previous ID)
maxClassID = 0;
for line in orig_class_file:
line = line.strip()
fields = line.split()
# f1 is contig, f2 is class
if len(fields) != 2:
print "Error in file format\n"
elif maxClassID < int(fields[1]):
maxClassID = int(fields[1])
if origContigsByClass.has_key(fields[1]):
origContigsByClass[fields[1]]+=1
else:
origContigsByClass[fields[1]] = 1
origClassifiedCount += 1
id_class[str(maxClassID+1)] = "AMBIGUOUS"
# parse contig class file
for line in class_file:
line = line.strip()
fields = line.split()
# f1 is contig, f2 is class
if len(fields) != 2:
print "Error in file format\n"
elif contigs_by_class.has_key(fields[1]):
contigs_by_class[fields[1]] += 1
else:
contigs_by_class[fields[1]] = 1
if fields[1] > 0:
classifiedCount += 1
# output stats
# todo: add info on ORFs and read counts
summary = markup.page()
summary.init(bodyattrs={'style':"margin:0px"})
summary.p("Originally classified contigs:")
summary.table(border="1")
for key in origContigsByClass:
try:
class_name = id_class[key]
except KeyError:
continue
summary.tr()
summary.add("<td align=\"left\">%s</td><td align=\"right\">%d</td><td align=\"right\">%3.2f%%</td>"%(class_name, origContigsByClass[key], origContigsByClass[key]/float(origClassifiedCount)*100))
summary.tr.close()
summary.tr()
summary.add("<td align=\"left\"Total classified:</td><td align=\"right\">%d</td>"%(origClassifiedCount))
summary.tr.close()
summary.table.close()
classify = markup.page()
classify.init(bodyattrs={'style':"margin:0px"})
classify.p("Classified contigs:")
classify.table(border="1")
for key in contigs_by_class:
try:
class_name = id_class[key]
except KeyError:
continue
classify.tr()
classify.add("<td align=\"left\"><a target=\"_blank\" href=\"../%s.classified/%s/\">%s</a></td><td align=\"right\">%d</td><td align=\"right\">%3.2f%%</td>"%(taxa_level, class_name, class_name, contigs_by_class[key], contigs_by_class[key]/float(classifiedCount)*100))
classify.tr.close()
classify.tr()
classify.add("<td align=\"left\"Total classified:</td><td align=\"right\">%d</td>"%(classifiedCount))
classify.tr.close()
classify.table.close()
additional = classifiedCount - origClassifiedCount
if additional >= 0:
summary.p("Total additional classified contigs: %d"%(additional))
else:
summary.p("Total contigs classified as unknown from known: %d"%(abs(additional)))
summary.p.close();
orig_out.write(summary.__str__())
out.write(classify.__str__())
orig_out.close()
out.close()
|
from django.db import models
from datetime import datetime
from django.core.validators import MaxValueValidator, MinValueValidator
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Project(models.Model):
    """Top-level container that groups assessments."""
    name = models.CharField(max_length=100)
    # NOTE(review): datetime.now is timezone-naive; with USE_TZ enabled,
    # django.utils.timezone.now is normally preferred -- confirm settings.
    added = models.DateTimeField(default=datetime.now)
    def __str__(self): # __unicode__ on Python 2
        return self.name
    class Meta:
        ordering = ('name',)
@python_2_unicode_compatible
class Assessment(models.Model):
    """An assessment belonging to a Project; deleted along with it."""
    name = models.CharField(max_length=100)
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    # NOTE(review): naive datetime.now default -- see Project.added.
    added = models.DateTimeField(default=datetime.now)
    def __str__(self): # __unicode__ on Python 2
        return self.name
    class Meta:
        ordering = ('name',)
@python_2_unicode_compatible
class Sh0t(models.Model):
    """A finding attached to an Assessment (nullable), ranked by severity."""
    title = models.CharField(max_length=200)
    body = models.TextField(default="")
    assessment = models.ForeignKey(Assessment, null=True, on_delete=models.CASCADE)
    # NOTE(review): naive datetime.now default -- see Project.added.
    added = models.DateTimeField(default=datetime.now)
    # Severity is constrained to the inclusive range 0..5 by the validators.
    severity = models.IntegerField(default=5, validators=[MinValueValidator(0), MaxValueValidator(5)])
    def __str__(self): # __unicode__ on Python 2
        return self.title
    class Meta:
        ordering = ('severity','title',)
@python_2_unicode_compatible
class Flag(models.Model):
    """A to-do item for an Assessment, with a done marker and manual order."""
    title = models.CharField(max_length=100)
    note = models.TextField(default="")
    assessment = models.ForeignKey(Assessment, null=True, on_delete=models.CASCADE)
    done = models.BooleanField(default=False)
    # NOTE(review): naive datetime.now default -- see Project.added.
    added = models.DateTimeField(default=datetime.now)
    order = models.IntegerField(default=1)
    def __str__(self): # __unicode__ on Python 2
        return self.title
    class Meta:
        ordering = ('title',)
@python_2_unicode_compatible
class Template(models.Model):
    """A reusable body-text template, looked up by name."""
    name = models.CharField(max_length=100)
    body = models.TextField(default="")
    # NOTE(review): naive datetime.now default -- see Project.added.
    added = models.DateTimeField(default=datetime.now)
    def __str__(self): # __unicode__ on Python 2
        return self.name
    class Meta:
        ordering = ('name',)
|
# -*- coding: utf-8 -*-
"""
Author : Jacques Flores
Created : October 17th,2019
About: Script for creating datasets in Dataverse.
An Empty JSON file with Dataverse structure is imported and converted into a JSON dict
Metadata is imported from an excel file into a pandas dataframe and written into the empty JSON formatted string.
"""
from pyDataverse import api
import pandas as pd
# Confidential API Token (Do Not Distribute) ****last four digits removed)
# SECURITY NOTE(review): credentials should be loaded from an environment
# variable or config file rather than committed to source control.
apitoken = "38404b17-46f9-4fe5-808e-a4a38bd80aea"
# Demo Dataverse server
dtvserver = "https://dataverse.nl"
#Loading connection and authentication
dataverse = api.Api(dtvserver,apitoken)
#read excel file with metadata as pandas dataframe
xlfile = "DH2019_paperswithfilesandhandlesCopy.xlsx"
xl = pd.read_excel(xlfile, converters={'paperID': str})
entries = xl['paperID'].count()
# One iteration per spreadsheet row (the original used a manual while-counter).
for dataset in range(entries):
    row = xl.loc[dataset]
    fileid = row['paperID']
    handle = row['handle']
    # Upload files. The abstract must be named "<paperID>a.pdf" (e.g. 100a.pdf)
    # and a poster "<paperID>p.pdf" (e.g. 100p.pdf); change the patterns below
    # if the files are named differently.
    # The abstract is uploaded for every entry; posters add a second file.
    dataverse.upload_file(handle , 'filesa/%sa.pdf' % (fileid))
    if row['contribution_type'] == 'Poster':
        dataverse.upload_file(handle , 'filesa/%sp.pdf' % (fileid))
    #publish dataset and print response
    pubdset = dataverse.publish_dataset(handle, type = "major", auth = True)
    print ('-' * 40)
    print (pubdset.json())
    print (pubdset.status_code)
|
from bluesky.plans import count
# Single count with no detectors; all data comes from the attached flyers.
ct = count([])
#ct.flyers = [topoff_inj, diag6_flyer5, diag6_flyer1]
# NOTE(review): diag6_flyer5 / diag6_flyer1 (and RE, db below) are presumably
# defined in the IPython profile startup namespace -- confirm before running
# this standalone.
ct.flyers = [diag6_flyer5, diag6_flyer1]
uid, = RE(ct)
# NOTE(review): comment says "one event stream per flyer" but 3 descriptors
# are expected with only 2 flyers -- presumably primary + one per flyer.
assert len(db[uid].descriptors) == 3 # one event stream per flyer
|
# datanectar data science chains should live here
|
## Usando o Native Baise vamos fazer um programa que preveja a probabilidade de acidentes de veiculos
import pandas as pd ## Essa é a biblioteca que vamos usar para manipulação de dados
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import accuracy_score
## Essa biblioteca permite fazermos a divisão entre os dados que serão utilizados para treino e para teste
from yellowbrick.classifier import ConfusionMatrix
## Essa biblioteca vai nos permitir visualizar a matriz de confusão
# Naive Bayes model predicting vehicle-accident severity from insurance data.
baseDados = pd.read_csv('insurance.csv')
# The spreadsheet carries its own row-index in the first column; drop it so
# the index numbers cannot influence the model. (Careful to drop the right one.)
baseDados = baseDados.drop(columns=['Unnamed: 0'])
# Column "Accident" (position 7, not the last column) holds the class labels.
baseDados.Accident.unique()  # quick look at the distinct class labels
# Predictor matrix: every column except 7 (the class); .values yields the
# ndarray format expected by scikit-learn.
atributos = baseDados.iloc[:,[0,1,2,3,4,5,6,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26]].values
# Target vector: just the class column.
classe = baseDados.iloc[:,7].values
####################### DATA TRANSFORMATION ####################################
# The attributes are textual; encode each column as integers with LabelEncoder.
labelencoder = LabelEncoder()
# The original repeated the fit_transform line once per column (26 times,
# admitting a loop would do); this loop is exactly equivalent.
for coluna in range(atributos.shape[1]):
    atributos[:,coluna] = labelencoder.fit_transform(atributos[:,coluna])
################ TRAIN / TEST SPLIT (70% / 30%, deterministic) #################
# random_state=0 makes the split reproducible across runs.
atributosTreinamentos, atributosTestes, classeTreinamento, classeTeste = train_test_split(atributos, classe,
                                                                                          test_size = 0.3,
                                                                                          random_state = 0)
# Fit a Gaussian Naive Bayes model on the training portion.
modelo = GaussianNB()
modelo.fit(atributosTreinamentos, classeTreinamento)
# Predict the held-out test rows with the fitted model.
previsoes = modelo.predict(atributosTestes)
# Fraction of test rows predicted correctly (~0.86 on this dataset).
acuracidade = accuracy_score(classeTeste, previsoes)
############################### CONFUSION MATRIX ##############################
# Render the confusion matrix twice: Portuguese labels, then English labels.
confusao = ConfusionMatrix(modelo, classes= ["Nenhum", "Severo", "Leve", "Moderado"] )
confusao.fit(atributosTreinamentos, classeTreinamento)
confusao.score(atributosTestes,classeTeste)
confusao.poof()
confusao = ConfusionMatrix(modelo, classes= ["None", "Severe", "Mild", "Moderate"] )
confusao.fit(atributosTreinamentos, classeTreinamento)
confusao.score(atributosTestes,classeTeste)
confusao.poof()
from flask import Flask
# Single module-level Flask application object.
app = Flask(__name__)
# Load settings from the 'config' object inside the config module.
app.config.from_object('config.config')
# Imported last so the views module can register routes on the fully
# configured `app` (the usual circular-import pattern in small Flask apps).
from app import views
|
import logging
# Re-export the package's public submodules and the Account class.
from terra import api
from terra import msg
from terra import utils
from terra.account import Account
# Explicit public API of the package.
__all__ = ["api", "msg", "utils", "Account"]
__version__ = "0.8.0"
# Default this package's logger to INFO so importing applications get a
# sane verbosity out of the box.
logging.getLogger(__name__).setLevel(logging.INFO)
|
from enum import Enum
class NodeState(Enum):
    """Role of a node within a segment chain; Node traversals walk parents
    only while the state is CHILD."""
    ROOT = 1      # chain start
    CHILD = 2     # interior node with a parent link
    TERMINAL = 3  # chain end marker
class Node():
    """One segment in a left-to-right chain of segments.

    Nodes link to their ``parent``; walking parents while the state is
    CHILD recovers the chain back to its start.
    """

    def __init__(self,
                 segment, pos,
                 nb_remaining,
                 level=0,
                 duplicates=1,
                 parent=None,
                 state=NodeState.CHILD):
        self.state = state
        self.parent = parent
        self.segment = segment
        # Number of interchangeable segments this node stands for.
        self.duplicates = duplicates
        self.pos = pos
        self.length = len(segment)
        # Position where the next segment would begin.
        self.next_pos = pos + self.length
        self.level = level
        self.nb_remaining = nb_remaining
        # Simplified from `False if nb_remaining > 0 else True`.
        self.is_done = nb_remaining <= 0

    def __repr__(self):
        return '<Node>%s' % self.segment

    def _chain_from_self(self):
        """Yield this node then each ancestor while they are CHILD nodes."""
        current = self
        while current.state == NodeState.CHILD:
            yield current
            current = current.parent

    def get_sequence(self, sep='|'):
        """Segments from the chain start down to this node, joined by *sep*."""
        segments = [node.segment for node in self._chain_from_self()]
        segments.reverse()
        return sep.join(segments)

    def get_length_sequence(self):
        """Segment lengths from the chain start down to this node."""
        lengths = [node.length for node in self._chain_from_self()]
        lengths.reverse()
        return lengths

    def get_sum_length_sequence(self):
        """Total length of all segments in the chain."""
        return sum(self.get_length_sequence())

    def get_nb_options(self):
        """Product of the duplicate counts along the chain."""
        nb_options = 1
        for node in self._chain_from_self():
            nb_options *= node.duplicates
        return nb_options

    def get_nb_options_from_root(self):
        # Was an exact line-for-line duplicate of get_nb_options(); now
        # delegates, kept only for backward compatibility with callers.
        return self.get_nb_options()

    def get_sequence_start(self):
        """Return the first non-CHILD ancestor (the start of the chain)."""
        current = self
        while current.state == NodeState.CHILD:
            current = current.parent
        return current
|
import sys
sys.path.append('../')
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import normalize, MinMaxScaler
import re
from math import sqrt, pow
import os
import joblib
from keras.layers import TimeDistributed
from sklearn.model_selection import train_test_split
from scipy.stats import shapiro, normaltest
# load the visualized part (if needed)
from Parameter_value_performance_anomaly import visualized_value_vector
# load the module to transform the value vector to numerical data --- step1
from Parameter_value_performance_anomaly import transform_numerical_data
# load the module to generate the matrix for every log key sequence --- step2
from Parameter_value_performance_anomaly import gen_log_key_matrix
# load the module to analyse the matrix for anomaly detection --- step3
from Parameter_value_performance_anomaly import matrix_analyse_report_anomaly
import optparse
# compute the confidence intervial
from Parameter_value_performance_anomaly import anomaly_predict
if __name__ == "__main__":
    # ---- command-line parsing ----
    parser = optparse.OptionParser('usage %prog --p1 <log_value_vector> --p2 <key_num_para_dict> --p3 <Event_npy>')
    parser.add_option('--p1', dest = 'log_value_vector_filename', type = 'string', help = 'Please input the path of the log_value_vector file:')
    parser.add_option('--p2', dest = 'para_dict_filename', type = 'string', help = 'Please input the path of the key_num_para_dict file:')
    parser.add_option('--p3', dest = 'Event_npy_folder', type = 'string', help = 'Please input the folder to save the event matrix for every log key:')
    (options, args) = parser.parse_args()
    log_value_vector_filename = options.log_value_vector_filename
    para_dict_filename = options.para_dict_filename
    Event_npy_folder = options.Event_npy_folder
    # ================== part1: load the dataframe for parameter detection =================
    log_value_vector_csv_fd = pd.read_csv(log_value_vector_filename)
    # ================== part2: transform textual values to numerical data ================
    key_para_dict_filename = '../Dataset/Linux/Malicious_Separate_Structured_Logs/key_para_dict.csv'
    # indexed variant of the dict so values can be traced back later
    key_para_dict_index_filename = '../Dataset/Linux/Malicious_Separate_Structured_Logs/key_para_dict_index.csv'
    key_para_dict, fd_id = transform_numerical_data.vocabulary_generate(log_value_vector_csv_fd, key_para_dict_filename,\
                                                                        key_para_dict_index_filename)
    # handles the exceptions raised during template computation
    tokens = transform_numerical_data.tokens_generate(key_para_dict)
    tokens_dict_filename = '../Dataset/Linux/Malicious_Separate_Structured_Logs/tokens_dict.pkl'
    tokens_encode_dict = transform_numerical_data.token_dict(tokens, tokens_dict_filename)
    # split the parameter value vector into separate columns
    # (fd_id is the copied csv; list_name holds the "value x" column names)
    fd_id, list_name = transform_numerical_data.split_vectors(fd_id, log_value_vector_filename)
    # replace the textual data with its numerical encoding
    fd_value = transform_numerical_data.map_vectors(fd_id, list_name, log_value_vector_filename, tokens_encode_dict)
    # integrate the vector lines into one and drop the duplicated column
    integrated_fd_value = transform_numerical_data.integrate_lines(fd_value, list_name)
    transform_numerical_data.delete_repeated_line(integrated_fd_value, log_value_vector_filename)
    # ================== part3: generate a separate matrix per log key ===================
    fd_parameter = pd.read_csv(log_value_vector_filename)
    fd_parameter = fd_parameter.copy()
    # writes key_num_para_dict.csv to para_dict_filename as a side effect
    key_num_para_dict = gen_log_key_matrix.log_vectors(fd_parameter, para_dict_filename)
    # create the matrices for all the eventIDs
    for key in key_num_para_dict.keys():
        print("the key is:", key)
        gen_log_key_matrix.str_array(key_num_para_dict, key, Event_npy_folder)
    # ================== part4: analyse the matrices ===================
    filenames = []
    root_dir = '../Dataset/Linux/Malicious_Separate_Structured_Logs/Event_npy/'
    # r=root, d=directories, f=files
    if not os.path.exists(root_dir):
        os.mkdir(root_dir)
    else:
        for r, d, f in os.walk(root_dir):
            for file in f:
                if file.endswith('.npy'):
                    filenames.append(os.path.join(r, file))
    # fixed seed so the train/test splits are reproducible
    seed = 0
    rmses = []
    rmses_dict = {}
    # anomaly logs keyed by file number, plus separately the false positives
    suspicious_anomaly_dict, fp_logs_dict = {}, {}
    for file in filenames:
        # reuse a previously computed result file when one exists
        if os.path.isfile(file + '_rmses.pkl'):
            rmses = joblib.load(file + '_rmses.pkl')
        else:
            print("we are processing matrix:", file)
            matrix = np.load(file)
            # choose n_steps by sequence length; with test_size=0.4 the
            # matrix needs at least 8 rows, with 0.5 at least 4
            if matrix.shape[0] >= 8:
                n_steps = 3
                X, Y = matrix_analyse_report_anomaly.training_data_generate(matrix, n_steps)
                train_x, test_x, train_y, test_y = train_test_split(X, Y, test_size=0.4, random_state=seed)
            elif matrix.shape[0] >= 4:
                n_steps = 1
                X, Y = matrix_analyse_report_anomaly.training_data_generate(matrix, n_steps)
                train_x, test_x, train_y, test_y = train_test_split(X, Y, test_size=0.5, random_state=seed)
            else:
                continue
            # train the model and predict the held-out rows
            model = matrix_analyse_report_anomaly.LSTM_model(train_x, train_y)
            print("the test_x is:", test_x)
            yhat = model.predict(test_x)
            print("the predicted y is:", yhat)
            rmse, means = matrix_analyse_report_anomaly.mean_squared_error_modified(test_y, yhat)
            rmse = sqrt(rmse)
            print('Test RMSE: %.3f' % rmse)
            # errors that follow the Gaussian profile are normal; outliers
            # are treated as anomalies downstream
            rmses.append(rmse)
            rmses_dict[file] = rmse
            # persist so reruns can skip the training above
            joblib.dump(rmses, file + '_rmses.pkl')
            joblib.dump(rmses_dict, file + '_rmses_dict.pkl')
        # ===== predict the anomaly logs =====
        file_number = re.findall(r'\d+', file)  # raw string: avoids the invalid '\d' escape warning
        threshold1, threshold2, threshold3, suspicious_logs, \
            fp_logs = anomaly_predict.anomaly_report(rmses,file_number)
        # line chart of the error values with the threshold levels
        x_list = list(range(len(rmses)))
        if len(x_list) <= 1:
            pass
        else:
            plt.plot(x_list, rmses)
            print(threshold1,threshold2,threshold3)
            plt.axhline(y=threshold1, linestyle = "-", label = '98%')
            plt.axhline(y=threshold2, linestyle = "-", label = '99%')
            plt.axhline(y=threshold3, linestyle = "-", label = '99.9%')
            plt.ylabel("Errors Values")
            plt.title(file_number[0] + ' ' + 'Errors Distribution')
            plt.show()
        # BUG FIX: the original wrote `== 0 & len(fp_logs) == 0`; '&' binds
        # tighter than '==', so the test degenerated to checking only
        # suspicious_logs. Use the boolean 'and' of both emptiness checks.
        if len(suspicious_logs) == 0 and len(fp_logs) == 0:
            pass
        else:
            suspicious_anomaly_dict[file_number[0]] = suspicious_logs
            fp_logs_dict[file_number[0]] = fp_logs
    # save the aggregated results
    joblib.dump(suspicious_anomaly_dict,'./result/suspicious_anomaly.pkl')
    joblib.dump(fp_logs_dict, './result/fp_logs.pkl')
|
class Solution(object):
    def reverseVowels(self, s):
        """
        :type s: str
        :rtype: str
        Reverse only the vowels of ``s``, leaving every other character
        in place.  Collect the vowel characters once, then rebuild the
        string, emitting the collected vowels in reverse order.
        """
        vowels = 'aeiouAEIOU'
        stack = [ch for ch in s if ch in vowels]
        rebuilt = []
        for ch in s:
            # Popping from the end of `stack` yields the vowels reversed.
            rebuilt.append(stack.pop() if ch in vowels else ch)
        return ''.join(rebuilt)
if __name__ == '__main__':
    # '\o' is an invalid escape sequence (DeprecationWarning, future
    # SyntaxError); spell the backslash explicitly — same bytes.
    s = 'l\\oe'
    # print() call form: the original Python 2 print statement is a
    # SyntaxError on Python 3; the call form works on both.
    print(Solution().reverseVowels(s))
def left_ind(P):
    """Index of the left-most point of P (smallest x, ties broken in
    favour of the larger y).  Single linear scan."""
    best = 0
    for idx, pt in enumerate(P):
        # (x, -y) ordering encodes "smaller x, then larger y wins";
        # strict < keeps the earliest index on exact ties, as before.
        if (pt[0], -pt[1]) < (P[best][0], -P[best][1]):
            best = idx
    return best
def pos(p, q, r):
    """Orientation of the ordered triple (p, q, r):
    0 = collinear, 1 = clockwise, 2 = counter-clockwise."""
    cross = (q[1] - p[1]) * (r[0] - q[0]) - (q[0] - p[0]) * (r[1] - q[1])
    if cross > 0:
        return 1
    if cross < 0:
        return 2
    return 0
def covex(P, n):
    """Print the convex hull of the n points in P, one "x y" pair per
    line, using the gift-wrapping (Jarvis march) walk.  Needs at least
    three points; otherwise prints nothing."""
    if n < 3:
        return
    start = left_ind(P)
    hull = []
    current = start
    while True:
        hull.append(current)
        candidate = (current + 1) % n
        # Pick the point that is counter-clockwise of every other one
        # relative to `current` — the next hull vertex.
        for j in range(n):
            if pos(P[current], P[j], P[candidate]) == 2:
                candidate = j
        current = candidate
        if current == start:
            break
    for idx in hull:
        print(P[idx][0], P[idx][1])
# Read n points from stdin and print their convex hull.
n = int(input('Enter # of points:'))
P = []
for _ in range(n):
    print('Enter x & y co-ordinates:')  # fixed prompt typo ("co-orinates")
    # Keep only the first two whitespace-separated integers per line.
    P.append([int(e) for e in input().split()][0:2])
print('The covering points are:')
covex(P, len(P))
|
# Generated by Django 3.1.3 on 2020-12-10 06:10
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration (Django 3.1.3): creates the store.Category
    # model.  Kept byte-identical apart from comments — applied migration
    # files must not be restructured.
    dependencies = [
        # Must run after store migration 0021.
        ('store', '0021_auto_20201209_1613'),
    ]
    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                # Standard auto-increment primary key.
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, null=True)),
                # NOTE(review): an IntegerField named `category` on the
                # Category model itself looks like it may be meant as a
                # parent/related id — confirm against the application code.
                ('category', models.IntegerField(blank=True, null=True)),
            ],
        ),
    ]
|
import ujson as json
class SimpleJsonRpc(object):
    """Minimal wrapper around a JSON-RPC request.
    Accepts either an already-decoded dict or a raw JSON string and
    exposes the request's method name and (optional) parameters.
    """
    def __init__(self, data):
        payload = json.loads(data) if isinstance(data, str) else data
        self._id = payload['id']
        self._method = payload['method']
        # 'params' is optional in JSON-RPC; fall back to an empty dict.
        self._params = payload.get('params', {})
    @property
    def method(self):
        """Name of the requested method."""
        return self._method
    @property
    def params(self):
        """Request parameters (empty dict when omitted)."""
        return self._params
|
import numpy as np
from .state_space_model import StateSpaceModel
class Kalman(StateSpaceModel):
    """
    A class to perform kalman filtering or smoothing
    """

    def __init__(
        self,
        transition,
        observation,
        process_noise,
        measurement_noise,
        init_state_mean,
        init_state_cov
    ):
        """
        construct state space model to perform kalman filtering or smoothing
        z_n ~ N(z_n|Az_n-1,Gamma)
        x_n ~ N(x_n|Cz_n,Sigma)
        z_1 ~ N(z_1|mu_0, P_0)
        Parameters
        ----------
        transition : (ndim_hidden, ndim_hidden) np.ndarray
            transition matrix of hidden variable (A)
        observation : (ndim_observe, ndim_hidden) np.ndarray
            observation matrix (C)
        process_noise : (ndim_hidden, ndim_hidden) np.ndarray
            covariance matrix of process noise (Gamma)
        measurement_noise : (ndim_observe, ndim_observe) np.ndarray
            covariance matrix of measurement noise (Sigma)
        init_state_mean : (ndim_hidden,) np.ndarray
            mean parameter of initial hidden variable (mu_0)
        init_state_cov : (ndim_hidden, ndim_hidden) np.ndarray
            covariance parameter of initial hidden variable (P_0)
        Attributes
        ----------
        ndim_hidden : int
            dimensionality of hidden variable
        ndim_observe : int
            dimensionality of observed variable
        """
        # Shape-consistency checks between all model matrices.
        assert init_state_mean.ndim == 1
        assert (
            transition.ndim == observation.ndim == process_noise.ndim
            == measurement_noise.ndim == init_state_cov.ndim == 2
        )
        assert (
            transition.shape[0] == transition.shape[1]
            == process_noise.shape[0] == process_noise.shape[1]
            == observation.shape[1] == init_state_mean.size
            == init_state_cov.shape[0] == init_state_cov.shape[1]
        )
        assert (
            observation.shape[0] == measurement_noise.shape[0]
            == measurement_noise.shape[1]
        )
        self.ndim_hidden = init_state_mean.size
        self.ndim_observe = observation.shape[0]
        self.transition = transition
        self.process_noise = process_noise
        self.observation = observation
        self.measurement_noise = measurement_noise
        self.init_state_mean = init_state_mean
        self.init_state_cov = init_state_cov

    def filtering(self, seq):
        r"""
        kalman filter
        1. prediction
            p(z_n+1|x_1:n) = \int p(z_n+1|z_n)p(z_n|x_1:n)dz_n
        2. filtering
            p(z_n+1|x_1:n+1) \propto p(x_n+1|z_n+1)p(z_n+1|x_1:n)
        Parameters
        ----------
        seq : (N, ndim_observe) np.ndarray
            observed sequence
        Returns
        -------
        mean : (N, ndim_hidden) np.ndarray
            mean parameter of posterior hidden distribution
        cov : (N, ndim_hidden, ndim_hidden) np.ndarray
            covariance of posterior hidden distribution
        """
        kalman_gain = self.init_state_cov @ self.observation.T @ np.linalg.inv(
            self.observation @ self.init_state_cov @ self.observation.T
            + self.measurement_noise)
        mean = [self.init_state_mean + kalman_gain @ (seq[0] - self.observation @ self.init_state_mean)]
        # BUG FIX: the posterior covariance update (I - K C) P lives in the
        # hidden-state space (K C is ndim_hidden x ndim_hidden), so the
        # identity must be np.eye(ndim_hidden); the original
        # np.eye(self.ndim_observe) broke whenever the observed and hidden
        # dimensionalities differed.
        cov = [(np.eye(self.ndim_hidden) - kalman_gain @ self.observation) @ self.init_state_cov]
        for s in seq[1:]:
            # Prediction step: propagate the previous posterior forward.
            mean_predict = self.transition @ mean[-1]
            cov_predict = (
                self.transition @ cov[-1] @ self.transition.T
                + self.process_noise)
            if np.logical_and.reduce(np.isnan(s)):
                # Entirely-missing observation: keep the prediction as-is.
                mean.append(mean_predict)
                cov.append(cov_predict)
            else:
                kalman_gain = cov_predict @ self.observation.T @ np.linalg.inv(
                    self.observation @ cov_predict @ self.observation.T
                    + self.measurement_noise)
                mean.append(mean_predict + kalman_gain @ (s - self.observation @ mean_predict))
                # Same hidden-space identity fix as above.
                cov.append(
                    (np.eye(self.ndim_hidden) - kalman_gain @ self.observation)
                    @ cov_predict)
        mean = np.asarray(mean)
        cov = np.asarray(cov)
        return mean, cov

    def smoothing(self):
        # Not implemented; kept so the public interface is unchanged.
        raise NotImplementedError
|
# Adapted from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/learn/python/learn/datasets/mnist.py
import gzip
import numpy as np
import tensorflow as tf
# added this to see
tf.compat.v1.disable_eager_execution()
#from tensorflow.contrib.learn.python.learn.datasets import base
from influence.dataset import DataSet
import os
def _read32(bytestream):
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
def extract_images(f):
    """Extract the images into a 4D uint8 np array [index, y, x, depth].
    Args:
      f: A file object that can be passed into a gzip reader.
    Returns:
      data: A 4D uint8 np array [index, y, x, depth].
    Raises:
      ValueError: If the bytestream does not start with 2051.
    """
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2051:
            raise ValueError('Invalid magic number %d in MNIST image file: %s' %
                             (magic, f.name))
        # IDX header: image count, then row and column dimensions.
        num_images, rows, cols = (_read32(bytestream) for _ in range(3))
        raw = bytestream.read(rows * cols * num_images)
        # Trailing depth axis of 1: MNIST images are grayscale.
        return np.frombuffer(raw, dtype=np.uint8).reshape(num_images, rows, cols, 1)
def extract_labels(f, one_hot=False, num_classes=10):
    """Extract the labels into a 1D uint8 np array [index].
    Args:
      f: A file object that can be passed into a gzip reader.
      one_hot: Does one hot encoding for the result.
      num_classes: Number of classes for the one hot encoding.
    Returns:
      labels: a 1D uint8 np array, or an (N, num_classes) float one-hot
        matrix when one_hot is True.
    Raises:
      ValueError: If the bytestream doesn't start with 2049.
    """
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _read32(bytestream)
        if magic != 2049:
            raise ValueError('Invalid magic number %d in MNIST label file: %s' %
                             (magic, f.name))
        num_items = _read32(bytestream)
        buf = bytestream.read(num_items)
        labels = np.frombuffer(buf, dtype=np.uint8)
        if one_hot:
            # BUG FIX: the helper `dense_to_one_hot` the original called is
            # not defined anywhere in this module, so one_hot=True raised
            # NameError.  Inline the upstream helper's behaviour: a float
            # zero matrix with a single 1 per row.
            num_labels = labels.shape[0]
            labels_one_hot = np.zeros((num_labels, num_classes))
            labels_one_hot.flat[np.arange(num_labels) * num_classes + labels.ravel()] = 1
            return labels_one_hot
        return labels
# adapted from: https://github.com/leriomaggio/deep-learning-keras-tensorflow/blob/master/2.%20Deep%20Learning%20Frameworks/mnist_data.py
def maybe_download(filename, work_directory, SOURCE_URL):
    """Download the data from Yann's website, unless it's already here.
    Args:
      filename: local file name to store the download under.
      work_directory: directory for downloads (created if missing).
      SOURCE_URL: the complete URL of the remote file.  Every caller in
        this module already passes base-URL + filename, so the URL is
        used as-is; the original appended `filename` a second time,
        producing a doubled (invalid) URL.
    Returns:
      The local file path.
    """
    # Local import: urllib.request was never imported at module level,
    # so the download path raised NameError.
    import urllib.request
    if not tf.io.gfile.exists(work_directory):
        # tf.io.gfile exposes `makedirs` (lowercase); `MakeDirs` was the
        # old tf.gfile spelling and raises AttributeError here.
        tf.io.gfile.makedirs(work_directory)
    filepath = os.path.join(work_directory, filename)
    if not tf.io.gfile.exists(filepath):
        filepath, _ = urllib.request.urlretrieve(SOURCE_URL, filepath)
        with tf.io.gfile.GFile(filepath) as f:
            size = f.size()
        print('Successfully downloaded', filename, size, 'bytes.')
    return filepath
def load_mnist(train_dir, validation_size=5000):
    """Fetch the four MNIST archives into *train_dir* (downloading when
    absent), carve *validation_size* examples off the front of the train
    split, scale images to [0, 1] float32 and wrap each split in DataSet.
    Returns (container, train, validation, test) — see the NOTE below on
    what `container` actually is.
    """
    SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
    TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
    TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
    TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
    TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
    local_file = maybe_download(TRAIN_IMAGES, train_dir,
                                SOURCE_URL + TRAIN_IMAGES)
    with open(local_file, 'rb') as f:
        train_images = extract_images(f)
    local_file = maybe_download(TRAIN_LABELS, train_dir,
                                SOURCE_URL + TRAIN_LABELS)
    with open(local_file, 'rb') as f:
        train_labels = extract_labels(f)
    local_file = maybe_download(TEST_IMAGES, train_dir,
                                SOURCE_URL + TEST_IMAGES)
    with open(local_file, 'rb') as f:
        test_images = extract_images(f)
    local_file = maybe_download(TEST_LABELS, train_dir,
                                SOURCE_URL + TEST_LABELS)
    with open(local_file, 'rb') as f:
        test_labels = extract_labels(f)
    if not 0 <= validation_size <= len(train_images):
        raise ValueError(
            'Validation size should be between 0 and {}. Received: {}.'
            .format(len(train_images), validation_size))
    # The validation split is taken from the head of the training data.
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    # Scale raw uint8 pixels into [0, 1] floats.
    train_images = train_images.astype(np.float32) / 255
    validation_images = validation_images.astype(np.float32) / 255
    test_images = test_images.astype(np.float32) / 255
    train = DataSet(train_images, train_labels)
    validation = DataSet(validation_images, validation_labels)
    test = DataSet(test_images, test_labels)
    #print("Train test val type: ", type(train), type(validation), type(test))
    # NOTE(review): this attaches the splits as attributes of the
    # tf.data.Dataset *class* itself (a process-global side effect), not
    # of an instance — confirm this is intended.
    d = tf.data.Dataset
    d.train = train
    d.validation = validation
    d.test = test
    return d, train, validation, test#tf.data.Dataset#()#train=train, validation=validation, test=test, element_spec=tf.int32)
def load_small_mnist(train_dir, divisor, validation_size=5000, random_seed=0, shuffle=False):
    """Load MNIST, then keep only a random 1/divisor fraction of the
    training examples (validation/test splits are kept whole).
    NOTE(review): the `shuffle` parameter is unused — the permutation is
    always shuffled, since both `if True:` blocks below run
    unconditionally.
    """
    print('Loading with random seed %s' % random_seed)
    np.random.seed(random_seed)
    data_sets, train, validation, test = load_mnist(train_dir, validation_size)
    train_images = train.x
    train_labels = train.labels
    if True: # do this if want a subset
        perm = np.arange(len(train_labels))
        if True:
            np.random.shuffle(perm)
        # Keep the first 1/divisor of the shuffled index permutation.
        num_to_keep = int(len(train_labels) / divisor)
        print("Only keeping %s of the training set" % num_to_keep)
        perm = perm[:num_to_keep]
        train_images = train_images[perm, :]
        train_labels = train_labels[perm]
    validation_images = data_sets.validation.x
    validation_labels = data_sets.validation.labels
    # perm = np.arange(len(validation_labels))
    # np.random.shuffle(perm)
    # num_to_keep = int(len(validation_labels) / 10)
    # perm = perm[:num_to_keep]
    # validation_images = validation_images[perm, :]
    # validation_labels = validation_labels[perm]
    test_images = data_sets.test.x
    test_labels = data_sets.test.labels
    # perm = np.arange(len(test_labels))
    # np.random.shuffle(perm)
    # num_to_keep = int(len(test_labels) / 10)
    # perm = perm[:num_to_keep]
    # test_images = test_images[perm, :]
    # test_labels = test_labels[perm]
    #print("Len train labels is ", len(train_labels)
    train = DataSet(train_images, train_labels)
    validation = DataSet(validation_images, validation_labels)
    test = DataSet(test_images, test_labels)
    # NOTE(review): as in load_mnist, attributes are set on the shared
    # tf.data.Dataset class object (global side effect).
    d = tf.data.Dataset
    d.train = train
    d.validation = validation
    d.test = test
    return d, train, validation, test #tf.data.Dataset(train=train, validation=validation, test=test)
|
# Presently we support manual adding of lessons to listening_files.json:
import json
# First pass: read the existing JSON to learn the current lesson count.
with open("listening_files.json","r") as f:
    list_file = json.load(f)
    total_lessons = list_file['total_lessons']
# Second pass: 'w+' truncates the file; the whole dict is re-dumped below.
with open("listening_files.json","w+") as f:
    #list_file['Lesson_2'] = ["Flag Football","01:16","Medium"]
    # Each entry is [title, duration, difficulty].
    list_file['Lesson_1'] = ["How to look inside Brain","04:52","Medium"]
    list_file['Lesson_2'] = ["Mining Minerals from sea water","02:53","Medium"]
    list_file['Lesson_3'] = ["The job we will lose","04:30","Medium"]
    list_file['Lesson_4'] = ["Print Your own medicine","02:59","Medium"]
    list_file['Lesson_5'] = ["My underwater Robot","04:13","Medium"]
    #list_file['Lesson_6'] = ["Playing Tennis tournament","02:45","Medium"]
    #list_file['Lesson_7'] = ["Problems with my english","07:51","Medium"]
    #list_file['Lesson_8'] = ["Stephanies likes and dislikes","01:47","Medium"]
    #list_file['Lesson_9'] = ["Vietnamese food","03:56","Medium"]
    #list_file['Lesson_10'] = ["What is bio-chemistry","02:02","Medium"]
    #list_file['total_lessons'] = 5
    # NOTE(review): five Lesson_* entries are written above but the
    # counter only grows by 2 — confirm the intended increment.
    list_file['total_lessons'] = total_lessons + 2 #Change with lesson want to add
    json.dump(list_file,f)
|
from tortoise import fields, models
from subject.models import Subject as Subject
class Trial(models.Model):
    """Tortoise ORM model: one recorded trial belonging to a Subject."""
    id = fields.IntField(pk=True)
    # FK to subject.models.Subject; reverse accessor is `subject.trials`.
    subject_id: fields.ForeignKeyRelation[Subject] = fields.ForeignKeyField("models.Subject", related_name="trials")
    # NOTE(review): auto_now/auto_now_add are both disabled, so callers
    # must set created_at explicitly — confirm that is intended.
    created_at = fields.DatetimeField(auto_now=False, auto_now_add=False)
    # Arbitrary JSON payload recorded for the trial.
    data = fields.JSONField()
    trial_name = fields.TextField()
    @property
    def name(self):
        # Human-readable name: just trial_name rendered as a string.
        return f"{self.trial_name}"
    def __str__(self) -> str:
        return f"Trial {self.id}: {self.name}"
import os
import numpy as np
import pandas as pd
def save(df, fname):
    """Persist *df* to *fname* as a NumPy structured-array .npy file.

    The DataFrame is converted to records (index dropped) and the field
    names are forced to plain str so the file round-trips cleanly.
    Parent directories are created on demand.
    """
    dname = os.path.dirname(fname)
    # BUG FIX: dirname() is '' for a bare filename, and os.makedirs('')
    # raises; also use exist_ok to avoid the isdir/makedirs race.
    if dname:
        os.makedirs(dname, exist_ok=True)
    records = df.to_records(index=False)
    records.dtype.names = [str(i) for i in records.dtype.names]
    np.save(fname, records)
|
import time
from pageobjects.environments import DiscardChangesPopup
from pageobjects.nodes import Nodes, RolesPanel, DeleteNodePopup
from tests import preconditions
from tests.base import BaseTestCase
class TestDiscardEnvironmentChanges(BaseTestCase):
    """UI tests: the 'Discard Changes' action restores a deployed
    3-node environment after node add/delete operations."""
    @classmethod
    def setUpClass(cls):
        BaseTestCase.setUpClass()
    def setUp(self):
        """Each test precondition
        Steps:
            1. Create simple environment with default values
            2. Click on created environment
            3. Deploy environment with 1 controller and 2 compute nodes
        """
        BaseTestCase.clear_nailgun_database()
        BaseTestCase.setUp(self)
        preconditions.Environment.simple_flat()
        # Brief pause to let the UI settle before deploying.
        time.sleep(1)
        preconditions.Environment().deploy_nodes(1, 2)
    def _discard_changes(self):
        # Shared verification: discard pending changes, then expect the
        # original 3 nodes back, all in READY state.
        Nodes().discard_changes.click()
        with DiscardChangesPopup() as p:
            p.discard.click()
            p.wait_until_exists()
        time.sleep(2)
        self.assertEqual(3, len(Nodes().nodes), 'Nodes amount')
        for node in Nodes().nodes:
            self.assertEqual('ready', node.status.text.lower(),
                             'Node status is READY')
    def test_discard_adding_node(self):
        """Discard changes after adding new node
        Scenario:
            1. Add compute node
            2. Discard changes
            3. Verify that there are 3 nodes and their statuses are ready
        """
        Nodes().add_nodes.click()
        Nodes().nodes_discovered[0].checkbox.click()
        RolesPanel().compute.click()
        Nodes().apply_changes.click()
        time.sleep(1)
        self._discard_changes()
    def test_discard_deleting_node(self):
        """Discard changes after deleting node
        Scenario:
            1. Delete one compute node
            2. Discard changes
            3. Verify that there are 3 nodes and their statuses are ready
        """
        with Nodes() as n:
            n.nodes[1].checkbox.click()
            n.delete_nodes.click()
        with DeleteNodePopup() as p:
            p.delete.click()
            p.wait_until_exists()
        time.sleep(1)
        self._discard_changes()
|
from os import chdir as cd
from os.path import abspath, dirname, join, pardir, realpath
import platform
from subprocess import CalledProcessError, check_output as execute
from sys import exit
# Introduce ourselves.
print('Initializing pwman...')
# Get root directory of this repository (parent of this script's dir).
REPO_DIR = abspath(join(dirname(realpath(__file__)), pardir))
# And move there, so relative git/hook paths below resolve.
cd(REPO_DIR)
# Pick the interpreter launcher: the 'py' launcher on Windows, plain
# 'python' everywhere else.
os_name = platform.system().lower()
python = 'py' if os_name.startswith('win') else 'python'
requirements_path = join(REPO_DIR, 'run', 'requirements.txt')
def command(text, success_message):
    """Run *text* as a subprocess and print *success_message* on success.
    On failure, report the return code and captured output, then exit(1).

    NOTE(review): the naive split(' ') assumes no argument itself
    contains a space — true for the commands used in this script.
    """
    argv = text.split(' ')
    try:
        execute(argv)
    except CalledProcessError as e:
        print(
            f'Something went wrong.\n'
            f'Error code: {e.returncode}\n'
            f'More info: {e.output}\n'
        )
        exit(1)
    else:
        print(success_message)
def main():
    """Configure git hooks, install requirements, and (on Linux) grant
    execute permission to each hook script."""
    # Let's tell git to look for hooks in our custom hooks directory.
    command(
        'git config core.hooksPath .hooks',
        'Hooks were configured successfully.'
    )
    # Install missing packages.
    command(
        f'{python} -m pip install -r {requirements_path} --user',
        'Completed setup of packages.'
    )
    # If we are not on Linux, our job is completed.
    if not os_name.startswith('linux'):
        return
    # If we are on Linux, we have to grant execute permissions to each hook.
    from glob import glob
    from os import stat, chmod
    from stat import S_IEXEC as EXECUTE_PERMISSION
    # List files without extension. Those are the hooks.
    hooks = [file for file in glob(f'{REPO_DIR}/.hooks/*') if '.' not in file]
    # Add execute permission to the already-granted permission bits.
    for file in hooks:
        chmod(file, stat(file).st_mode | EXECUTE_PERMISSION)
    print('Permissions were granted to all hooks successfully.')


if __name__ == '__main__':
    # Guard added: the original called main() unconditionally, running
    # git/pip side effects on a mere import of this module.
    main()
|
import tfidf
def transfer(word, no):
    """Return a list containing *word* repeated *no* times.

    Idiom fix: list repetition replaces the manual append loop.
    """
    return [word] * no
def _read_word_counts(path):
    """Read 'word count' lines from *path* and expand each word
    *count* times into one flat list.

    NOTE(review): int(g[1][:-1]) assumes every line (including the last)
    ends with a newline, exactly as the original code did.
    """
    # BUG FIX: the original opened both files without ever closing them
    # (the handle was rebound while still open); `with` guarantees close.
    words = []
    with open(path, "r") as fh:
        for line in fh:
            g = line.split(" ")
            words.extend(transfer(g[0], int(g[1][:-1])))
    return words

sport = _read_word_counts("sport.txt")
print(sport)
food = _read_word_counts("food.txt")
a, b = tfidf.run(sport, food)
# Sort each tf-idf mapping by (value, key) so ties order alphabetically.
ggg = sorted(a.items(), key=lambda kv: (kv[1], kv[0]))
with open("sporttf2.txt", mode='w') as f:
    for k, v in ggg:
        f.write(("%s %s\n") % (k, v))
print("######################################################")
lll = sorted(b.items(), key=lambda kv: (kv[1], kv[0]))
with open("foodttf2.txt", mode='w') as f:
    for k, v in lll:
        f.write(("%s %s\n") % (k, v))
import gym
import numpy as np
"""Data generation for the case of a single block pick and place in Fetch Env"""
actions = []
observations = []
infos = []
vectorize_observation = False
def main():
    """Collect numItr scripted demonstration episodes in
    FetchDrawTriangle-v1 and save actions/observations/infos to a
    compressed .npz file; returns the file name."""
    env = gym.make('FetchDrawTriangle-v1')
    numItr = 100
    initStateSpace = "random"
    env.reset()
    print("Reset!")
    # goToGoal appends one episode per call to the module-level buffers.
    while len(actions) < numItr:
        obs = env.reset()
        print("ITERATION NUMBER ", len(actions))
        goToGoal(env, obs)
    fileName = "data_fetch_draw"
    fileName += "_" + initStateSpace
    fileName += "_" + str(numItr)
    fileName += ".npz"
    np.savez_compressed(fileName, acs=actions, obs=observations, info=infos) # save the file
    return fileName
def vectorize_obs(obs):
    """Flatten a dict observation into one flat list of its values'
    elements, in dict insertion order.

    Idiom fix: the original iterated .items() and ignored the keys; a
    comprehension over .values() is equivalent and clearer.
    """
    return [element for value in obs.values() for element in value]
def goToGoal(env, lastObs):
    """Run one scripted episode: steer the gripper through the desired
    goal way-points in order, recording actions/observations/infos into
    the module-level buffers.

    NOTE(review): goals are reshaped to (9, 3), i.e. nine xyz way-points
    — specific to this env's goal layout; confirm before reusing.
    """
    achieved_goals = np.array(lastObs['achieved_goal'])
    achieved_goals = achieved_goals.reshape((9,3))
    desired_goals = np.array(lastObs['desired_goal'])
    desired_goals = desired_goals.reshape((9,3))
    episodeAcs = []
    episodeObs = []
    episodeInfo = []
    # Index of the way-point currently being chased.
    dginx = 0
    timeStep = 0 #count the total number of timesteps
    if vectorize_observation:
        episodeObs.append(vectorize_obs(lastObs))
    else:
        episodeObs.append(lastObs)
    cur_grip_pos = lastObs['observation'][0:3]
    while timeStep <= env._max_episode_steps:
        timeStep += 1
        # env.render()
        #print(cur_grip_pos)
        # Proportional step toward the current way-point; the 4th action
        # component (0.15) is the gripper command — presumably "open";
        # confirm against the env's action spec.
        a = (desired_goals[dginx,:] - cur_grip_pos)
        print(a)
        action = [a[0], a[1], a[2], 0.15]
        obsDataNew, reward, done, info = env.step(action)
        cur_grip_pos = obsDataNew['observation'][0:3]
        achieved_goals = np.array(obsDataNew['achieved_goal'])
        achieved_goals = achieved_goals.reshape((9,3))
        desired_goals = np.array(obsDataNew['desired_goal'])
        desired_goals = desired_goals.reshape((9,3))
        #print(achieved_goals)
        print(desired_goals[dginx,:])
        ag = achieved_goals[dginx,:]
        dg = desired_goals[dginx,:]
        dist = np.linalg.norm(np.array(ag) - np.array(dg), axis=-1)
        # Advance to the next way-point once within 0.05 of the current one.
        if (dist < 0.05) and dginx < 8 :
            dginx +=1
        if timeStep < env._max_episode_steps:
            if vectorize_observation:
                episodeObs.append(vectorize_obs(obsDataNew))
            else:
                episodeObs.append(obsDataNew)
            episodeAcs.append(action)
            episodeInfo.append(info)
        if timeStep >= env._max_episode_steps: break
    actions.append(episodeAcs)
    observations.append(episodeObs)
    infos.append(episodeInfo)
def test(filename):
print('Test')
data = np.load(fileName)
print('Obs shape={}'.format(data['obs'].shape))
print('Acs shape={}'.format(data['acs'].shape))
print('Infos shape={}'.format(data['info'].shape))
if __name__ == "__main__":
fileName = main()
test(fileName)
|
import compiler_gym # imports the CompilerGym environments
import gym
from transformers import (
DataCollatorWithPadding,
PreTrainedTokenizerFast,
RobertaForSequenceClassification,
RobertaTokenizerFast,
TrainingArguments,
)
from datasets import load_dataset
from o4.data import prepare_cost_dataset
from o4.models import CostModelTrainer
# Sample count / phase-sequence length; used to tag the W&B run name.
SAMPLES = 64
PHASES = 32
def main():
    """Fine-tune CodeBERT as a single-output (regression) cost model on
    pass-sequence data from the llvm-ic-v0 CompilerGym environment, then
    push model and tokenizer to the Hub."""
    # Create gym environment
    env = gym.make("llvm-ic-v0")
    # Create tokenizer
    tokenizer = RobertaTokenizerFast.from_pretrained("microsoft/codebert-base-mlm")
    tokenizer = PreTrainedTokenizerFast(
        tokenizer_file="tokenizer.json", max_len_single_sentence=512
    )
    # Every compiler action name becomes its own token.
    tokenizer.add_tokens(env.action_space.names)
    tokenizer.add_special_tokens(
        {
            "cls_token": "[CLS]",
            "pad_token": "[PAD]",
            "sep_token": "[SEP]",
        }
    )
    # Create model; num_labels=1 -> scalar regression head.
    model = RobertaForSequenceClassification.from_pretrained(
        "microsoft/codebert-base-mlm", num_labels=1
    )
    model.resize_token_embeddings(len(tokenizer))
    # Prepare datasets
    training_dataset = prepare_cost_dataset(tokenizer, env, data_files='data/npd-v0.csv')
    # eval_dataset = prepare_cost_dataset(tokenizer, env, samples=SAMPLES//8, phases=PHASES)
    # Use the DataCollatorWithPadding for more efficient batched padding
    data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
    training_args = TrainingArguments(
        output_dir="results/cost",
        # learning_rate=2e-5,
        per_device_train_batch_size=24,
        per_device_eval_batch_size=24,
        num_train_epochs=20,
        # weight_decay=0.01,
        report_to="wandb",
        run_name=f"codebert-llvm-ic-{SAMPLES}-{PHASES}",
        # push_to_hub=True,
        hub_model_id="iyaja/codebert-llvm-ic",
    )
    trainer = CostModelTrainer(
        model=model,
        args=training_args,
        train_dataset=training_dataset["train"],
        # eval_dataset=eval_dataset,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )
    trainer.train()
    model.push_to_hub("codebert-llvm-ic-v0")
    tokenizer.push_to_hub("codebert-llvm-ic-v0")
if __name__ == "__main__":
    main()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint:disable=redefined-outer-name,logging-format-interpolation
"""Bert layer output collector with threshold clipping for calibration"""
import ctypes
from mxnet import ndarray
from mxnet.base import NDArrayHandle, py_str
from mxnet.ndarray import NDArray
class BertLayerCollector:
    """Saves layer output min and max values in a dict with layer names as keys.
    The collected min and max values will be directly used as thresholds for quantization.
    """
    def __init__(self, clip_min=None, clip_max=None, logger=None):
        # Only collect from layers whose names match BERT block outputs or
        # the specific reshape/mul/squeeze nodes listed here.
        self.include_layer = lambda name: name.endswith('_output') or \
                                          name.endswith('reshape10_0') or \
                                          name.endswith('_mul0_0') or \
                                          name.endswith('_squeeze0_0')
        # layer name -> (running min, running max)
        self.min_max_dict = {}
        self.clip_min = clip_min
        self.clip_max = clip_max
        self.logger = logger
    def collect(self, name, arr):
        """Callback function for collecting min and max values from an NDArray."""
        name = py_str(name)
        if self.include_layer is not None and not self.include_layer(name):
            return
        # Wrap the raw C handle as a read-only NDArray (no copy).
        handle = ctypes.cast(arr, NDArrayHandle)
        arr = NDArray(handle, writable=False)
        min_range = ndarray.min(arr).asscalar()
        max_range = ndarray.max(arr).asscalar()
        # Clip GELU outputs from above and LayerNorm outputs from below,
        # per the calibration thresholds given at construction.
        if name.find('gelu0_leakyrelu0') != -1 and max_range > self.clip_max:
            max_range = self.clip_max
        if name.find('layernorm0_layernorm0') != -1 and min_range < self.clip_min:
            min_range = self.clip_min
        if name in self.min_max_dict:
            cur_min_max = self.min_max_dict[name]
            # Widen the stored range to cover this batch too.
            self.min_max_dict[name] = (min(cur_min_max[0], min_range),
                                       max(cur_min_max[1], max_range))
        else:
            self.min_max_dict[name] = (min_range, max_range)
        if self.logger is not None:
            self.logger.info('Collecting layer %s min_range=%f, max_range=%f'
                             % (name, min_range, max_range))
|
import numpy as np
import os
n_sigma=2.
def return_stuff(Mach_number, mode='Fiducial'):
    """Map a Mach number (plus, for Mach 3, a run-variant *mode*) to the
    data directory name and the [low, high) fit index bounds.

    Returns (dir_name, fit_low_bnd, fit_up_bnd).  As in the original
    code, an unrecognised Mach number (or Mach-3 mode) leaves the locals
    unassigned and raises UnboundLocalError at the return.
    """
    if Mach_number == 2:
        data_dir, low, high = 'Mach2', 106, 176
    if Mach_number == 3:
        variants = {
            'Fiducial': 'Mach3',
            'short': 'Mach3short',
            'veryshort': 'Mach3VeryShort',
            'LowRes': 'Mach3_256',
            'MoreModes': 'Mach3_moremodes',
        }
        if mode in variants:
            data_dir, low = variants[mode], 120
        high = 200
    if Mach_number == 5:
        data_dir, low, high = 'Mach5', 116, 200
    if Mach_number == 4:
        data_dir, low, high = 'Mach4', 113, 200
    if Mach_number == 6:
        data_dir, low, high = 'Mach6', 130, 200
    return data_dir, low, high
def count_number_of_files(Mach_number, A_list, mode='Fiducial'):
    """Count consecutively-numbered data files s_s2_<A>_<NNNN>.dat under
    ../../Data/<dir> for each amplitude A, stopping at the first gap.

    Returns a float np.ndarray of counts when A_list holds several
    values, or a single int for a scalar A_list (as before).
    """
    data_dir = return_stuff(Mach_number, mode)[0]

    def _count(tag):
        # Count files 0000, 0001, ... until the first one that is missing.
        # (The original probed os.path.exists twice per file and duplicated
        # this loop for the scalar and array branches.)
        found = 0
        for i in range(1000):
            path = '../../Data/%s/s_s2_%s_%s.dat' % (data_dir, tag, str(i).zfill(4))
            if not os.path.exists(path):
                break
            found += 1
        return found

    A_list = np.array(A_list)
    if A_list.size > 1:
        return np.array([float(_count(A)) for A in A_list])
    return _count(A_list)
|
from bokeh.models import HoverTool, ColumnDataSource
from bokeh.plotting import figure, output_file, show
import bokeh.layouts
import datetime
import numpy as np
import DownloadData
def yw2datetime(yw):
    """Convert a 'YYYY-KWn' ISO calendar-week label to the datetime of
    that week's Sunday; lists/arrays are converted element-wise."""
    if isinstance(yw, (list, np.ndarray)):
        return [yw2datetime(entry) for entry in yw]
    year, week = (int(part) for part in yw.split('-KW'))
    jan1_weekday = datetime.datetime(year, 1, 1).weekday()
    # ISO week 1 is the week containing the year's first Thursday, so
    # find the offset from Jan 1 to the Monday of ISO week 1.
    if jan1_weekday <= 3:  # Jan 1 falls Mon..Thu
        offset = 1 - jan1_weekday
    else:
        offset = 8 - jan1_weekday
    if offset > 0:
        week1_monday = datetime.datetime(year, 1, offset)
    else:
        # Week 1 starts in the previous December.
        week1_monday = datetime.datetime(year - 1, 12, 31 + offset)
    return week1_monday + datetime.timedelta(weeks=week - 1, days=6)
def get_cmap(num):
    """Build a (3, num) uint8 RGB colormap by piecewise-linear
    interpolation of each channel between four anchor colours
    (given as percentages of full intensity)."""
    anchors = np.array([
        [17.6, 19.2, 83.1, 83.1],
        [66.7, 30.2, 22, 22],
    ])
    anchors = np.array([
        [17.6, 19.2, 83.1, 83.1],   # red channel
        [66.7, 30.2, 22, 62.7],     # green channel
        [17.6, 55.7, 22, 22],       # blue channel
    ]) / 100 * 255
    xs = np.linspace(0, 1, num)
    xp = np.linspace(0, 1, 4)
    rows = [np.interp(xs, xp, channel) for channel in anchors]
    return np.vstack(rows).astype(np.uint8)
def getmarker():
    """Return (all bokeh marker type names, a cycling marker-index list).

    The selector is the 11-element pattern repeated 16 times (176
    entries) — replacing the original hand-written repetition, which was
    the same 176 values spelled out literally.
    """
    marker_list = list(bokeh.models.markers.marker_types)
    marker_selector = [0, 1, 6, 8, 14, 15, 16, 12, 21, 24, 25] * 16
    return marker_list, marker_selector
def remove_none(item):
    """Return *item* as a fresh ndarray with every None entry replaced
    by 0 (the input itself is never mutated)."""
    cleaned = np.array(item)
    cleaned[cleaned == None] = 0  # noqa: E711 — elementwise numpy compare
    return cleaned
def plot_data(p,age,year_week,data,collect_data=False,incidence=True,procentual=False):
    """Draw one line (+ hover scatter) per age group onto bokeh figure *p*.

    `age` holds the group labels ('Gesamt' = overall total, drawn black
    and thicker from its own data source), `year_week` the 'YYYY-KWn'
    x labels, `data` one value series per group.  Returns *p*.
    """
    data = remove_none(data)
    # The last week may be extrapolated; `interp` flags that, and the
    # final segment is then drawn dashed.
    data, interp = DownloadData.extrapolateLastWeek(year_week, data, collect_data=collect_data)
    p.sizing_mode = "stretch_both"
    p.xaxis[0].formatter = bokeh.models.DatetimeTickFormatter() # PrintfTickFormatter(format="%d.%m.%Y")
    cmap_colors = get_cmap(len(age))
    marker_list, marker_selector = getmarker()
    glyph_list = []
    for i in range(len(age)):
        if age[i] == 'Gesamt':
            # Total curve: black, bold, data fetched separately.
            line_color = (0, 0, 0)
            line_width = 2
            time, count = DownloadData.get_total(incidence=incidence)
            if procentual:
                time_diff = 7
                time = time[time_diff:]
                count = calculate_procentual(count,time_diff)
            source = ColumnDataSource(data=dict(
                x_list=time,
                y_list=list(count),
                desc=[age[i] for x in time],
                col=[line_color for x in time]
            ))
        else:
            line_color = tuple(cmap_colors[:, i])
            line_width = 1
            source = ColumnDataSource(data=dict(
                x_list=list(yw2datetime(year_week)),
                y_list=list(data[i][:]),
                desc=[age[i] for x in year_week],
                col=[line_color for x in year_week]
            ))
        muted_alpha = .1
        if interp:
            # Solid line up to the second-to-last point, then a dashed
            # segment for the extrapolated final week.
            li = p.line(source.data['x_list'][:-1], source.data['y_list'][:-1], line_color=line_color,
                        line_width=line_width,
                        line_alpha=1, muted_alpha=muted_alpha, legend_label=age[i])
            li2 = p.line(source.data['x_list'][-2:], source.data['y_list'][-2:], line_color=line_color,
                         line_width=line_width,
                         line_alpha=1, muted_alpha=muted_alpha, legend_label=age[i], line_dash=[3, 3])
        else:
            li = p.line(source.data['x_list'], source.data['y_list'], line_color=line_color, line_width=line_width,
                        line_alpha=1, muted_alpha=muted_alpha, legend_label=age[i])
        if age[i] == 'Gesamt':
            # Invisible scatter: only serves as the hover-tool anchor.
            sca = p.scatter(x="x_list", y="y_list", source=source, alpha=0, muted_alpha=0, legend_label=age[i])
        else:
            sca = p.scatter(x="x_list", y="y_list", source=source, muted_alpha=muted_alpha, legend_label=age[i])
        sca.glyph.marker = marker_list[marker_selector[i]]
        sca.glyph.line_color = line_color
        sca.glyph.fill_color = None
        sca.glyph.size = 8
        glyph_list.append(sca)
    # Tooltip label depends on what is being plotted.
    procent=""
    if incidence:
        incidence_cases = "Inzidenz"
    else:
        incidence_cases = "Fallzahlen"
    if procentual:
        incidence_cases = "Unterschied"
        procent = "%"
    p.add_tools(HoverTool(
        renderers=glyph_list,
        tooltips=[
            ("Alter", "@desc"),
            ("Datum", "@x_list{%d.%m.%Y}"),
            (incidence_cases, "@y_list{0}"+procent),
        ],
        formatters={'@x_list': 'datetime', },
    ))
    p.legend.location = "top_left"
    p.legend.click_policy = "mute"
    p.legend.orientation = "horizontal"
    return p
def x_bounds(year_week):
    """Return [first date - 1 week, last date + 1 week] plot bounds for
    the given 'YYYY-KWn' labels."""
    dates = yw2datetime(year_week)
    pad = datetime.timedelta(weeks=1)
    return [dates[0] - pad, dates[-1] + pad]
def align_arrays(x_list, y_list, extend_values=False):
    """Align several (x, y) series onto the union of all x values.

    Positions a series has no x for are filled with 0; when
    extend_values is True, each series' final value is carried forward
    past its last x.  Returns (x_union, list of aligned y arrays).
    """
    x_union = np.unique(np.concatenate(x_list))
    aligned = []
    for xs, ys in zip(x_list, y_list):
        row = np.zeros(np.shape(x_union))
        _, idx, _ = np.intersect1d(x_union, xs, return_indices=True)
        row[idx] = ys
        if extend_values:
            # Carry the last known value forward to the end of the axis.
            row[idx[-1] + 1:] = row[idx[-1]]
        aligned.append(row)
    return x_union, aligned
def plot_sum(p):
    """Stacked-area plot of cumulative infections plus first/second
    vaccinations, with a dashed reference line at a multiple of 5% of
    the population.  Returns the figure *p*."""
    p.sizing_mode = "stretch_both"
    p.xaxis[0].formatter = bokeh.models.DatetimeTickFormatter() # PrintfTickFormatter(format="%d.%m.%Y")
    total_date, total_count = DownloadData.get_total(incidence=False,smooth=False)
    vac_date, vac_first, vac_second = DownloadData.get_vaccination_data()
    # Put all three series on one shared date axis (0-filled, vaccination
    # series carried forward past their last date), then accumulate.
    x,y_list = align_arrays([total_date,vac_date,vac_date],[total_count,vac_first,vac_second],True)
    y_list[0] = np.cumsum(y_list[0])
    y_list[1] = np.cumsum(y_list[1])
    y_list[2] = np.cumsum(y_list[2])
    names = ['Zweite Impfung','Erste Impfung','Infizierte']
    source = ColumnDataSource(data=dict(
        x_list=x,
        y_list=y_list[0]+y_list[1]+y_list[2],
        infected=y_list[0],
        vax_first=y_list[1],
        vax_second=y_list[2],
    ))
    #for i in range(3):
    #    p.line(x,y_list[i],legend_label=names[i])
    #p.line(x=list(range(len(x)-1)),y=x[1:]-x[:-1],legend_label='123')
    p.varea_stack(['vax_second', 'vax_first', 'infected'], x='x_list', source=source, legend_label=names,
                  color=[(50, 50, 200), (50, 200, 50), (200, 50, 50)])
    # Invisible line over the stack total: hover anchor for the tooltips.
    glyph = p.line(x='x_list', y='y_list', source=source, alpha=0)
    # Largest multiple of 5% of the population not exceeding the stack's
    # peak (at least 1), used for the dashed guide line.
    five_percentile = np.max([np.floor(np.max(y_list[0]+y_list[1]+y_list[2])/(DownloadData.count_age('Gesamt')*0.05)),1])
    y_value = DownloadData.count_age('Gesamt')*0.05*five_percentile
    p.line(x=[x[0], x[-1]], y=[y_value, y_value],
           line_width=2, legend_label='{:}% Bevölkerung'.format(5*five_percentile), line_dash=[3, 3])
    p.legend.location = "top_left"
    #p.legend.click_policy = "mute"
    p.legend.orientation = "horizontal"
    p.add_tools(HoverTool(
        renderers=[glyph],
        tooltips=[
            ("Datum", "@x_list{%d.%m.%Y}"),
            ("Infektionen", "@infected{0}"),
            ("Erste Impfung", "@vax_first{0}"),
            ("Zweite Impfung", "@vax_second{0}"),
        ],
        formatters={'@x_list': 'datetime', },
        mode='vline',
    ))
    return p
def calculate_procentual(data, diff=1):
    """Percentage change of `data` relative to `diff` steps earlier.

    Works on 1-D arrays (along the only axis) and 2-D arrays (along axis 1).
    Divisions by zero and resulting NaN/inf values are mapped to 0.
    """
    with np.errstate(all='ignore'):
        if np.ndim(data) == 1:
            previous, current = data[:-diff], data[diff:]
        else:
            previous, current = data[:, :-diff], data[:, diff:]
        change = (current - previous) / previous * 100
    #return_value[return_value == None] = 0
    np.nan_to_num(change, copy=False, nan=0, posinf=0, neginf=0)
    return change
def __init__():
    """Entry point: build the four tabbed bokeh figures (incidence, absolute
    counts, week-over-week change, cumulative totals), wire a shared x-range
    plus a date slider, and write/show everything as lines.html."""
    age, year_week, data, abs_data = DownloadData.incidence(True)
    x_bound = x_bounds(year_week)
    output_file("lines.html")
    p1 = figure(title="Inzidenz nach Altersgruppen", x_axis_type='datetime', x_axis_label='Datum',
                y_axis_label='Inzidenz',
                tools='pan,wheel_zoom,box_zoom,reset')
    p2 = figure(title="Fallzahlen nach Altersgruppen", x_axis_type='datetime', x_axis_label='Datum',
                y_axis_label='Fallzahl',
                tools='pan,wheel_zoom,box_zoom,reset')
    p3 = figure(title="Prozentualer Unterschied zur Vorwoche", x_axis_type='datetime', x_axis_label='Datum',
                y_axis_label='Unterschied zur Vorwoche in %',
                tools='pan,wheel_zoom,box_zoom,reset')
    p4 = figure(title="Infektion und Impfung - Kumulativ", x_axis_type='datetime', x_axis_label='Datum',
                y_axis_label='Personen',
                tools='pan,wheel_zoom,box_zoom,reset')
    p1 = plot_data(p1, age, year_week, data, collect_data=True, incidence=True)
    p2 = plot_data(p2, age, year_week, abs_data, incidence=False)
    # Week-over-week change drops the first week (diff consumes one sample).
    p3 = plot_data(p3,age,year_week[1:],calculate_procentual(data), incidence=False, procentual=True)
    p4 = plot_sum(p4)
    # All four figures pan/zoom together via one shared Range1d.
    p1.x_range = bokeh.models.Range1d(x_bound[0],x_bound[1])
    p4.x_range = p3.x_range = p2.x_range = p1.x_range
    p3.y_range = bokeh.models.Range1d(-100, 100)
    # NOTE(review): `time` and `count` are computed here but never used below.
    time, count = DownloadData.get_total(incidence=False)
    date_range_slider = bokeh.models.DateRangeSlider(value=(x_bound[0],x_bound[1]),
                                                     start=x_bound[0], end=x_bound[1])
    def update_xrange(p):
        # Client-side JS: slider drives the (shared) x-range of figure p.
        return bokeh.models.CustomJS(args=dict(p=p), code="""
        var a = cb_obj.value;
        p.x_range.start = a[0];
        p.x_range.end = a[1];
        """)
    date_range_slider.js_on_change('value', update_xrange(p1))
    #date_range_slider.js_on_change('value', update_xrange(p2))
    tab1 = bokeh.models.Panel(child=p1, title="Inzidenz")
    tab2 = bokeh.models.Panel(child=p2, title="Fallzahl")
    tab3 = bokeh.models.Panel(child=p3, title="Prozentuale Veränderung")
    tab4 = bokeh.models.Panel(child=p4, title="Kumulativ")
    # Footer with data sources and timestamp.
    sub_text = bokeh.models.Div(text='<p style="font-size:10px">Stand: ' + datetime.datetime.now().strftime("%d.%m.%Y %H:%M") +
                                '; Quellen: <a href="https://survstat.rki.de">Fallzahlen Altersgruppen - Robert Koch-Institut: SurvStat@RKI 2.0</a>; '
                                '<a href="https://npgeo-corona-npgeo-de.hub.arcgis.com/datasets/dd4580c810204019a7b8eb3e0b329dd6_0">Fallzahlen Gesamt - Robert Koch-Institut (RKI), dl-de/by-2-0</a>; '
                                '<a href="https://www-genesis.destatis.de/genesis//online?operation=table&code=12411-0005&bypass=true&'
                                'levelindex=0&levelid=1611832679336">Bevölkerung - Statistisches Bundesamt (Destatis), 12411-0005 31.12.2019</a>; '
                                '<a href="https://impfdashboard.de/static/data/germany_vaccinations_timeseries_v2.tsv">Impfdaten - impfdashboard.de</a>;<br>'
                                '<a href="https://github.com/timkalkus/RKI_Covid_Age">Github-Seite</a> des Tools</p>')
    column = bokeh.layouts.column([bokeh.models.Tabs(tabs=[tab1, tab2, tab3, tab4]),date_range_slider, sub_text])
    column.sizing_mode = "stretch_both"
    show(column)
__init__()
|
#!/usr/bin/python
# import from modules directory
import sys
import os.path
sys.path.append(os.path.abspath(os.path.dirname(__file__)) + "/modules")
import threading
import time
import copy
import DynamicObjectV2
Obj = DynamicObjectV2.Class
MsgLock = threading.RLock()
IOLock = threading.RLock()
# Project version = first line of README.md, text after "# ".
# FIX: the file handle was previously opened and never closed; use a
# context manager so it is released deterministically.
with open('README.md') as readme:
    version = readme.read().split('\n')[0].split('# ')[1]
# process flags
# -debug = show messages
# -test [testnames]
# Command-line grammar: "-flag value value ..." — each "-x" starts a new Obj
# under flags.x, and following bare words are recorded as keys set to True.
flags = Obj()
current = None
for arg in sys.argv[1:]: #skips first which is main.py
    if (arg[:1] == "-"):
        current = Obj()
        flags[arg[1:]] = current
    else:
        # NOTE(review): a bare value before any "-flag" leaves current=None and
        # raises TypeError here — confirm whether that input is ever possible.
        current[arg] = True
# threads: name -> running IPCThread instance; comms: shared tag registry.
threads = {}
comms = Obj()
import IPCThread
IPCThread = IPCThread.Class
def registerOutput (owner, tag, default):
    """Claim `tag` in the shared comms registry for thread `owner`, storing
    `default` (wrapped in Obj) as its initial output. Registration is refused
    (with a warning) if another thread already owns the tag."""
    with MsgLock:
        if (comms[tag] and comms[tag].owner != owner):
            return printSync("WARNING: Cannot register tag '{}'. Already registered by thread '{}'.".format(tag, comms[tag].owner.name))
        default = Obj(default)
        print("Registered '{}' tag with owner thread '{}' and default output {}.".format(tag, owner.name, default))
        # Ownership is recorded on the stored object itself.
        default.owner = owner
        comms[tag] = default
def output (thread, tag, value):
    """Merge `value` into the comms entry for `tag` on behalf of `thread`.

    Refused (with a warning) when the tag is unregistered or owned by a
    different thread. With the -debug flag, the resulting state (minus the
    internal owner field) is printed.
    """
    with MsgLock:
        if (not comms[tag]): return printSync("WARNING: '{}' cannot output with '{}' tag. ACCESS DENIED. Tag not registered.".format(thread.name, tag))
        if (comms[tag].owner != thread):
            return printSync("WARNING: '{}' cannot output with '{}' tag. ACCESS DENIED. Thread '{}' has tag ownership.".format(thread.name, tag, comms[tag].owner.name))
        # Obj.extend merges `value` into the stored output — semantics defined in DynamicObjectV2.
        comms[tag].extend(value)
        info = Obj(copy.copy(comms[tag]))
        del info['owner']
        if (flags.debug): print("  OUTPUT by {}: [{} tag] {}".format(thread.name, tag, info))
def getInputs ():
    """Return a snapshot (shallow copies) of every registered comms entry,
    with the internal `owner` field stripped from each copy."""
    with MsgLock:
        info = Obj({})
        keys = comms.__vars__()
        for key in keys:
            # Shallow copy so callers cannot mutate the live registry entry itself.
            info[key] = Obj(copy.copy(comms[key]))
            del info[key]["owner"]
        return info
def printSync (msg):
    """Print `msg` while holding the global IO lock, so output lines from
    concurrent threads do not interleave. Returns None."""
    with IOLock:
        print(msg)
def message (thread, msg):
    """Synchronized print of a message attributed to `thread`."""
    printSync("MESSAGE by {}: {}".format(thread.name, msg))
def makeAPI ():
    """Build the API dict handed to each worker thread: the comms functions
    plus a shallow copy of the parsed command-line flags."""
    return {
        "registerOutput": registerOutput,
        "getInputs": getInputs,
        "output": output,
        "message": message,
        # Copy so threads cannot mutate the global flags object.
        "flags": Obj(copy.copy(flags))
    }
# legacy
def addThreadFromClass (Class, name):
    """Instantiate thread class `Class` with a fresh API and register it under `name`."""
    threads[name] = Class(name, makeAPI())
def addThreadFromSource (source, name):
    """Wrap a module exposing init(thread)/run(thread) functions into an
    IPCThread subclass and register an instance under `name`."""
    class C (IPCThread):
        def __init__(self, name, API):
            IPCThread.__init__(self, name, API)
            # Module-level init hook runs during construction.
            source.init(self)
        def run(self):
            source.run(self)
    threads[name] = C(name, makeAPI())
printSync("\n\t\t\tTVCS {}\n".format(version))
# Create threads: either a single module + its "Test-" companion (with -test),
# or everything listed in ModuleList.
import ModuleList
if (flags.test):
    moduleName = flags.test.__vars__()
    if (len(moduleName) > 0): moduleName = moduleName[0]
    else: raise Exception("ERROR: Could not load test - no test specified!")
    moduleSource = __import__(moduleName)
    addThreadFromSource(moduleSource, moduleName)
    # The matching test harness module is named "Test-<module>".
    moduleName = "Test-" + moduleName
    moduleSource = __import__(moduleName)
    addThreadFromSource(moduleSource, moduleName)
else:
    for moduleName in ModuleList.fromSource:
        moduleSource = None
        try:
            moduleSource = __import__(moduleName)
        except Exception as e:
            raise Exception("ERROR: Could not load module '{}' - Error: {}".format(moduleName, e))
        try:
            # Validate the module interface before wrapping it in a thread.
            getattr(moduleSource, "init")
            getattr(moduleSource, "run")
            addThreadFromSource(moduleSource, moduleName)
        except AttributeError:
            raise Exception("ERROR: Could not load module '{}' - module does not have 'init' or 'run' function!".format(moduleName))
    for moduleName in ModuleList.fromClass:
        try:
            moduleClass = __import__(moduleName)
            getattr(moduleClass, "Class")
            addThreadFromClass(moduleClass.Class, moduleName)
        except ImportError:
            raise Exception("ERROR: Could not load module '{}' - module does not exist!".format(moduleName))
        except AttributeError:
            raise Exception("ERROR: Could not load module '{}' - module does not have a 'Class'!".format(moduleName))
# Start threads
for t in threads:
    threads[t].daemon = True # make sure threads close with main thread
    threads[t].start()
# Keep the main thread alive until ctrl-c.
# BUG FIX: this was `while 1: pass`, a busy-wait that pinned one CPU core at
# 100%; sleeping keeps the process alive at negligible cost (KeyboardInterrupt
# still interrupts the sleep).
while 1:
    time.sleep(1)
|
from django.urls import path
from . import views
# URL routes for this app; each name mirrors its path so templates can
# reverse() them ({% url 'dash' %}, etc.).
urlpatterns = [
    path('change_pass',views.change_pass,name='change_pass'),
    path('dash', views.dashboard, name='dash'),
    path('login',views.login,name ='login'),
    path('logout',views.logout,name='logout'),
    path('prescriptions',views.prescriptions,name='prescriptions'),
]
|
#Basic Operations in a Binary Search Tree(BST) in Python
class Node:
    """A single BST node: a key plus left/right child links (initially empty)."""

    def __init__(self, key):
        self.key = key      # value stored at this node
        self.left = None    # root of the left subtree
        self.right = None   # root of the right subtree
# Inserting a node
def insert(node, key):
    """Insert `key` into the subtree rooted at `node`; return the subtree root.

    Smaller keys go left, equal or larger keys go right.
    """
    if node is None:
        # Empty subtree: the new key becomes its root.
        return Node(key)
    branch = 'left' if key < node.key else 'right'
    setattr(node, branch, insert(getattr(node, branch), key))
    return node
# Deleting a node
def minValueNode(node):
    """Return the minimum-key (left-most) node of the subtree rooted at `node`.

    BUG FIX: deleteNode called minValueNode, but it was never defined anywhere
    in this file, so deleting a node with two children raised NameError.
    """
    current = node
    while current.left is not None:
        current = current.left
    return current

def deleteNode(root, key):
    """Delete `key` from the BST rooted at `root`; return the (new) root.

    A no-op when the key is absent. Two-child nodes are replaced by their
    inorder successor.
    """
    # Return if the tree is empty
    if root is None:
        return root
    # Find the node to be deleted
    if key < root.key:
        root.left = deleteNode(root.left, key)
    elif(key > root.key):
        root.right = deleteNode(root.right, key)
    else:
        # If the node is with only one child or no child
        if root.left is None:
            temp = root.right
            root = None
            return temp
        elif root.right is None:
            temp = root.left
            root = None
            return temp
        # If the node has two children,
        # place the inorder successor in position of the node to be deleted
        temp = minValueNode(root.right)
        root.key = temp.key
        # Delete the inorder successor
        root.right = deleteNode(root.right, temp.key)
    return root
print("\nInsert nodes in the Binary Search Tree")
#To implement the Operations
root_node = None
# Build the demo tree from a fixed key sequence.
for key in (10, 4, 3, 12, 6, 11, 14, 8):
    root_node = insert(root_node, key)
print("\nDelete 10")
# BUG FIX: this previously read `root = deleteNode(root, 10)`, but no
# module-level name `root` exists, so the script crashed with NameError.
# The tree lives in `root_node`.
root_node = deleteNode(root_node, 10)
|
import csv
import re
FUNDING_YEAR_RE = re.compile(r'(Funding Year) (\d+)')
FISCAL_YEAR_RE = re.compile(r'(FY) (\d+)')
CONGRESS_NUMBER = re.compile(r'\((\d+)TH\)')


def _search_int(pattern, text, group):
    """Return `pattern`'s integer capture `group` from `text`, or '' when absent."""
    match = pattern.search(text)
    return int(match.group(group)) if match else ''


def format_csv(source_doc, csv_file='senate_data.csv', cleaned_file='senate_data_cleaned.csv'):
    """Normalize parsed Senate disbursement rows from `csv_file` into
    `cleaned_file`, prefixing a citation row and a header row.

    `source_doc` is stamped into every output row. Rows flagged as senator
    offices get the senator's name extracted; funding/fiscal year and congress
    number are pulled from the raw office string when present.

    BUG FIX: the output file was opened with mode 'wb', which breaks
    csv.writer under Python 3 (it writes str, not bytes) — it now uses text
    mode with newline=''. Both files are also closed deterministically via
    context managers, and the bare `except:` clauses around the regex lookups
    were narrowed to the absent-match case.
    """
    with open(csv_file, 'r') as source, open(cleaned_file, 'w', newline='') as cleaned:
        unclean_data_reader = csv.reader(source)
        cleaned_data_writer = csv.writer(cleaned)
        cleaned_data_writer.writerow(["This data was parsed on an experimental basis by the Sunlight Foundation from Senate disbursement reports. Please cite 'The Sunlight Foundation' in any usage. For more information see the readme at http://assets-reporting.s3.amazonaws.com/1.0/senate_disbursements/readme.txt."])
        cleaned_data_writer.writerow(['source_doc','senator_flag','senator_name', 'raw office', 'funding_year', 'fiscal_year', 'congress_number', 'reference_page', 'document_number', 'date_posted', 'start_date', 'end_date', 'description', 'salary_flag', 'amount', 'payee'])
        for line in unclean_data_reader:
            senator_flag = 1 if 'senator' in line[0].lower() else 0
            senator_name = line[0].split('Funding')[0].replace('SENATOR','').strip() if senator_flag else ''
            raw_office = line[0]
            funding_year = _search_int(FUNDING_YEAR_RE, line[0], 2)
            fiscal_year = _search_int(FISCAL_YEAR_RE, line[0], 2)
            congress_number = _search_int(CONGRESS_NUMBER, line[0], 1)
            reference_page = line[3]
            document_number = line[4]
            date_posted = line[5]
            payee = line[6]
            start_date = line[7]
            end_date = line[8]
            description = line[9]
            amount = line[10]
            # Rows without a pay period are treated as non-salary items.
            salary_flag = 0 if start_date == '' and end_date == '' else 1
            cleaned_data_writer.writerow([source_doc, senator_flag, senator_name, raw_office, funding_year,
                                          fiscal_year, congress_number, reference_page, document_number, date_posted,
                                          start_date, end_date, description, salary_flag, amount, payee])
"""A test class for the ZipfMandelbrot distribution helper """
import unittest
from PiCN.Simulations.MobilitySimulations.Helper.ConsumerDistributionHelper import ZipfMandelbrotDistribution
class TestConsumerZipfMandelbrotDistributionHelper(unittest.TestCase):
    """Tests for the ZipfMandelbrotDistribution helper: building a
    distribution array and drawing values from it."""

    def test_create_zipf_mandelbrot_distribution(self):
        """Building a size-10 distribution yields 11 entries ending in 1.0."""
        distribution = ZipfMandelbrotDistribution.create_zipf_mandelbrot_distribution(10, 0.7, 0.7)
        self.assertEqual(len(distribution), 11, "should have in total 11 elements in the distribution array")
        self.assertEqual(distribution[10], 1.0, "last element in the distribution array is 1.0")

    def test_get_next_zipf_mandelbrot_distribution_value(self):
        """A drawn value is non-zero and falls within [1, size]."""
        distribution = ZipfMandelbrotDistribution.create_zipf_mandelbrot_distribution(5, 0.7, 0.7)
        drawn = ZipfMandelbrotDistribution.get_next_zipfmandelbrot_random_number(distribution, 10)
        self.assertIsNot(drawn, 0, "should not be 0")
        self.assertTrue(1 <= drawn <= 5, "should be in range to the provided basis")
import logging
from asyncio import get_event_loop
LOGGER = logging.getLogger('pulsar.events')
class AbortEvent(Exception):
    """Exception type raised to abort event processing."""
class Event:
    """A named event that callbacks can be bound to and that can be fired.

    One-time events (``_onetime`` truthy) clear their state when fired and
    support :meth:`waiter`; many-times events keep their handlers and can
    fire repeatedly. ``_self`` holds the object the event belongs to and is
    passed to every handler; it is set to ``None`` once a one-time event has
    fired, which is what :meth:`fired` checks.
    """
    __slots__ = ('name', '_onetime', '_handlers', '_waiter', '_self')
    def __init__(self, name, o, onetime):
        self.name = name
        self._onetime = onetime
        self._self = o
        self._handlers = None
        self._waiter = None
    def __repr__(self):
        return '%s: %s' % (self.name, self._handlers)
    __str__ = __repr__
    def handlers(self):
        # List of bound callbacks, or None when nothing is bound.
        return self._handlers
    def onetime(self):
        # True when this event fires at most once.
        return bool(self._onetime)
    def fired(self):
        """Returns true or false depending if this event was fired
        One-time events only can be fired
        """
        return self._self is None
    def bind(self, callback):
        """Bind a ``callback`` to this event.

        Raises RuntimeError when the (one-time) event has already fired.
        """
        handlers = self._handlers
        if self._self is None:
            raise RuntimeError('%s already fired, cannot add callbacks' % self)
        if handlers is None:
            # Lazily create the handler list on first bind.
            handlers = []
            self._handlers = handlers
        handlers.append(callback)
    def clear(self):
        # Drop all bound handlers; returns self for chaining.
        self._handlers = None
        return self
    def unbind(self, callback):
        """Remove a callback from the list

        Returns the number of handler entries removed (0 when not bound).
        """
        handlers = self._handlers
        if handlers:
            filtered_callbacks = [f for f in handlers if f != callback]
            removed_count = len(handlers) - len(filtered_callbacks)
            if removed_count:
                self._handlers = filtered_callbacks
            return removed_count
        return 0
    def fire(self, exc=None, data=None):
        """Fire the event

        :param exc: fire the event with an exception
        :param data: fire an event with data

        Handlers are invoked with the owning object, plus ``exc=`` or
        ``data=`` when given (``exc`` takes precedence). A pending waiter
        future is resolved with ``data`` (or the owning object), or failed
        with ``exc``. Firing a one-time event clears handlers and owner, so
        a second fire is a no-op.
        """
        o = self._self
        if o is not None:
            handlers = self._handlers
            if self._onetime:
                # One-time events release their state before invoking handlers.
                self._handlers = None
                self._self = None
            if handlers:
                if exc is not None:
                    for hnd in handlers:
                        hnd(o, exc=exc)
                elif data is not None:
                    for hnd in handlers:
                        hnd(o, data=data)
                else:
                    for hnd in handlers:
                        hnd(o)
            if self._waiter:
                if exc:
                    self._waiter.set_exception(exc)
                else:
                    self._waiter.set_result(data if data is not None else o)
                self._waiter = None
    def waiter(self):
        """Return a :class:`~asyncio.Future` called back once the event
        has been fired.

        If the event has been fired already return a resolved future.
        This method is available only for one-time events
        """
        assert self._onetime, 'One time events only can invoke waiter'
        if not self._waiter:
            self._waiter = get_event_loop().create_future()
            if self.fired():
                self._waiter.set_result(None)
        return self._waiter
class EventHandler:
    '''A Mixin for handling events on :ref:`async objects <async-object>`.

    It handles :class:`OneTime` events and :class:`Event` that occur
    several times.

    Subclasses may set ``ONE_TIME_EVENTS`` to an iterable of event names that
    are created as one-time events; all other names requested via
    :meth:`event` become many-times events.
    '''
    ONE_TIME_EVENTS = None
    # Lazily-built dict of name -> Event; created on first access.
    _events = None
    def events(self):
        # Build the event registry on demand, seeding it with one-time events.
        if self._events is None:
            ot = self.ONE_TIME_EVENTS or ()
            self._events = dict(((n, Event(n, self, 1)) for n in ot))
        return self._events
    def event(self, name):
        """Returns the :class:`Event` at ``name``.

        If no event is registered for ``name`` creates a new :class:`Event`
        object and returns it.
        """
        events = self.events()
        if name not in events:
            # Created on demand as a many-times event (onetime=0).
            events[name] = Event(name, self, 0)
        return events[name]
    def fire_event(self, name, exc=None, data=None):
        """Fire event at ``name`` if it is registered

        Unknown names are silently ignored.
        """
        if self._events and name in self._events:
            self._events[name].fire(exc=exc, data=data)
    def bind_events(self, events):
        '''Register all known events found in ``events`` key-valued parameters.

        Only names already present in this handler's registry are bound;
        unknown keys in ``events`` are ignored.
        '''
        evs = self._events
        if evs and events:
            for event in evs.values():
                if event.name in events:
                    event.bind(events[event.name])
    def copy_many_times_events(self, other):
        '''Copy :ref:`many times events <many-times-event>` from ``other``.

        All many times events of ``other`` are copied to this handler
        provided the events handlers already exist.
        '''
        events = self.events()
        other_events = other.events()
        if events and other_events:
            for name, event in other_events.items():
                handlers = event.handlers()
                # One-time events are intentionally not copied.
                if not event.onetime() and handlers:
                    ev = events.get(name)
                    # If the event is available add it
                    if ev:
                        for callback in handlers:
                            ev.bind(callback)
|
# -*- coding: utf-8 -*-
"""
【简介】
加载QSS文件
"""
import sys

from PyQt5.QtWidgets import QApplication, QMainWindow, QPushButton, QVBoxLayout, QWidget

from CommonHelper import CommonHelper
class MainWindow(QMainWindow):
    """Demo window that is styled by an external QSS stylesheet."""

    def __init__(self,parent=None):
        super(MainWindow,self).__init__(parent)
        self.resize(477, 258)
        self.setWindowTitle("加载QSS文件")
        btn1 = QPushButton( self)
        btn1.setText('添加')
        btn1.setToolTip('测试提示')
        vbox = QVBoxLayout()
        vbox.addWidget( btn1 )
        # BUG FIX: QMainWindow has its own fixed layout, so calling
        # self.setLayout(vbox) on it has no effect and the button was never
        # managed by the layout. Install the layout on a central widget
        # instead, as QMainWindow requires.
        central = QWidget(self)
        central.setLayout(vbox)
        self.setCentralWidget(central)
if __name__ == "__main__":
    app = QApplication(sys.argv)
    win = MainWindow()
    # Load the application-wide stylesheet from ./style.qss and apply it.
    styleFile = './style.qss'
    qssStyle = CommonHelper.readQss( styleFile )
    win.setStyleSheet( qssStyle )
    win.show()
    sys.exit(app.exec_())
|
import matplotlib
# BUG FIX: the backend must be selected BEFORE pyplot is imported; previously
# matplotlib.use('Agg') ran after `from matplotlib import pyplot`, where it is
# not guaranteed to take effect (risking a display-less crash on headless runs).
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib.collections import BrokenBarHCollection
from itertools import cycle
from collections import defaultdict
import pandas
import numpy as np

# Snakemake wiring: assembly labels and assembly fasta paths (each with a .fai index).
asms = snakemake.params["asms"]
print(asms)
alist = snakemake.input["asms"]
print(alist)

# Collect per-assembly contig lengths from the fasta index files
# (.fai column 2 = sequence length).
data = defaultdict(list)
for i, v in enumerate(alist):
    atext = asms[i]
    with open(v + ".fai") as fai:  # renamed from `input`, which shadowed the builtin
        for line in fai:
            s = line.rstrip().split()
            data['asm'].append(atext)
            data['len'].append(int(s[1]))
df = pandas.DataFrame(data)
df = df.sort_values(by=['asm', 'len'], ascending=(True, False))
# Lazy implementation: assuming largest assembly is correct here!
# TODO: allow user input for max assembly size instead
largestctg = df['len'].max()
asmsize = 0
NGX = list()
# Cumulative genome-percentage (NGX x-axis) per assembly; the divisor is the
# largest assembly size seen so far (see the "lazy implementation" note above).
for k, g in df.groupby(['asm']):
    if g['len'].sum() > asmsize:
        asmsize = g['len'].sum()
    temp = [0.0]
    for i in range(1,len(g)):
        temp.append(temp[i-1] + (g.iat[i, 1] / asmsize * 100))
    NGX.extend(temp)
df = df.assign(NGX = NGX)
colors = [ '#bd2309', '#bbb12d', '#1480fa', '#14fa2f', '#000000',
       '#faf214', '#2edfea', '#ea2ec4', '#ea2e40', '#cdcdcd',
       '#577a4d', '#2e46c0', '#f59422', '#219774', '#8086d9' ]
print(df.head())
# Plot the lines: one NGX curve per assembly, plus the NG50 guide at 50%.
fig, ax = plt.subplots()
i = 0
for k, g in df.groupby(['asm']):
    #ax = g.plot(ax=ax, kind='line', x='NGX', y='len', c=colors[i], label=k)
    ax = g.plot(ax=ax, marker='', x='NGX', y='len', c=colors[i], linewidth=1, label=k)
    i += 1
ax.vlines(x=50.0, ymin=0, ymax=largestctg, linestyles='dashed')
plt.legend(loc='best')
plt.savefig(snakemake.output["plot"])
|
import pymysql
import os
class Connection:
    """MySQL connection wrapper (configured via environment variables) plus
    small helpers for reading/writing files under the sibling 'files' directory.

    FIXES: the original path lambda `root` accepted a `base` parameter it
    ignored (it always used the closed-over file_name), and the file helpers
    leaked handles on success; both now use a shared path helper and context
    managers. Error behavior is preserved: IOError prints a message, read
    returns an empty list, write returns None.
    """

    mode = os.environ.get('ENVIRONMENT')

    def __init__(self):
        # Autocommit connection; credentials come entirely from the environment.
        self.connection = pymysql.connect(host=os.environ.get('DB_HOST'),
                                          port=int(os.environ.get('DB_PORT')),
                                          user=os.environ.get('DB_USER'),
                                          passwd=os.environ.get('DB_PASSWORD'),
                                          db=os.environ.get('DB_SCHEMA'),
                                          autocommit=True)
        self.cursor = self.connection.cursor()

    @staticmethod
    def _data_file_path(file_name):
        """Absolute path of `file_name` inside ../files relative to this module."""
        return os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "files", file_name))

    @staticmethod
    def read_from_file(file_name):
        """Return the file's lines, each stripped of CR/LF and prefixed with '@'.

        On IOError a message is printed and an empty list is returned.
        """
        lines = []
        try:
            with open(Connection._data_file_path(file_name), "r") as handle:
                for line in handle:
                    lines.append("@" + line.rstrip('\r\n'))
        except IOError:
            print("Cannot open " + file_name)
        return lines

    @staticmethod
    def write_to_file(file_name, text):
        """Write `text` to the file, printing a message on IOError."""
        try:
            with open(Connection._data_file_path(file_name), "w") as handle:
                handle.write(text)
        except IOError:
            print("Cannot open " + file_name)
# coding : utf8
from Node import *
class Syntaxe(Node):
    """Node subclass that renders itself as a bracketed string '<type>[<content>]'."""

    def setCrochet(self):
        """Build self.crochet: for VAR nodes the content is the leaf value,
        otherwise the space-joined crochet strings of the children.

        BUG FIX: the template variable was misspelled `strcture` in both
        branches, so this method always raised NameError at runtime.
        """
        structure = '{noeud}[{var}]'
        if self.type == "VAR":
            self.crochet = structure.format(
                noeud=self.type,
                var=self.leaf
            )
        else:
            # Children must already have their crochet built.
            self.crochet = structure.format(
                noeud=self.type,
                var=" ".join([child.crochet for child in self.children])
            )

    def getCrochet(self):
        """Return the previously built bracketed representation."""
        return self.crochet
|
import json
import typer
from pathlib import Path
from spacy.tokens import Span, DocBin, Doc
from spacy.vocab import Vocab
from wasabi import Printer
msg = Printer()  # wasabi console printer for status/info output
# Relations that hold in both directions; annotated symmetrically below.
SYMM_LABELS = ["Binds"]
# Collapse the fine-grained annotation labels onto the two labels used for training.
MAP_LABELS = {
    "Pos-Reg": "Regulates",
    "Neg-Reg": "Regulates",
    "Reg": "Regulates",
    "No-rel": "Regulates",
    "Binds": "Binds",
}
def main(json_loc: Path, train_file: Path, dev_file: Path, test_file: Path):
    """Creating the corpus from the Prodigy annotations.

    Reads JSONL annotations from json_loc, builds spaCy Docs with entity
    spans and a custom doc._.rel relation dict keyed by (start, start) token
    pairs, splits them into train/dev/test by article id suffix, and writes
    one DocBin per split.
    """
    Doc.set_extension("rel", default={})
    vocab = Vocab()
    # Per-split accumulators: docs, source article ids, and pos/neg counts.
    docs = {"train": [], "dev": [], "test": []}
    ids = {"train": set(), "dev": set(), "test": set()}
    count_all = {"train": 0, "dev": 0, "test": 0}
    count_pos = {"train": 0, "dev": 0, "test": 0}
    with json_loc.open("r", encoding="utf8") as jsonfile:
        for line in jsonfile:
            example = json.loads(line)
            span_starts = set()
            # Only accepted annotations are used.
            if example["answer"] == "accept":
                neg = 0
                pos = 0
                try:
                    # Parse the tokens
                    words = [t["text"] for t in example["tokens"]]
                    spaces = [t["ws"] for t in example["tokens"]]
                    doc = Doc(vocab, words=words, spaces=spaces)
                    # Parse the GGP entities
                    spans = example["spans"]
                    entities = []
                    span_end_to_start = {}
                    for span in spans:
                        entity = doc.char_span(
                            span["start"], span["end"], label=span["label"]
                        )
                        # Map each span's end token to its start token (relations
                        # are re-keyed on start tokens below).
                        span_end_to_start[span["token_end"]] = span["token_start"]
                        entities.append(entity)
                        span_starts.add(span["token_start"])
                    doc.ents = entities
                    # Parse the relations
                    rels = {}
                    for x1 in span_starts:
                        for x2 in span_starts:
                            rels[(x1, x2)] = {}
                    relations = example["relations"]
                    for relation in relations:
                        # the 'head' and 'child' annotations refer to the end token in the span
                        # but we want the first token
                        start = span_end_to_start[relation["head"]]
                        end = span_end_to_start[relation["child"]]
                        label = relation["label"]
                        label = MAP_LABELS[label]
                        if label not in rels[(start, end)]:
                            rels[(start, end)][label] = 1.0
                            pos += 1
                        # Symmetric labels are annotated in both directions.
                        if label in SYMM_LABELS:
                            if label not in rels[(end, start)]:
                                rels[(end, start)][label] = 1.0
                                pos += 1
                    # The annotation is complete, so fill in zero's where the data is missing
                    for x1 in span_starts:
                        for x2 in span_starts:
                            for label in MAP_LABELS.values():
                                if label not in rels[(x1, x2)]:
                                    neg += 1
                                    rels[(x1, x2)][label] = 0.0
                    doc._.rel = rels
                    # only keeping documents with at least 1 positive case
                    if pos > 0:
                        # use the original PMID/PMCID to decide on train/dev/test split
                        article_id = example["meta"]["source"]
                        article_id = article_id.replace("BioNLP 2011 Genia Shared Task, ", "")
                        article_id = article_id.replace(".txt", "")
                        article_id = article_id.split("-")[1]
                        # Deterministic split by id suffix: ...4 -> dev, ...3 -> test.
                        if article_id.endswith("4"):
                            ids["dev"].add(article_id)
                            docs["dev"].append(doc)
                            count_pos["dev"] += pos
                            count_all["dev"] += pos + neg
                        elif article_id.endswith("3"):
                            ids["test"].add(article_id)
                            docs["test"].append(doc)
                            count_pos["test"] += pos
                            count_all["test"] += pos + neg
                        else:
                            ids["train"].add(article_id)
                            docs["train"].append(doc)
                            count_pos["train"] += pos
                            count_all["train"] += pos + neg
                except KeyError as e:
                    msg.fail(f"Skipping doc because of key error: {e} in {example['meta']['source']}")
    # Serialize each split (with doc._.rel user data) to its DocBin file.
    docbin = DocBin(docs=docs["train"], store_user_data=True)
    docbin.to_disk(train_file)
    msg.info(
        f"{len(docs['train'])} training sentences from {len(ids['train'])} articles, "
        f"{count_pos['train']}/{count_all['train']} pos instances."
    )
    docbin = DocBin(docs=docs["dev"], store_user_data=True)
    docbin.to_disk(dev_file)
    msg.info(
        f"{len(docs['dev'])} dev sentences from {len(ids['dev'])} articles, "
        f"{count_pos['dev']}/{count_all['dev']} pos instances."
    )
    docbin = DocBin(docs=docs["test"], store_user_data=True)
    docbin.to_disk(test_file)
    msg.info(
        f"{len(docs['test'])} test sentences from {len(ids['test'])} articles, "
        f"{count_pos['test']}/{count_all['test']} pos instances."
    )
if __name__ == "__main__":
typer.run(main)
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014-2017 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2017 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2017 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2017 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# Copyright (C) 2014-2017 Anler Hernández <hello@anler.me>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from unittest import mock
from django.core.urlresolvers import reverse
from django.core import mail
from taiga.base.utils import json
from taiga.hooks.gogs import event_hooks
from taiga.hooks.gogs.api import GogsViewSet
from taiga.hooks.exceptions import ActionSyntaxException
from taiga.projects import choices as project_choices
from taiga.projects.epics.models import Epic
from taiga.projects.issues.models import Issue
from taiga.projects.tasks.models import Task
from taiga.projects.userstories.models import UserStory
from taiga.projects.models import Membership
from taiga.projects.history.services import get_history_queryset_by_model_instance, take_snapshot
from taiga.projects.notifications.choices import NotifyLevel
from taiga.projects.notifications.models import NotifyPolicy
from taiga.projects import services
from .. import factories as f
pytestmark = pytest.mark.django_db
def test_bad_signature(client):
    """Posting with a wrong secret yields HTTP 400 and a 'Bad signature' error."""
    project = f.ProjectFactory()
    url = reverse("gogs-hook-list")
    url = "%s?project=%s" % (url, project.id)
    data = {
        "secret": "badbadbad"
    }
    response = client.post(url, json.dumps(data),
                           content_type="application/json")
    response_content = response.data
    assert response.status_code == 400
    assert "Bad signature" in response_content["_error_message"]
def test_ok_signature(client):
    """Posting with the secret configured in the project's gogs module returns 204."""
    project = f.ProjectFactory()
    f.ProjectModulesConfigFactory(project=project, config={
        "gogs": {
            "secret": "tpnIwJDz4e"
        }
    })
    url = reverse("gogs-hook-list")
    url = "%s?project=%s" % (url, project.id)
    data = {"test:": "data", "secret": "tpnIwJDz4e"}
    response = client.post(url, json.dumps(data),
                           content_type="application/json")
    assert response.status_code == 204
def test_blocked_project(client):
    """A valid hook against a staff-blocked project is rejected with HTTP 451."""
    project = f.ProjectFactory(blocked_code=project_choices.BLOCKED_BY_STAFF)
    f.ProjectModulesConfigFactory(project=project, config={
        "gogs": {
            "secret": "tpnIwJDz4e"
        }
    })
    url = reverse("gogs-hook-list")
    url = "%s?project=%s" % (url, project.id)
    data = {"test:": "data", "secret": "tpnIwJDz4e"}
    response = client.post(url, json.dumps(data),
                           content_type="application/json")
    assert response.status_code == 451
def test_push_event_detected(client):
    """A push payload dispatches to PushEventHook.process_event exactly once."""
    project = f.ProjectFactory()
    url = reverse("gogs-hook-list")
    url = "%s?project=%s" % (url, project.id)
    data = {
        "commits": [
            {
                "message": "test message",
                "author": {
                    "username": "test",
                },
            }
        ],
        "repository": {
            "html_url": "http://test-url/test/project"
        }
    }
    # Bypass signature checking; only event dispatch is under test here.
    GogsViewSet._validate_signature = mock.Mock(return_value=True)
    with mock.patch.object(event_hooks.PushEventHook, "process_event") as process_event_mock:
        response = client.post(url, json.dumps(data),
                               HTTP_X_GITHUB_EVENT="push",
                               content_type="application/json")
        assert process_event_mock.call_count == 1
    assert response.status_code == 204
def test_push_event_epic_processing(client):
    """A commit message 'TG-<ref> #<slug>' moves the epic to that status and sends one mail."""
    creation_status = f.EpicStatusFactory()
    role = f.RoleFactory(project=creation_status.project, permissions=["view_epics"])
    f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
    new_status = f.EpicStatusFactory(project=creation_status.project)
    epic = f.EpicFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    payload = {
        "commits": [
            {
                "message": """test message
            test TG-%s #%s ok
            bye!
        """ % (epic.ref, new_status.slug),
                "author": {
                    "username": "test",
                },
            }
        ],
        "repository": {
            "html_url": "http://test-url/test/project"
        }
    }
    mail.outbox = []
    ev_hook = event_hooks.PushEventHook(epic.project, payload)
    ev_hook.process_event()
    # Reload from the DB to observe the status change made by the hook.
    epic = Epic.objects.get(id=epic.id)
    assert epic.status.id == new_status.id
    assert len(mail.outbox) == 1
def test_push_event_issue_processing(client):
    """A commit message 'TG-<ref> #<slug>' moves the issue to that status and sends one mail."""
    creation_status = f.IssueStatusFactory()
    role = f.RoleFactory(project=creation_status.project, permissions=["view_issues"])
    f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
    new_status = f.IssueStatusFactory(project=creation_status.project)
    issue = f.IssueFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    payload = {
        "commits": [
            {
                "message": """test message
            test TG-%s #%s ok
            bye!
        """ % (issue.ref, new_status.slug),
                "author": {
                    "username": "test",
                },
            }
        ],
        "repository": {
            "html_url": "http://test-url/test/project"
        }
    }
    mail.outbox = []
    ev_hook = event_hooks.PushEventHook(issue.project, payload)
    ev_hook.process_event()
    # Reload from the DB to observe the status change made by the hook.
    issue = Issue.objects.get(id=issue.id)
    assert issue.status.id == new_status.id
    assert len(mail.outbox) == 1
def test_push_event_task_processing(client):
    """A commit message 'TG-<ref> #<slug>' moves the task to that status and sends one mail."""
    creation_status = f.TaskStatusFactory()
    role = f.RoleFactory(project=creation_status.project, permissions=["view_tasks"])
    f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
    new_status = f.TaskStatusFactory(project=creation_status.project)
    task = f.TaskFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    payload = {
        "commits": [
            {
                "message": """test message
            test TG-%s #%s ok
            bye!
        """ % (task.ref, new_status.slug),
                "author": {
                    "username": "test",
                },
            }
        ],
        "repository": {
            "html_url": "http://test-url/test/project"
        }
    }
    mail.outbox = []
    ev_hook = event_hooks.PushEventHook(task.project, payload)
    ev_hook.process_event()
    # Reload from the DB to observe the status change made by the hook.
    task = Task.objects.get(id=task.id)
    assert task.status.id == new_status.id
    assert len(mail.outbox) == 1
def test_push_event_user_story_processing(client):
    """A commit message 'TG-<ref> #<slug>' moves the user story to that status and sends one mail."""
    creation_status = f.UserStoryStatusFactory()
    role = f.RoleFactory(project=creation_status.project, permissions=["view_us"])
    f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
    new_status = f.UserStoryStatusFactory(project=creation_status.project)
    user_story = f.UserStoryFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    payload = {
        "commits": [
            {
                "message": """test message
            test TG-%s #%s ok
            bye!
        """ % (user_story.ref, new_status.slug),
                "author": {
                    "username": "test",
                },
            }
        ],
        "repository": {
            "html_url": "http://test-url/test/project"
        }
    }
    mail.outbox = []
    ev_hook = event_hooks.PushEventHook(user_story.project, payload)
    ev_hook.process_event()
    # Reload from the DB to observe the status change made by the hook.
    user_story = UserStory.objects.get(id=user_story.id)
    assert user_story.status.id == new_status.id
    assert len(mail.outbox) == 1
def test_push_event_issue_mention(client):
    """A commit that references "TG-<ref>" without a status slug adds a
    "mentioned by" history comment to the issue and sends one email."""
    creation_status = f.IssueStatusFactory()
    role = f.RoleFactory(project=creation_status.project, permissions=["view_issues"])
    f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
    issue = f.IssueFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    # Snapshot needed so the history diff/comment can be generated for the mention.
    take_snapshot(issue, user=creation_status.project.owner)
    payload = {
        "commits": [
            {
                "message": """test message
test TG-%s ok
bye!
""" % (issue.ref),
                "author": {
                    "username": "test",
                },
            }
        ],
        "repository": {
            "html_url": "http://test-url/test/project"
        }
    }
    mail.outbox = []  # reset so the assertion counts only mail produced by this event
    ev_hook = event_hooks.PushEventHook(issue.project, payload)
    ev_hook.process_event()
    issue_history = get_history_queryset_by_model_instance(issue)
    assert issue_history.count() == 1
    assert issue_history[0].comment.startswith("This issue has been mentioned by")
    assert len(mail.outbox) == 1
def test_push_event_task_mention(client):
    """A commit that references "TG-<ref>" without a status slug adds a
    "mentioned by" history comment to the task and sends one email."""
    creation_status = f.TaskStatusFactory()
    role = f.RoleFactory(project=creation_status.project, permissions=["view_tasks"])
    f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
    task = f.TaskFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    # Snapshot needed so the history diff/comment can be generated for the mention.
    take_snapshot(task, user=creation_status.project.owner)
    payload = {
        "commits": [
            {
                "message": """test message
test TG-%s ok
bye!
""" % (task.ref),
                "author": {
                    "username": "test",
                },
            }
        ],
        "repository": {
            "html_url": "http://test-url/test/project"
        }
    }
    mail.outbox = []  # reset so the assertion counts only mail produced by this event
    ev_hook = event_hooks.PushEventHook(task.project, payload)
    ev_hook.process_event()
    task_history = get_history_queryset_by_model_instance(task)
    assert task_history.count() == 1
    assert task_history[0].comment.startswith("This task has been mentioned by")
    assert len(mail.outbox) == 1
def test_push_event_user_story_mention(client):
    """A commit that references "TG-<ref>" without a status slug adds a
    "mentioned by" history comment to the user story and sends one email."""
    creation_status = f.UserStoryStatusFactory()
    role = f.RoleFactory(project=creation_status.project, permissions=["view_us"])
    f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
    user_story = f.UserStoryFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    # Snapshot needed so the history diff/comment can be generated for the mention.
    take_snapshot(user_story, user=creation_status.project.owner)
    payload = {
        "commits": [
            {
                "message": """test message
test TG-%s ok
bye!
""" % (user_story.ref),
                "author": {
                    "username": "test",
                },
            }
        ],
        "repository": {
            "html_url": "http://test-url/test/project"
        }
    }
    mail.outbox = []  # reset so the assertion counts only mail produced by this event
    ev_hook = event_hooks.PushEventHook(user_story.project, payload)
    ev_hook.process_event()
    us_history = get_history_queryset_by_model_instance(user_story)
    assert us_history.count() == 1
    assert us_history[0].comment.startswith("This user story has been mentioned by")
    assert len(mail.outbox) == 1
def test_push_event_multiple_actions(client):
    """One commit message may carry several "TG-<ref> #<slug>" actions: both
    referenced issues change status, producing one email each (two total)."""
    creation_status = f.IssueStatusFactory()
    role = f.RoleFactory(project=creation_status.project, permissions=["view_issues"])
    f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
    new_status = f.IssueStatusFactory(project=creation_status.project)
    issue1 = f.IssueFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    issue2 = f.IssueFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    payload = {
        "commits": [
            {
                "message": """test message
test TG-%s #%s ok
test TG-%s #%s ok
bye!
""" % (issue1.ref, new_status.slug, issue2.ref, new_status.slug),
                "author": {
                    "username": "test",
                },
            }
        ],
        "repository": {
            "html_url": "http://test-url/test/project"
        }
    }
    mail.outbox = []  # reset so the assertion counts only mail produced by this event
    ev_hook1 = event_hooks.PushEventHook(issue1.project, payload)
    ev_hook1.process_event()
    issue1 = Issue.objects.get(id=issue1.id)  # reload both issues after processing
    issue2 = Issue.objects.get(id=issue2.id)
    assert issue1.status.id == new_status.id
    assert issue2.status.id == new_status.id
    assert len(mail.outbox) == 2  # one notification per modified issue
def test_push_event_processing_case_insensitive(client):
    """The "tg-<ref> #<SLUG>" pattern is matched case-insensitively: lowercase
    prefix and uppercased slug still move the task to the new status."""
    creation_status = f.TaskStatusFactory()
    role = f.RoleFactory(project=creation_status.project, permissions=["view_tasks"])
    f.MembershipFactory(project=creation_status.project, role=role, user=creation_status.project.owner)
    new_status = f.TaskStatusFactory(project=creation_status.project)
    task = f.TaskFactory.create(status=creation_status, project=creation_status.project, owner=creation_status.project.owner)
    payload = {
        "commits": [
            {
                "message": """test message
test tg-%s #%s ok
bye!
""" % (task.ref, new_status.slug.upper()),
                "author": {
                    "username": "test",
                },
            }
        ],
        "repository": {
            "html_url": "http://test-url/test/project"
        }
    }
    mail.outbox = []  # reset so the assertion counts only mail produced by this event
    ev_hook = event_hooks.PushEventHook(task.project, payload)
    ev_hook.process_event()
    task = Task.objects.get(id=task.id)  # reload: status was changed in the DB
    assert task.status.id == new_status.id
    assert len(mail.outbox) == 1
def test_push_event_task_bad_processing_non_existing_ref(client):
    """Referencing a ref that does not exist raises ActionSyntaxException and
    sends no mail."""
    issue_status = f.IssueStatusFactory()
    payload = {
        "commits": [
            {
                "message": """test message
test TG-6666666 #%s ok
bye!
""" % (issue_status.slug),
                "author": {
                    "username": "test",
                },
            }
        ],
        "repository": {
            "html_url": "http://test-url/test/project"
        }
    }
    mail.outbox = []
    ev_hook = event_hooks.PushEventHook(issue_status.project, payload)
    with pytest.raises(ActionSyntaxException) as excinfo:
        ev_hook.process_event()
    assert str(excinfo.value) == "The referenced element doesn't exist"
    assert len(mail.outbox) == 0  # nothing changed, so nothing is notified
def test_push_event_us_bad_processing_non_existing_status(client):
    """Referencing a user story with an unknown status slug raises
    ActionSyntaxException and sends no mail."""
    user_story = f.UserStoryFactory.create()
    payload = {
        "commits": [
            {
                "message": """test message
test TG-%s #non-existing-slug ok
bye!
""" % (user_story.ref),
                "author": {
                    "username": "test",
                },
            }
        ],
        "repository": {
            "html_url": "http://test-url/test/project"
        }
    }
    mail.outbox = []
    ev_hook = event_hooks.PushEventHook(user_story.project, payload)
    with pytest.raises(ActionSyntaxException) as excinfo:
        ev_hook.process_event()
    assert str(excinfo.value) == "The status doesn't exist"
    assert len(mail.outbox) == 0  # nothing changed, so nothing is notified
def test_push_event_bad_processing_non_existing_status(client):
    """Referencing an issue with an unknown status slug raises
    ActionSyntaxException and sends no mail."""
    issue = f.IssueFactory.create()
    payload = {
        "commits": [
            {
                "message": """test message
test TG-%s #non-existing-slug ok
bye!
""" % (issue.ref),
                "author": {
                    "username": "test",
                },
            }
        ],
        "repository": {
            "html_url": "http://test-url/test/project"
        }
    }
    mail.outbox = []
    ev_hook = event_hooks.PushEventHook(issue.project, payload)
    with pytest.raises(ActionSyntaxException) as excinfo:
        ev_hook.process_event()
    assert str(excinfo.value) == "The status doesn't exist"
    assert len(mail.outbox) == 0  # nothing changed, so nothing is notified
def test_api_get_project_modules(client):
    """GET projects-modules exposes a "gogs" entry with a non-empty generated
    secret and webhooks URL for project admins."""
    project = f.create_project()
    f.MembershipFactory(project=project, user=project.owner, is_admin=True)
    url = reverse("projects-modules", args=(project.id,))
    client.login(project.owner)
    response = client.get(url)
    assert response.status_code == 200
    content = response.data
    assert "gogs" in content
    assert content["gogs"]["secret"] != ""
    assert content["gogs"]["webhooks_url"] != ""
def test_api_patch_project_modules(client):
    """PATCH projects-modules stores a caller-supplied secret, while the
    webhooks URL stays server-generated (client-sent values are ignored)."""
    project = f.create_project()
    f.MembershipFactory(project=project, user=project.owner, is_admin=True)
    url = reverse("projects-modules", args=(project.id,))
    client.login(project.owner)
    data = {
        "gogs": {
            "secret": "test_secret",
            "html_url": "test_url",
        }
    }
    response = client.patch(url, json.dumps(data), content_type="application/json")
    assert response.status_code == 204
    config = services.get_modules_config(project).config
    assert "gogs" in config
    assert config["gogs"]["secret"] == "test_secret"
    # the webhooks URL is derived server-side; the PATCHed value must not leak in
    assert config["gogs"]["webhooks_url"] != "test_url"
def test_replace_gogs_references():
    """replace_gogs_references turns "#N" tokens into markdown links to the Gogs
    issue, preserving surrounding whitespace; non-numeric refs and None message
    are left alone / normalized to ""."""
    # Called unbound with None as self: the method does not use instance state.
    ev_hook = event_hooks.BaseGogsEventHook
    assert ev_hook.replace_gogs_references(None, "project-url", "#2") == "[Gogs#2](project-url/issues/2)"
    assert ev_hook.replace_gogs_references(None, "project-url", "#2 ") == "[Gogs#2](project-url/issues/2) "
    assert ev_hook.replace_gogs_references(None, "project-url", " #2 ") == " [Gogs#2](project-url/issues/2) "
    assert ev_hook.replace_gogs_references(None, "project-url", " #2") == " [Gogs#2](project-url/issues/2)"
    assert ev_hook.replace_gogs_references(None, "project-url", "#test") == "#test"
    assert ev_hook.replace_gogs_references(None, "project-url", None) == ""
|
import shopify
from kss.customer import Customer
# Shipping configuration.
# group_N lists the destination countries billed from price table table_N;
# table_2 (not backed by a group list) covers United States / Canada / Puerto
# Rico, which are special-cased in parse_shipping_group. Each table is indexed
# by the shipping class returned by parse_shipping_class (0..4).
shipping = {
    # Europe plus selected developed Asia-Pacific destinations.
    'group_0': tuple(sorted([
        'Asendia', 'Åland Islands', 'Albania', 'Andorra', 'Armenia', 'Austria', 'Belarus',
        'Belgium', 'Bosnia & Herzegovina', 'Bouvet Island', 'Bulgaria', 'Croatia', 'Cyprus',
        'Czechia', 'Denmark', 'Estonia', 'Faroe Islands', 'Finland', 'France', 'Georgia',
        'Germany', 'Gibraltar', 'Greece', 'Greenland', 'Guadeloupe', 'Guernsey', 'Vatican City',
        'Hungary', 'Iceland', 'Ireland', 'Isle Of Man', 'Italy', 'Jersey', 'Kosovo', 'Latvia',
        'Liechtenstein', 'Lithuania', 'Luxembourg', 'Malta', 'Mayotte', 'Moldova', 'Monaco',
        'Montenegro', 'Netherlands', 'North Macedonia', 'Norway', 'Poland', 'Portugal', 'Réunion',
        'Romania', 'San Marino', 'Serbia', 'Slovakia', 'Slovenia', 'Spain', 'Svalbard & Jan Mayen',
        'Sweden', 'Switzerland', 'Turkey', 'Ukraine', 'United Kingdom', 'Hong Kong SAR', 'Israel',
        'Japan', 'Macao SAR', 'Russia', 'Singapore', 'South Korea', 'Australia', 'New Zealand'
    ])),
    # Rest of world (more expensive table).
    'group_1': tuple(sorted([
        'Afghanistan', 'China', 'Indonesia', 'Philippines', 'Saudi Arabia', 'Taiwan',
        'United Arab Emirates', 'Vietnam', 'Argentina', 'Bolivia', 'Brazil', 'Caribbean Netherlands',
        'Chile', 'Colombia', 'Ecuador', 'Falkland Islands', 'French Guiana', 'Guyana', 'Paraguay',
        'Peru', 'Suriname', 'Uruguay', 'Venezuela', 'Cook Islands', 'Fiji', 'French Polynesia',
        'Kiribati', 'Nauru', 'New Caledonia', 'Niue', 'Norfolk Island', 'Papua New Guinea',
        'Pitcairn Islands', 'Samoa', 'Solomon Islands', 'Timor-Leste', 'Tokelau', 'Tonga',
        'Tuvalu', 'Vanuatu', 'Wallis & Futuna', 'Mexico', 'French Southern Territories',
        'Heard & McDonald Islands', 'South Georgia & South Sandwich Islands'
    ])),
    # Prices per shipping class (see parse_shipping_class for the class bands).
    'table_0': (18.0, 23.0, 25.0, 37.0, 47.0),
    'table_1': (25.0, 40.0, 50.0, 100.0, 140.0),
    'table_2': (10.0, 15.0, 20.0, 40.0, 50.0),  # United States, Canada, Puerto Rico
}
def name_split(string: str, firstname=True):
    """Split a full name into first name or the remainder.

    :param string: full name, whitespace-separated ("john r tolkien")
    :param firstname: True -> title-cased first word; False -> title-cased
        remainder ('' when there is no remainder)
    :return: the requested part, or '' for empty/whitespace-only input
        (previously raised IndexError when firstname=True)
    """
    parts = string.split()
    if not parts:
        return ''
    if firstname:
        return parts[0].title()
    return '' if len(parts) == 1 else ' '.join(parts[1:]).title()
def parse_shipping_class(price: float):
    """Map an item price to a shipping class (index 0..4 into a price table).

    Bugfix: the original used ``price in range(a, b)``, which is only True for
    exact integers — any float price (e.g. 39.99 or 69.50 outside one special
    band) silently fell through to None. Chained comparisons preserve the
    integer bands while also covering fractional prices.

    :param price: item price in shop currency
    :return: shipping class 0-4, or None when the price is outside every band
    """
    if 0 <= price <= 40:
        return 0
    elif 50 <= price < 69.5:
        return 1
    elif 69.5 <= price <= 79:
        return 2
    elif 80 <= price <= 150:
        return 3
    elif 151 <= price <= 200:
        return 4
    # Prices in the gaps (40-50, 79-80, 150-151) or above 200 have no class.
    return None
def parse_shipping_group(country: str):
    """Resolve a destination country to its price-table index.

    :param country: country name (case-insensitive, title-cased for lookup)
    :return: 2 for US/Canada/Puerto Rico, 1 or 0 for the configured country
        groups, None for unsupported destinations
    """
    normalized = country.title()
    if normalized in ("United States", "Canada", "Puerto Rico"):
        return 2
    if normalized in shipping['group_1']:
        return 1
    if normalized in shipping['group_0']:
        return 0
    return None
def find_shipping_title(country: str):
    """Return the human-readable shipping-rate title for a destination.

    :param country: country name (case-insensitive)
    :return: one of "Standard International", "Standard",
        "Economy International", or "Undefined" for unsupported countries
    """
    normalized = country.title()
    if normalized in ("United States", "Puerto Rico"):
        return "Standard International"
    if normalized == "Canada":
        return "Standard"
    if normalized in shipping['group_0'] or normalized in shipping['group_1']:
        return "Economy International"
    return "Undefined"
def find_shipping_price(country: str, price: float):
    """Look up the shipping cost for a destination and item price.

    :param country: destination country name
    :param price: item price, used to pick the shipping class band
    :return: price from the matching shipping table, or 0.0 when either the
        country or the price has no configured band
    """
    group = parse_shipping_group(country)
    klass = parse_shipping_class(price)
    if group is None or klass is None:
        return 0.0
    return shipping[f"table_{group}"][klass]
def single_draft(customer: Customer):
    """Build (without saving) a Shopify draft order for one customer.

    The single line item is fully discounted (100%), so only the shipping line
    carries a charge; shipping title/price derive from the customer's country
    and the item price. Assumes customer.line_info maps exactly one item title
    to its price — only the first entry is used (TODO confirm with the code
    that populates Customer.line_info).

    :param customer: customer record with 'email', 'shipping_details' and
        line_info populated
    :return: an unsaved shopify.DraftOrder
    """
    order = shopify.DraftOrder({
        'email': customer.data['email'],
        'customer': shopify.Customer({
            'email': customer.data['email'],
            'first_name': name_split(customer.data['shipping_details']['name']),
            'last_name': name_split(customer.data['shipping_details']['name'], False),
            'phone': customer.data['shipping_details']['phone_number']
        }),
        # Shipping and billing addresses are intentionally identical copies.
        'shipping_address': shopify.ShippingAddress({
            'address1': customer.data['shipping_details']['address1'],
            'address2': customer.data['shipping_details']['address2'] or '',
            'city': customer.data['shipping_details']['city'],
            'country': customer.data['shipping_details']['country_name'],
            'name': customer.data['shipping_details']['name'],
            'phone': customer.data['shipping_details']['phone_number'],
            'zip': customer.data['shipping_details']['postal_code'],
            'province': customer.data['shipping_details']['state'] or '',
            'first_name': name_split(customer.data['shipping_details']['name']),
            'last_name': name_split(customer.data['shipping_details']['name'], False)
        }),
        'billing_address': shopify.BillingAddress({
            'address1': customer.data['shipping_details']['address1'],
            'address2': customer.data['shipping_details']['address2'] or '',
            'city': customer.data['shipping_details']['city'],
            'country': customer.data['shipping_details']['country_name'],
            'name': customer.data['shipping_details']['name'],
            'phone': customer.data['shipping_details']['phone_number'],
            'zip': customer.data['shipping_details']['postal_code'],
            'province': customer.data['shipping_details']['state'] or '',
            'first_name': name_split(customer.data['shipping_details']['name']),
            'last_name': name_split(customer.data['shipping_details']['name'], False)
        }),
        'line_items': [
            shopify.LineItem({
                # Only the first (title, price) pair of line_info is used.
                'title': str(tuple(customer.line_info.keys())[0]),
                'price': "%.2f" % (tuple(customer.line_info.values())[0]),
                'quantity': 1,
                'taxable': False,
                'requires_shipping': True,
                # 100% discount: the pre-order item itself is free of charge.
                'applied_discount': {
                    'title': "Custom",
                    'description': "Pre-order",
                    'value_type': "percentage",
                    'value': "100.0",
                    'amount': "%.2f" % (tuple(customer.line_info.values())[0])
                }
            })
        ],
        'tax_exempt': True,
        # Decodes the escaped heart (\u2764) into the literal character.
        'note': b"Nyahallo there, thank you so much for supporting my first launch \u2764 These are the shipping costs ^^".decode('unicode-escape'),
        'shipping_line': shopify.ShippingLine({
            'handle': None,
            'title': find_shipping_title(customer.data['shipping_details']['country_name']),
            'price': find_shipping_price(customer.data['shipping_details']['country_name'], tuple(customer.line_info.values())[0])
        })
    })
    return order
|
from database.conn import execute
def insertDVDB(lpn,vtype,video_ref,time):
    """Insert one detected vehicle row into the `vehicles` table.

    :param lpn: licence-plate number string
    :param vtype: vehicle type label; 'Car'/'Motorcycle' are normalized to the
        DB's combined categories
    :param video_ref: reference key of the source video
    :param time: timestamp within the video (numeric)
    :return: whatever database.conn.execute returns for the INSERT
    """
    # Normalize detector labels to the category names stored in the DB.
    if vtype=='Car':
        vtype='Car / Taxi'
    elif vtype=='Motorcycle':
        vtype='Motorcycle / Scooter'
    print(vtype)
    # NOTE(review): SECURITY — the query is built by string concatenation, so
    # any quote in lpn/vtype/video_ref breaks the statement and allows SQL
    # injection. Switch to a parameterized query if execute() supports
    # placeholders — verify the database.conn.execute API.
    inserted=execute("INSERT INTO vehicles values('"+lpn+"','"+vtype+"','"+video_ref+"',"+str(time)+")")
    return inserted
def getDVFiltered(videos,filters):
    """Fetch detected vehicles for the given videos, with optional filters.

    :param videos: iterable of video_ref keys to include (OR-ed together)
    :param filters: dict with at least 'Vehicle Number' and 'Vehicle Type'
        entries; empty string means "no filter"
    :return: result rows from execute(), or None when nothing matched

    NOTE(review): SECURITY — the query is assembled by string concatenation
    from caller-supplied values (SQL injection risk); prefer parameterized
    queries if execute() supports them.
    NOTE(review): an empty `videos` iterable produces malformed SQL — the
    trailing-" OR" trim below then eats part of "WHERE (". Verify callers
    always pass at least one video_ref.
    """
    query="SELECT vehicle_lpn,vehicle_type,video_time,folder_name,video_name FROM vehicles INNER JOIN videos\
    ON vehicles.video_ref = videos.video_ref\
    WHERE ("
    for v in videos:
        query+=" vehicles.video_ref='"+v+"' OR"
    # Strip the trailing "OR" left by the loop and close the parenthesis.
    query = query[:-2]+") "
    # Date/time range filters are currently disabled:
    # if filters['From Date']!="":
    #     query+=" AND timestamp::date>='"+ filters['From Date']+"'"
    # if filters['To Date']!="":
    #     query+=" AND timestamp::date<='"+filters['To Date']+"'"
    # if filters['From Time']!="":
    #     fromtime=filters['From Time']
    #     query+=" AND video_time>="+str(fromtime)+""
    # if filters['To Time']!="":
    #     totime=filters['To Time']
    #     query+=" AND video_time<="+str(totime)+""
    if filters['Vehicle Number']!="":
        # iLIKE: case-insensitive substring match (PostgreSQL-specific).
        query+=" AND vehicle_lpn iLIKE '%"+filters['Vehicle Number']+"%'"
    if filters['Vehicle Type']!="":
        query+=" AND vehicle_type='"+filters['Vehicle Type']+"'"
    # Colour filter currently disabled:
    # if filters['Color']!="":
    #     query+=" AND vehicle_color='"+filters['Color']+"'"
    query+=" ORDER BY folder_name,video_name, video_time "
    data=execute(query)
    if data:
        return data
    return None
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2020-2021 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import rospy
import torch
import numpy as np
from sensor_msgs.msg import Image as ROS_Image
from opendr_bridge import ROSBridge
import os
from opendr.perception.multimodal_human_centric import RgbdHandGestureLearner
from opendr.engine.data import Image
from vision_msgs.msg import Classification2D
import message_filters
import cv2
class RgbdHandGestureNode:
    """ROS node that classifies hand gestures from synchronized RGB + depth frames
    and publishes the predicted class on a Classification2D topic."""

    def __init__(self, input_image_topic="/usb_cam/image_raw", input_depth_image_topic="/usb_cam/image_raw",
                 gesture_annotations_topic="/opendr/gestures", device="cuda"):
        """
        Creates a ROS Node for gesture recognition from RGBD
        :param input_image_topic: Topic from which we are reading the input image
        :type input_image_topic: str
        :param input_depth_image_topic: Topic from which we are reading the input depth image
        :type input_depth_image_topic: str
        :param gesture_annotations_topic: Topic to which we are publishing the predicted gesture class
        :type gesture_annotations_topic: str
        :param device: device on which we are running inference ('cpu' or 'cuda')
        :type device: str
        """
        self.gesture_publisher = rospy.Publisher(gesture_annotations_topic, Classification2D, queue_size=10)
        image_sub = message_filters.Subscriber(input_image_topic, ROS_Image)
        depth_sub = message_filters.Subscriber(input_depth_image_topic, ROS_Image)
        # synchronize image and depth data topics so callback gets matching pairs
        ts = message_filters.TimeSynchronizer([image_sub, depth_sub], 10)
        ts.registerCallback(self.callback)
        self.bridge = ROSBridge()
        # Initialize the gesture recognition learner; weights are downloaded on
        # first run and cached in ./mobilenet_v2.
        self.gesture_learner = RgbdHandGestureLearner(n_class=16, architecture="mobilenet_v2", device=device)
        model_path = './mobilenet_v2'
        if not os.path.exists(model_path):
            self.gesture_learner.download(path=model_path)
        self.gesture_learner.load(path=model_path)
        # mean and std for preprocessing, based on HANDS dataset
        # (3 RGB channels + 1 depth channel, applied channels-last)
        self.mean = np.asarray([0.485, 0.456, 0.406, 0.0303]).reshape(1, 1, 4)
        self.std = np.asarray([0.229, 0.224, 0.225, 0.0353]).reshape(1, 1, 4)

    def listen(self):
        """
        Start the node and begin processing input data
        """
        rospy.init_node('opendr_gesture_recognition', anonymous=True)
        rospy.loginfo("RGBD gesture recognition node started!")
        rospy.spin()

    def callback(self, image_data, depth_data):
        """
        Callback that process the input data and publishes to the corresponding topics
        :param image_data: input image message
        :type image_data: sensor_msgs.msg.Image
        :param depth_data: input depth image message
        :type depth_data: sensor_msgs.msg.Image
        """
        # Convert sensor_msgs.msg.Image into OpenDR Image and preprocess
        image = self.bridge.from_ros_image(image_data, encoding='bgr8')
        # Force 16-bit mono interpretation of the depth message before conversion.
        depth_data.encoding = 'mono16'
        depth_image = self.bridge.from_ros_image_to_depth(depth_data, encoding='mono16')
        img = self.preprocess(image, depth_image)
        # Run gesture recognition
        gesture_class = self.gesture_learner.infer(img)
        # Publish results
        ros_gesture = self.bridge.from_category_to_rosclass(gesture_class)
        self.gesture_publisher.publish(ros_gesture)

    def preprocess(self, image, depth_img):
        '''
        Preprocess image, depth_image and concatenate them into a single
        4-channel, standardized 224x224 OpenDR Image for inference.
        :param image: input RGB image
        :type image: engine.data.Image
        :param depth_img: input depth image
        :type depth_img: engine.data.Image
        '''
        # Scale to [0, 1]: RGB is 8-bit, depth is 16-bit.
        image = image.convert(format='channels_last') / (2**8 - 1)
        depth_img = depth_img.convert(format='channels_last') / (2**16 - 1)
        # resize the images to 224x224 (model input size)
        image = cv2.resize(image, (224, 224))
        depth_img = cv2.resize(depth_img, (224, 224))
        # concatenate into RGB-D and standardize with the HANDS statistics
        img = np.concatenate([image, np.expand_dims(depth_img, axis=-1)], axis=-1)
        img = (img - self.mean) / self.std
        img = Image(img, dtype=np.float32)
        return img
if __name__ == '__main__':
    # Select the computation device: prefer CUDA when available.
    try:
        device = 'cuda' if torch.cuda.is_available() else 'cpu'
    except Exception:
        # Narrowed from a bare `except:` (which also swallowed KeyboardInterrupt
        # / SystemExit). A broken CUDA/torch install falls back to CPU.
        device = 'cpu'
    # default topics are according to kinectv2 drivers at https://github.com/OpenKinect/libfreenect2
    # and https://github.com/code-iai/iai_kinect2
    depth_topic = "/kinect2/qhd/image_depth_rect"
    image_topic = "/kinect2/qhd/image_color_rect"
    gesture_node = RgbdHandGestureNode(input_image_topic=image_topic, input_depth_image_topic=depth_topic, device=device)
    gesture_node.listen()
|
# This file is part of Moksha.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup the moksha application"""
import logging
import transaction
from tg import config
from paste.deploy import appconfig
from moksha.config.environment import load_environment
log = logging.getLogger(__name__)
def setup_config(command, filename, section, vars):
    """Place any commands to setup moksha here.

    Paste-deploy setup hook (Python 2): loads the app config, creates all DB
    tables, and seeds default users/groups/permissions. Passwords below are
    development fixtures only.
    """
    conf = appconfig('config:' + filename)
    load_environment(conf.global_conf, conf.local_conf)
    # Load the models
    from moksha import model
    print "Creating tables"
    model.metadata.create_all(bind=config['pylons.app_globals'].sa_engine)
    # Seed a manager user, group and permission...
    u = model.User()
    u.user_name = u'manager'
    u.display_name = u'Example manager'
    u.email_address = u'manager@somedomain.com'
    u.password = u'managepass'
    model.DBSession.add(u)
    g = model.Group()
    g.group_name = u'managers'
    g.display_name = u'Managers Group'
    g.users.append(u)
    model.DBSession.add(g)
    p = model.Permission()
    p.permission_name = u'manage'
    p.description = u'This permission give an administrative right to the bearer'
    p.groups.append(g)
    model.DBSession.add(p)
    # ...and a plain editor user.
    u1 = model.User()
    u1.user_name = u'editor'
    u1.display_name = u'Example editor'
    u1.email_address = u'editor@somedomain.com'
    u1.password = u'editpass'
    model.DBSession.add(u1)
    model.DBSession.flush()
    # Create some knowledge
    #from moksha.apps.knowledge.model import Entity
    #snake = Entity(u'Python')
    #snake[u'type'] = 'snake'
    #model.DBSession.add(snake)
    # Commit through the transaction manager (required by zope.sqlalchemy).
    transaction.commit()
    print "Successfully setup"
|
from goopylib.objects.GraphicsObject import GraphicsObject
from goopylib.styles import *
from goopylib.constants import _root, ALL_CHARACTERS, ALIGN_OPTIONS
from tkinter import StringVar as tkStringVar
from tkinter import Entry as tkEntry
from tkinter import Frame as tkFrame
from tkinter import END as tkEND
class Entry(GraphicsObject):
    def __init__(self, p, text_width=10, fill=None, font_colour=None, font_face=None, font_size=None,
                 outline=None, font_style=None, outline_width=None, border_relief="flat", password=False, layer=0,
                 active="NORMAL", justify="left", cursor="xterm", select_colour=None, prompt_text="", tag=None,
                 bounds=None, align="center", text="", max_characters=None):
        """A tkinter-backed single-line text input anchored at point p.

        Style-typed arguments (fill, font_*, select_colour, ...) fall back to
        the "default" entry of STYLES when not given an explicit valid value.
        When password=True the typed text is masked with '*'; prompt_text is
        shown unmasked until the first user edit (see _on_edit).
        """
        self.anchor = p.copy()
        if not isinstance(text_width, int):
            raise GraphicsError(f"Text Width for the Entry must be an integer, not {text_width}")
        self.text_width = text_width  # width in characters, not pixels
        if align not in ALIGN_OPTIONS:
            raise GraphicsError(f"\n\nGraphicsError: Image align must be one of {ALIGN_OPTIONS}, not {align}")
        self.align = align
        # tkStringVar bound to the widget; _on_edit traces every write to it.
        self.text = tkStringVar(_root)
        self.text.set(text)
        self.was_edited_since_last_call = False
        # Each styling option: keep the caller's value if it has the right
        # type, otherwise fall back to the default style.
        if isinstance(fill, Colour):
            self.fill = fill
        else:
            self.fill = STYLES["default"]["fill"]
        if isinstance(select_colour, Colour):
            self.select_colour = select_colour
        else:
            self.select_colour = STYLES["default"]["select colour"]
        if isinstance(font_colour, Colour):
            self.font_colour = font_colour
        else:
            self.font_colour = STYLES["default"]["font colour"]
        if isinstance(outline_width, int):
            self.outline_width = outline_width
        else:
            self.outline_width = STYLES["default"]["entry width"]
        if isinstance(font_size, int):
            self.font_size = font_size
        else:
            self.font_size = STYLES["default"]["font size"]
        if isinstance(font_style, str):
            self.font_style = font_style
        else:
            self.font_style = STYLES["default"]["font style"]
        if isinstance(font_face, str):
            self.font = font_face
        else:
            self.font = STYLES["default"]["font face"]
        if isinstance(justify, str):
            self.justify = justify
        else:
            self.justify = STYLES["default"]["justify"]
        # The tkinter Entry widget itself is created lazily in _draw.
        self.entry = None
        self.border_type = border_relief
        if password:
            self.text_type = "*"  # mask character shown instead of typed text
        else:
            self.text_type = ""
        self.enabled = "normal"
        self.cursor = cursor
        self.prompt_text = prompt_text
        # Unscaled font size, kept so redraws on rescaled canvases stay stable.
        self.initial_font_size = self.font_size
        self.edited = False  # becomes True on the first user edit
        self.allowed_symbols = ALL_CHARACTERS
        self.max_characters = None
        self.set_max_characters(max_characters)
        self.last_graphwin = None  # last canvas drawn on; frame rebuilt on change
        GraphicsObject.__init__(self, (), layer=layer, tag=tag, bounds=bounds)
def __repr__(self):
if self.drawn:
return f"Entry({self.anchor}, {self.get_text()})"
else:
return f"Entry({self.anchor})"
    def _draw(self, canvas, options):
        # Create (or re-create) the tkinter Entry inside a Frame and place it
        # on the canvas as an embedded window. Returns the canvas window id.
        x, y = self.anchor
        if self.last_graphwin != canvas:
            # First draw on this canvas: build a fresh frame and rescale the
            # font from the unscaled initial size to this canvas' x-scale.
            self.frm = tkFrame(canvas.master)
            self.set_font_size(int(self.initial_font_size / canvas.trans.x_scale), False)
            self.last_graphwin = canvas
        self.entry = tkEntry(self.frm, width=self.text_width, textvariable=self.text, bg=self.fill, fg=self.font_colour,
                             bd=self.outline_width, font=(self.font, self.font_size, self.font_style),
                             insertbackground=self.font_colour, show=self.text_type, state=self.enabled,
                             justify=self.justify, cursor="xterm", exportselection=0,
                             selectbackground=self.select_colour, insertborderwidth=0)
        if not self.edited:
            # No user input yet: show the prompt text, unmasked even for
            # password entries (masking resumes in _on_edit).
            self.entry.insert(0, self.prompt_text)
            self.entry.config(show="")
        self.entry.pack()
        self.entry.bind("<Return>", self._on_enter)
        # Trace every write to the bound variable to enforce input rules.
        self.text.trace('w', self._on_edit)
        width = self.get_width()
        height = self.get_height()
        # Offset the anchor so the widget is aligned as requested. The offsets
        # approximate the widget's half-extent from font size and text width.
        if self.align == "center":
            pass
        elif self.align == "bottom":
            y -= self.font_size / 2
        elif self.align == "top":
            y += self.font_size / 2
        elif self.align == "left":
            x += self.font_size * self.text_width / 2
        elif self.align == "right":
            x -= self.font_size * self.text_width / 2
        elif self.align == "bottomleft":
            y -= self.font_size / 2
            x += self.font_size * self.text_width / 2
        elif self.align == "bottomright":
            y -= self.font_size / 2
            x -= self.font_size * self.text_width / 2
        elif self.align == "topleft":
            y += self.font_size / 2
            x += self.font_size * self.text_width / 2
        elif self.align == "topright":
            y += self.font_size / 2
            x -= self.font_size * self.text_width / 2
        x, y = canvas.to_screen(x, y)
        return canvas.create_window(x, y, window=self.frm)
    def _move(self, dx, dy):
        # Shift the anchor point; the embedded window follows on the next draw.
        self.anchor.move(dx, dy)
def _in_allowed(self, c):
return c in self.allowed_symbols
def was_edited(self):
if self.was_edited_since_last_call:
self.was_edited_since_last_call = False
return True
return False
    def _on_edit(self, *args):
        # tkStringVar write-trace callback: enforces the character limit and
        # the allowed-character whitelist on every change to the entry text.
        text = self.text.get()
        if self.max_characters is not None:
            if len(text) > self.max_characters:
                # Drop the newest character; the set() re-fires this trace once.
                self.text.set(text[:-1])
                return
        # Strip any characters outside the whitelist.
        corrected = ''.join(filter(self._in_allowed, text))
        self.text.set(corrected)
        if not self.edited:
            # First real edit: remove the prompt text inserted by _draw and,
            # for password entries, start masking the input.
            self.edited = True
            self.set_text(corrected.replace(self.prompt_text, "", 1))
            if self.text_type == "*":
                self.entry.config(show="*")
        # NOTE(review): original indentation was ambiguous; this flag is set on
        # every edit to match was_edited()'s "edited since last call" contract.
        self.was_edited_since_last_call = True
    def _on_enter(self, e):
        # <Return> key hook, bound in _draw; intentionally a no-op by default.
        pass
def is_clicked(self, mouse_pos):
if self.bounds is None:
if self.entry:
if mouse_pos is not None:
width, height = self.get_width(), self.get_height()
if (self.anchor[0] - width / 2 > mouse_pos[0] > self.anchor[0] + width / 2) and \
(self.anchor[1] - height / 2 > mouse_pos[1] > self.anchor[1] + height / 2):
return True
return False
return self.bounds.is_clicked(mouse_pos)
def allow_character(self, character):
if character not in self.allowed_symbols and character in ALL_CHARACTERS:
self.allowed_symbols.append(character)
return self
def allow_only_numeric(self, allow=True):
if allow:
self.allowed_symbols = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', "-", "."]
else:
self.allowed_symbols = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y',
'Z', '!', '"', '#', '$', '%', '&', '\\', "'", '(', ')', '*', '+', ',', '-', '.',
'/', ':', ';', '<', '=', '>', '?', '@', '[', '\'', '"', ']', '^', '_', '`', '{',
'|', '}', '~', ':', " "]
return self
def allow_only_integers(self, allow=True):
if allow:
self.allowed_symbols = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', "-"]
else:
self.allowed_symbols = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y',
'Z', '!', '"', '#', '$', '%', '&', '\\', "'", '(', ')', '*', '+', ',', '-', '.',
'/', ':', ';', '<', '=', '>', '?', '@', '[', '\'', '"', ']', '^', '_', '`', '{',
'|', '}', '~', ':', " "]
return self
def allow_only_positive_integers(self, allow=True):
if allow:
self.allowed_symbols = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
else:
self.allowed_symbols = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q',
'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H',
'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y',
'Z', '!', '"', '#', '$', '%', '&', '\\', "'", '(', ')', '*', '+', ',', '-', '.',
'/', ':', ';', '<', '=', '>', '?', '@', '[', '\'', '"', ']', '^', '_', '`', '{',
'|', '}', '~', ':', " "]
return self
def allow_all_characters(self):
self.allowed_symbols = ALL_CHARACTERS
def allow_only_alpha(self, allow=True):
if allow:
self.allowed_symbols = self.allowed_symbols = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
'm','n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x',
'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J',
'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
'W', 'X', 'Y', 'Z', " "]
else:
self.allowed_symbols = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '!', '"', '#', '$', '%', '&',
'\\', "'", '(', ')', '*', '+', ',', '-', '.', '/', ':', ';', '<', '=', '>', '?',
'@', '[', '\'', '"', ']', '^', '_', '`', '{', '|', '}', '~', ':', " "]
return self
def allow_only_custom(self, custom):
if not set(custom).issubset(set(ALL_CHARACTERS)):
raise GraphicsError("The Custom list of character contains invalid characters!")
self.allowed_symbols = custom.copy()
return self
    def set_max_characters(self, max_characters=None):
        # Length limit enforced in _on_edit; None disables the limit. Chainable.
        self.max_characters = max_characters
        return self
    def hide_cursor(self):
        # Stop the insertion caret from blinking on (insertontime=0 hides it).
        self.entry.config(insertontime=0)

    def show_cursor(self):
        # Restore the blinking insertion caret.
        self.entry.config(insertontime=1)
def disable(self):
self.enabled = "disabled"
if self.entry:
self.entry.config(state=self.enabled)
def enable(self):
self.enabled = "normal"
if self.entry:
self.entry.config(state=self.enabled)
def toggle_enabled(self):
self.enabled = ["disabled", "normal"].index(int(self.enabled))
if self.entry:
self.entry.config(state=self.enabled)
def set_enabled(self, active):
if active not in ["normal", "disabled", "readonly"]:
raise GraphicsError("\n\nState must be either normal, disabled or readonly")
self.enabled = active
if self.entry:
self.entry.config(state=self.enabled)
def set_text_type(self, text_type):
self.text = text_type
if self.entry:
self.entry.config(show=self.text)
    def get_text(self):
        # Current contents of the bound tkStringVar.
        return self.text.get()

    def get_anchor(self):
        # A copy of the anchor point (mutating it does not move the entry).
        return self.anchor.clone()

    def get_width(self):
        # On-screen pixel width; returns None when the widget is not drawn yet.
        if self.entry:
            return self.entry.winfo_width()

    def get_height(self):
        # On-screen pixel height; returns None when the widget is not drawn yet.
        if self.entry:
            return self.entry.winfo_height()

    def get_font_size(self):
        return self.font_size

    def get_font_face(self):
        return self.font

    def get_font_colour(self):
        return self.font_colour

    def get_font_style(self):
        return self.font_style

    def get_justify(self):
        return self.justify

    def get_selection_colour(self):
        return self.select_colour

    def get_maximum_characters(self):
        # None means unlimited.
        return self.max_characters

    def get_outline_width(self):
        return self.outline_width

    def get_fill(self):
        return self.fill
    def clone(self):
        # Shallow copy: only anchor, text width, config, text value and fill
        # are carried over; other styling keeps the new Entry's defaults.
        other = Entry(self.anchor, self.text_width)
        # NOTE(review): `config` presumably comes from GraphicsObject — verify
        # it exists and is a dict on this class.
        other.config = self.config.copy()
        # Fresh StringVar so the clone's text is independent of the original.
        # NOTE(review): created without the _root master used in __init__ —
        # confirm this is intentional.
        other.text = tkStringVar()
        other.text.set(self.text.get())
        other.fill = self.fill
        return other
def set_text(self, t):
self.text.set(t)
self._update_layer()
return self
def set_fill(self, colour):
self.fill = colour
if self.entry:
self.entry.config(bg=colour)
self._update_layer()
return self
def set_face(self, font_face):
if font_face in STYLES[self.style].keys():
self.font = STYLES[self.style][font_face]
elif isinstance(font_face, str):
self.font = font_face
else:
if "font face" in STYLES[self.style].keys():
self.font = STYLES[self.style]["font face"]
else:
self.font = STYLES["default"]["font face"]
if self.entry:
self.entry.config(font=(self.font, self.font_size, self.font_style))
self._update_layer()
return self
def set_font_size(self, font_size, set_initial_font_size=True):
if isinstance(font_size, int):
self.font_size = font_size
elif font_size in STYLES[self.style].keys():
self.font_size = STYLES[self.style][font_size]
else:
if "font size" in STYLES[self.style].keys():
self.font_size = STYLES[self.style]["font size"]
else:
self.font_size = STYLES["default"]["font size"]
if set_initial_font_size:
self.initial_font_size = round(font_size)
if self.entry:
self.entry.config(font=(self.font, self.font_size, self.font_style))
self._update_layer()
return self
def set_font_style(self, font_style):
if font_style in STYLES[self.style].keys():
self.font_style = STYLES[self.style][font_style]
elif isinstance(font_style, str):
self.font_style = font_style
else:
if "font style" in STYLES[self.style].keys():
self.font_style = STYLES[self.style]["font style"]
else:
self.font_style = STYLES["default"]["font style"]
if self.entry:
self.entry.config(font=(self.font, self.font_size, self.font_style))
self._update_layer()
return self
def set_text_colour(self, colour):
self.font_colour = colour
if self.entry:
self.entry.config(fg=colour)
self._update_layer()
return self
def set_border_relief(self, border):
if border not in ["flat", "groove", "raised", "ridge", "solid", "sunken"]:
raise GraphicsError("\n\nBorder type must be one of "
"['flat', 'groove', 'raised', 'ridge', 'solid', 'sunken']")
self.border_type = border
if self.entry:
self.entry.config(relief=border)
self._update_layer()
return self
def set_width(self, text_width):
if not isinstance(text_width, int):
raise GraphicsError(f"Text Width for the Entry must be an integer, not {text_width}")
self.text_width = text_width
if self.entry:
self.entry.config(width=text_width)
self._update_layer()
return self
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2018-09-26 10:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated forum migration: add a ``pinned`` flag (default False) to Thread."""
    # Must run after the migration that added Board.locked.
    dependencies = [
        ('forum', '0013_board_locked'),
    ]
    operations = [
        migrations.AddField(
            model_name='thread',
            name='pinned',
            field=models.BooleanField(default=False),
        ),
    ]
|
from ISO19115Creator import gdalreader
from ISO19115Creator import functions
import gdal
import osr
import sridentify
# Ad-hoc exploration script: reads a GeoTIFF, inspects its projection,
# resolves its EPSG code via sridentify, and parses the accompanying ISO XML.
rootDir = r'D:\test\dem.tif'
# Project wrapper around GDAL: exposes getBoundingBox().
gdal_data = gdalreader.GdalData(rootDir)
print(gdal_data.getBoundingBox())
# Raw GDAL dataset for the same raster (distinct name — the original reused
# `test` for three unrelated objects).
dataset = gdal.Open(rootDir)
print(dataset.GetProjection())
print(dataset)
# Bug fix: the original called getBoundingBox() on the gdal.Dataset, which has
# no such method (it belongs to gdalreader.GdalData) and raised AttributeError.
print(gdal_data.getBoundingBox())
spatialRef = osr.SpatialReference(wkt=dataset.GetProjection())
print(spatialRef)
test2 = functions.EPSGfromWKT(spatialRef)
# Store the known WKT -> EPSG mapping in sridentify's local database.
ident = sridentify.Sridentify(prj='PROJCS["unnamed",GEOGCS["ETRS89",DATUM["unknown",SPHEROID["GRS 1980",6378137,298.2572221010042,AUTHORITY["EPSG","7019"]],AUTHORITY["EPSG","6258"]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433],AUTHORITY["EPSG","4258"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",9],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",0],UNIT["metre",1,AUTHORITY["EPSG","9001"]],AUTHORITY["EPSG","25832"]]', epsg_code=25832)
ident.save_to_db()
help(sridentify)
from owslib.etree import etree
import io
XML = r"D:\test\dem.xml"
# Close the file handle deterministically instead of leaking it.
with open(XML, 'rb') as xml_file:
    record = xml_file.read()
etree.fromstring(record)
|
#!/usr/bin/env python3
"""Advent of Code 2015 day 4: find suffixes whose MD5 hash has leading zeros."""
from hashlib import md5


def find_suffix(secret, zeros, start=1):
    """Return the smallest integer i >= start such that md5(secret + str(i))
    starts with `zeros` hexadecimal zeros."""
    prefix = "0" * zeros
    i = start
    while True:
        # Hash each candidate exactly once (the original hashed every key
        # twice per iteration — once for each zero-count check).
        if md5((secret + str(i)).encode()).hexdigest().startswith(prefix):
            return i
        i += 1


def main():
    secret = "bgvyzdsv"
    part1 = find_suffix(secret, 5)
    print("Part 1: " + str(part1))
    # The first 6-zero hash can never precede the first 5-zero hash
    # (a 6-zero prefix is also a 5-zero prefix), so resume where part 1 stopped.
    print("Part 2: " + str(find_suffix(secret, 6, start=part1)))


if __name__ == "__main__":
    main()
|
"""Manager utility implementations of grading managers."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods
# Number of methods are defined in specification
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from ..osid import managers as osid_managers
from ..osid.osid_errors import NullArgument
from ..osid.osid_errors import Unimplemented
from ..type.objects import TypeList
from dlkit.abstract_osid.grading import managers as abc_grading_managers
class GradingProfile(abc_grading_managers.GradingProfile, osid_managers.OsidProfile):
    """The ``GradingProfile`` describes the interoperability among grading services."""
    # Spec-generated utility profile: every capability query answers False and
    # every record-type accessor returns an empty TypeList, per the OSID
    # "unimplemented manager" pattern.
    def supports_visible_federation(self):
        """Tests if federation is visible.
        return: (boolean) - ``true`` if visible federation is supported
            ``,`` ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_grade_system_lookup(self):
        """Tests if a grade system lookup service is supported.
        return: (boolean) - true if grade system lookup is supported,
            false otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_grade_system_query(self):
        """Tests if a grade system query service is supported.
        return: (boolean) - ``true`` if grade system query is supported,
            ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_grade_system_search(self):
        """Tests if a grade system search service is supported.
        return: (boolean) - ``true`` if grade system search is
            supported, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_grade_system_admin(self):
        """Tests if a grade system administrative service is supported.
        return: (boolean) - ``true`` if grade system admin is supported,
            ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_grade_system_notification(self):
        """Tests if grade system notification is supported.
        Messages may be sent when grade entries are created, modified,
        or deleted.
        return: (boolean) - ``true`` if grade system notification is
            supported, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_grade_system_gradebook(self):
        """Tests if a grade system to gradebook lookup session is available.
        return: (boolean) - ``true`` if grade system gradebook lookup
            session is supported, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_grade_system_gradebook_assignment(self):
        """Tests if a grade system to gradebook assignment session is available.
        return: (boolean) - ``true`` if grade system gradebook
            assignment is supported, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_grade_system_smart_gradebook(self):
        """Tests if a grade system smart gradebook session is available.
        return: (boolean) - ``true`` if grade system smart gradebook is
            supported, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_grade_entry_lookup(self):
        """Tests if a grade entry lookup service is supported.
        return: (boolean) - true if grade entry lookup is supported,
            false otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_grade_entry_query(self):
        """Tests if a grade entry query service is supported.
        return: (boolean) - true if grade entry query is supported,
            false otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_grade_entry_search(self):
        """Tests if a grade entry search service is supported.
        return: (boolean) - ``true`` if grade entry search is supported,
            ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_grade_entry_admin(self):
        """Tests if a grade entry administrative service is supported.
        return: (boolean) - ``true`` if grade entry admin is supported,
            ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_grade_entry_notification(self):
        """Tests if grade entry notification is supported.
        return: (boolean) - ``true`` if grade entry notification is
            supported, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_gradebook_column_lookup(self):
        """Tests if a gradebook column lookup service is supported.
        return: (boolean) - true if gradebook column lookup is
            supported, false otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_gradebook_column_query(self):
        """Tests if a gradebook column query service is supported.
        return: (boolean) - ``true`` if gradebook column query is supported,
            ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_gradebook_column_search(self):
        """Tests if a gradebook column search service is supported.
        return: (boolean) - ``true`` if gradebook column search is
            supported, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_gradebook_column_admin(self):
        """Tests if a gradebook column administrative service is supported.
        return: (boolean) - ``true`` if gradebook column admin is
            supported, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_gradebook_column_notification(self):
        """Tests if gradebook column notification is supported.
        Messages may be sent when grade entries are created, modified,
        or deleted.
        return: (boolean) - ``true`` if gradebook column notification is
            supported, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_gradebook_column_gradebook(self):
        """Tests if a gradebook column to gradebook lookup session is available.
        return: (boolean) - ``true`` if gradebook column gradebook
            lookup session is supported, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_gradebook_column_gradebook_assignment(self):
        """Tests if a gradebook column to gradebook assignment session is available.
        return: (boolean) - ``true`` if gradebook column gradebook
            assignment is supported, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_gradebook_column_smart_gradebook(self):
        """Tests if a gradebook column smart gradebook session is available.
        return: (boolean) - ``true`` if gradebook column smart gradebook
            is supported, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_gradebook_lookup(self):
        """Tests if a gradebook lookup service is supported.
        return: (boolean) - ``true`` if gradebook lookup is supported,
            ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_gradebook_query(self):
        """Tests if a gradebook query service is supported.
        return: (boolean) - ``true`` if gradebook query is supported,
            ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_gradebook_search(self):
        """Tests if a gradebook search service is supported.
        return: (boolean) - ``true`` if gradebook search is supported,
            ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_gradebook_admin(self):
        """Tests if a gradebook administrative service is supported.
        return: (boolean) - ``true`` if gradebook admin is supported,
            ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_gradebook_notification(self):
        """Tests if gradebook notification is supported.
        Messages may be sent when gradebooks are created, modified, or
        deleted.
        return: (boolean) - ``true`` if gradebook notification is
            supported ``,`` ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_gradebook_hierarchy(self):
        """Tests if a gradebook hierarchy traversal is supported.
        return: (boolean) - ``true`` if a gradebook hierarchy traversal
            is supported, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_gradebook_hierarchy_design(self):
        """Tests if gradebook hierarchy design is supported.
        return: (boolean) - ``true`` if a gradebook hierarchy design is
            supported, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_grading_batch(self):
        """Tests if a grading batch service is supported.
        return: (boolean) - ``true`` if a grading batch service is
            supported, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_grading_calculation(self):
        """Tests if a grading calculation service is supported.
        return: (boolean) - ``true`` if a grading calculation service is
            supported, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def supports_grading_transform(self):
        """Tests if a grade system transform service is supported.
        return: (boolean) - ``true`` if a grading transform service is
            supported, ``false`` otherwise
        *compliance: mandatory -- This method must be implemented.*
        """
        return False
    def get_grade_record_types(self):
        """Gets the supported ``Grade`` record types.
        return: (osid.type.TypeList) - a list containing the supported
            ``Grade`` record types
        *compliance: mandatory -- This method must be implemented.*
        """
        return TypeList([])
    grade_record_types = property(fget=get_grade_record_types)
    def supports_grade_record_type(self, grade_record_type=None):
        """Tests if the given ``Grade`` record type is supported.
        arg:    grade_record_type (osid.type.Type): a ``Type``
                indicating a ``Grade`` record type
        return: (boolean) - ``true`` if the given Type is supported,
            ``false`` otherwise
        raise:  NullArgument - ``grade_record_type`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        if grade_record_type is None:
            raise NullArgument()
        return False
    def get_grade_system_record_types(self):
        """Gets the supported ``GradeSystem`` record types.
        return: (osid.type.TypeList) - a list containing the supported
            ``GradeSystem`` record types
        *compliance: mandatory -- This method must be implemented.*
        """
        return TypeList([])
    grade_system_record_types = property(fget=get_grade_system_record_types)
    def supports_grade_system_record_type(self, grade_system_record_type=None):
        """Tests if the given ``GradeSystem`` record type is supported.
        arg:    grade_system_record_type (osid.type.Type): a ``Type``
                indicating a ``GradeSystem`` record type
        return: (boolean) - ``true`` if the given Type is supported,
            ``false`` otherwise
        raise:  NullArgument - ``grade_system_record_type`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        if grade_system_record_type is None:
            raise NullArgument()
        return False
    def get_grade_system_search_record_types(self):
        """Gets the supported ``GradeSystem`` search record types.
        return: (osid.type.TypeList) - a list containing the supported
            ``GradeSystem`` search record types
        *compliance: mandatory -- This method must be implemented.*
        """
        return TypeList([])
    grade_system_search_record_types = property(fget=get_grade_system_search_record_types)
    def supports_grade_system_search_record_type(self, grade_system_search_record_type=None):
        """Tests if the given ``GradeSystem`` search record type is supported.
        arg:    grade_system_search_record_type (osid.type.Type): a
                ``Type`` indicating a ``GradeSystem`` search record type
        return: (boolean) - ``true`` if the given Type is supported,
            ``false`` otherwise
        raise:  NullArgument - ``grade_system_search_record_type`` is
            ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        if grade_system_search_record_type is None:
            raise NullArgument()
        return False
    def get_grade_entry_record_types(self):
        """Gets the supported ``GradeEntry`` record types.
        return: (osid.type.TypeList) - a list containing the supported
            ``GradeEntry`` record types
        *compliance: mandatory -- This method must be implemented.*
        """
        return TypeList([])
    grade_entry_record_types = property(fget=get_grade_entry_record_types)
    def supports_grade_entry_record_type(self, grade_entry_record_type=None):
        """Tests if the given ``GradeEntry`` record type is supported.
        arg:    grade_entry_record_type (osid.type.Type): a ``Type``
                indicating a ``GradeEntry`` record type
        return: (boolean) - ``true`` if the given Type is supported,
            ``false`` otherwise
        raise:  NullArgument - ``grade_entry_record_type`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        if grade_entry_record_type is None:
            raise NullArgument()
        return False
    def get_grade_entry_search_record_types(self):
        """Gets the supported ``GradeEntry`` search record types.
        return: (osid.type.TypeList) - a list containing the supported
            ``GradeEntry`` search record types
        *compliance: mandatory -- This method must be implemented.*
        """
        return TypeList([])
    grade_entry_search_record_types = property(fget=get_grade_entry_search_record_types)
    def supports_grade_entry_search_record_type(self, grade_entry_search_record_type=None):
        """Tests if the given ``GradeEntry`` search record type is supported.
        arg:    grade_entry_search_record_type (osid.type.Type): a
                ``Type`` indicating a ``GradeEntry`` search record type
        return: (boolean) - ``true`` if the given Type is supported,
            ``false`` otherwise
        raise:  NullArgument - ``grade_entry_search_record_type`` is
            ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        if grade_entry_search_record_type is None:
            raise NullArgument()
        return False
    def get_gradebook_column_record_types(self):
        """Gets the supported ``GradebookColumn`` record types.
        return: (osid.type.TypeList) - a list containing the supported
            ``GradebookColumn`` record types
        *compliance: mandatory -- This method must be implemented.*
        """
        return TypeList([])
    gradebook_column_record_types = property(fget=get_gradebook_column_record_types)
    def supports_gradebook_column_record_type(self, gradebook_column_record_type=None):
        """Tests if the given ``GradebookColumn`` record type is supported.
        arg:    gradebook_column_record_type (osid.type.Type): a
                ``Type`` indicating a ``GradebookColumn`` type
        return: (boolean) - ``true`` if the given gradebook column
            record ``Type`` is supported, ``false`` otherwise
        raise:  NullArgument - ``gradebook_column_record_type`` is
            ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        if gradebook_column_record_type is None:
            raise NullArgument()
        return False
    def get_gradebook_column_search_record_types(self):
        """Gets the supported gradebook column search record types.
        return: (osid.type.TypeList) - a list containing the supported
            ``GradebookColumn`` search record types
        *compliance: mandatory -- This method must be implemented.*
        """
        return TypeList([])
    gradebook_column_search_record_types = property(fget=get_gradebook_column_search_record_types)
    def supports_gradebook_column_search_record_type(self, gradebook_column_search_record_type=None):
        """Tests if the given gradebook column search record type is supported.
        arg:    gradebook_column_search_record_type (osid.type.Type): a
                ``Type`` indicating a ``GradebookColumn`` search record
                type
        return: (boolean) - ``true`` if the given search record ``Type``
            is supported, ``false`` otherwise
        raise:  NullArgument - ``gradebook_column_search_record_type``
            is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        if gradebook_column_search_record_type is None:
            raise NullArgument()
        return False
    def get_gradebook_column_summary_record_types(self):
        """Gets the supported ``GradebookColumnSummary`` record types.
        return: (osid.type.TypeList) - a list containing the supported
            ``GradebookColumnSummary`` record types
        *compliance: mandatory -- This method must be implemented.*
        """
        return TypeList([])
    gradebook_column_summary_record_types = property(fget=get_gradebook_column_summary_record_types)
    def supports_gradebook_column_summary_record_type(self, gradebook_column_summary_record_type=None):
        """Tests if the given ``GradebookColumnSummary`` record type is supported.
        arg:    gradebook_column_summary_record_type (osid.type.Type): a
                ``Type`` indicating a ``GradebookColumnSummary`` type
        return: (boolean) - ``true`` if the given gradebook column
            summary record ``Type`` is supported, ``false``
            otherwise
        raise:  NullArgument - ``gradebook_column_summary_record_type`` is
            ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        if gradebook_column_summary_record_type is None:
            raise NullArgument()
        return False
    def get_gradebook_record_types(self):
        """Gets the supported ``Gradebook`` record types.
        return: (osid.type.TypeList) - a list containing the supported
            ``Gradebook`` record types
        *compliance: mandatory -- This method must be implemented.*
        """
        return TypeList([])
    gradebook_record_types = property(fget=get_gradebook_record_types)
    def supports_gradebook_record_type(self, gradebook_record_type=None):
        """Tests if the given ``Gradebook`` record type is supported.
        arg:    gradebook_record_type (osid.type.Type): a ``Type``
                indicating a ``Gradebook`` type
        return: (boolean) - ``true`` if the given gradebook record
            ``Type`` is supported, ``false`` otherwise
        raise:  NullArgument - ``gradebook_record_type`` is ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        if gradebook_record_type is None:
            raise NullArgument()
        return False
    def get_gradebook_search_record_types(self):
        """Gets the supported gradebook search record types.
        return: (osid.type.TypeList) - a list containing the supported
            ``Gradebook`` search record types
        *compliance: mandatory -- This method must be implemented.*
        """
        return TypeList([])
    gradebook_search_record_types = property(fget=get_gradebook_search_record_types)
    def supports_gradebook_search_record_type(self, gradebook_search_record_type=None):
        """Tests if the given gradebook search record type is supported.
        arg:    gradebook_search_record_type (osid.type.Type): a
                ``Type`` indicating a ``Gradebook`` search record type
        return: (boolean) - ``true`` if the given search record ``Type``
            is supported, ``false`` otherwise
        raise:  NullArgument - ``gradebook_search_record_type`` is
            ``null``
        *compliance: mandatory -- This method must be implemented.*
        """
        if gradebook_search_record_type is None:
            raise NullArgument()
        return False
class GradingManager(abc_grading_managers.GradingManager, osid_managers.OsidManager, GradingProfile):
"""The grading manager provides access to grading sessions and provides interoperability tests for various aspects of this service.
The sessions included in this manager are:
* ``GradeSystemLookupSession:`` a session to look up grades and
grade systems
* ``GradeSystemQuerySession:`` a session to query grade systems
``None``
* ``GradeSystemSearchSession:`` a session to search grade systems
* ``GradeSystemAdminSession:`` a session to manage grade systems
* ``GradeSystemNotificationSession`` a session for subscribing to
new or deleted grades or grade systems
* ``GradeSystemGradebookSession:`` a session for retrieving grade
system to gradebook mappings
* ``GradeSystemGradebookAssignmentSession:`` a session for
managing grade system to gradebook mappings
* ``GradeSystemSmartGradebookSession:`` a session for managing
smart gradebooks of grade systems
* ``GradeEntryLookupSession:`` a session to look up grade entries
* ``GradeEntryQuerySession:`` a session to query grade entries
``None``
* ``GradeEntrySearchSession:`` a session to search grade entries
* ``GradeEntryAdminSession:`` a session to create, modify and
delete grade entries ``None``
* ``GradeEntryNotificationSession: a`` session to receive messages
pertaining to grade entry ```` changes
* ``GradebookColumnLookupSession:`` a session to look up gradebook
columns
* ``GradebookColumnQuerySession:`` a session to query gradebook
columns ``None``
* ``GradebookColumnSearchSession:`` a session to search gradebook
columns
* ``GradebookColumnAdminSession:`` a session to manage gradebook
columns
* ``GradebookColumnNotificationSession`` a session for subscribing
to new or deleted gradebook columns
* ``GradebookColumnGradebookSession:`` a session for retrieving
gradebook column to gradebook mappings
* ``GradebookColumnGradebookAssignmentSession:`` a session for
managing gradebook column to gradebook mappings
* ``GradebookColumnSmartGradebookSession:`` a session for managing
smart gradebooks of gradebook columns
* ``GradebookLookupSession:`` a session to lookup gradebooks
* ``GradebookQuerySession:`` a session to query gradebooks
* ``GradebookSearchSession`` : a session to search gradebooks
* ``GradebookAdminSession`` : a session to create, modify and
delete gradebooks
* ``GradebookNotificationSession`` : a session to receive messages
pertaining to gradebook changes
* ``GradebookHierarchySession:`` a session to traverse the
gradebook hierarchy
* ``GradebookHierarchyDesignSession:`` a session to manage the
gradebook hierarchy
"""
def get_grade_system_lookup_session(self):
"""Gets the ``OsidSession`` associated with the grade system lookup service.
return: (osid.grading.GradeSystemLookupSession) - a
``GradeSystemLookupSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_system_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_lookup()`` is ``true``.*
"""
raise Unimplemented()
grade_system_lookup_session = property(fget=get_grade_system_lookup_session)
def get_grade_system_lookup_session_for_gradebook(self, gradebook_id=None):
"""Gets the ``OsidSession`` associated with the grade system lookup service for the given gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
return: (osid.grading.GradeSystemLookupSession) - ``a
GradeSystemLookupSession``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``gradebook_id`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_grade_system_lookup()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_lookup()`` and
``supports_visible_federation()`` are ``true``.*
"""
if gradebook_id is None:
raise NullArgument
raise Unimplemented()
def get_grade_system_query_session(self):
"""Gets the ``OsidSession`` associated with the grade system query service.
return: (osid.grading.GradeSystemQuerySession) - a
``GradeSystemQuerySession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_system_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_query()`` is ``true``.*
"""
raise Unimplemented()
grade_system_query_session = property(fget=get_grade_system_query_session)
def get_grade_system_query_session_for_gradebook(self, gradebook_id=None):
"""Gets the ``OsidSession`` associated with the grade system query service for the given gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
return: (osid.grading.GradeSystemQuerySession) - ``a
GradeSystemQuerySession``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``gradebook_id`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_grade_system_query()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_query()`` and
``supports_visible_federation()`` are ``true``.*
"""
if gradebook_id is None:
raise NullArgument
raise Unimplemented()
def get_grade_system_search_session(self):
"""Gets the ``OsidSession`` associated with the grade system search service.
return: (osid.grading.GradeSystemSearchSession) - a
``GradeSystemSearchSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_system_search()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_search()`` is ``true``.*
"""
raise Unimplemented()
grade_system_search_session = property(fget=get_grade_system_search_session)
def get_grade_system_search_session_for_gradebook(self, gradebook_id=None):
"""Gets the ``OsidSession`` associated with the grade system search service for the given gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
return: (osid.grading.GradeSystemSearchSession) - ``a
GradeSystemSearchSession``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``gradebook_id`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_grade_system_search()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_search()`` and
``supports_visible_federation()`` are ``true``.*
"""
if gradebook_id is None:
raise NullArgument
raise Unimplemented()
def get_grade_system_admin_session(self):
"""Gets the ``OsidSession`` associated with the grade system administration service.
return: (osid.grading.GradeSystemAdminSession) - a
``GradeSystemAdminSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_system_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_admin()`` is ``true``.*
"""
raise Unimplemented()
grade_system_admin_session = property(fget=get_grade_system_admin_session)
def get_grade_system_admin_session_for_gradebook(self, gradebook_id=None):
"""Gets the ``OsidSession`` associated with the grade system admin service for the given gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
return: (osid.grading.GradeSystemAdminSession) - ``a
GradeSystemAdminSession``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``gradebook_id`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_grade_system_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_admin()`` and
``supports_visible_federation()`` are ``true``.*
"""
if gradebook_id is None:
raise NullArgument
raise Unimplemented()
def get_grade_system_notification_session(self, grade_system_receiver=None):
"""Gets the notification session for notifications pertaining to grade system changes.
arg: grade_system_receiver
(osid.grading.GradeSystemReceiver): the grade system
receiver
return: (osid.grading.GradeSystemNotificationSession) - a
``GradeSystemNotificationSession``
raise: NullArgument - ``grade_system_receiver`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_system_notification()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_notification()`` is ``true``.*
"""
raise Unimplemented()
def get_grade_system_notification_session_for_gradebook(self, grade_system_receiver=None, gradebook_id=None):
    """Gets the ``OsidSession`` associated with the grade system notification service for the given gradebook.

    arg:    grade_system_receiver
        (osid.grading.GradeSystemReceiver): the grade system
        receiver
    arg:    gradebook_id (osid.id.Id): the ``Id`` of the gradebook
    return: (osid.grading.GradeSystemNotificationSession) - ``a
        GradeSystemNotificationSession``
    raise: NotFound - ``gradebook_id`` not found
    raise: NullArgument - ``grade_system_receiver`` or
        ``gradebook_id`` is ``null``
    raise: OperationFailed - ``unable to complete request``
    raise: Unimplemented - ``supports_grade_system_notification()``
        or ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_system_notification()`` and
    ``supports_visible_federation()`` are ``true``.*

    """
    # Raise instances (not the bare class) for consistency with the
    # other session getters in this module.
    if grade_system_receiver is None:
        raise NullArgument()
    if gradebook_id is None:
        raise NullArgument()
    raise Unimplemented()
def get_grade_system_gradebook_session(self):
    """Gets the session for retrieving grade system to gradebook mappings.

    return: (osid.grading.GradeSystemGradebookSession) - a
        ``GradeSystemGradebookSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_grade_system_gradebook()`` is
        ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_system_gradebook()`` is ``true``.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the session can be read as an attribute.
grade_system_gradebook_session = property(fget=get_grade_system_gradebook_session)
def get_grade_system_gradebook_assignment_session(self):
    """Gets the session for assigning grade system to gradebook mappings.

    return: (osid.grading.GradeSystemGradebookAssignmentSession) - a
        ``GradeSystemGradebookAssignmentSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented -
        ``supports_grade_system_gradebook_assignment()`` is
        ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_system_gradebook_assignment()`` is ``true``.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the session can be read as an attribute.
grade_system_gradebook_assignment_session = property(fget=get_grade_system_gradebook_assignment_session)
def get_grade_system_smart_gradebook_session(self, gradebook_id=None):
    """Gets the session for managing smart gradebooks of grade systems.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of the gradebook
    return: (osid.grading.GradeSystemSmartGradebookSession) - a
        ``GradeSystemSmartGradebookSession``
    raise: NotFound - ``gradebook_id`` not found
    raise: NullArgument - ``gradebook_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented -
        ``supports_grade_system_smart_gradebook()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_system_smart_gradebook()`` is ``true``.*

    """
    if gradebook_id is None:
        # Raise an instance (not the bare class) for consistency with
        # the other session getters in this module.
        raise NullArgument()
    raise Unimplemented()
def get_grade_entry_lookup_session(self):
    """Gets the ``OsidSession`` associated with the grade entry lookup service.

    return: (osid.grading.GradeEntryLookupSession) - a
        ``GradeEntryLookupSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_grade_entry_lookup()`` is
        ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_entry_lookup()`` is ``true``.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the session can be read as an attribute.
grade_entry_lookup_session = property(fget=get_grade_entry_lookup_session)
def get_grade_entry_lookup_session_for_gradebook(self, gradebook_id=None):
    """Gets the ``OsidSession`` associated with the grade entry lookup service for the given gradebook.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of the gradebook
    return: (osid.grading.GradeEntryLookupSession) - ``a
        GradeEntryLookupSession``
    raise: NotFound - ``gradebook_id`` not found
    raise: NullArgument - ``gradebook_id`` is ``null``
    raise: OperationFailed - ``unable to complete request``
    raise: Unimplemented - ``supports_grade_entry_lookup()`` or
        ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_entry_lookup()`` and
    ``supports_visible_federation()`` are ``true``.*

    """
    if gradebook_id is None:
        # Raise an instance (not the bare class) for consistency with
        # the other session getters in this module.
        raise NullArgument()
    raise Unimplemented()
def get_grade_entry_query_session(self):
    """Gets the ``OsidSession`` associated with the grade entry query service.

    return: (osid.grading.GradeEntryQuerySession) - a
        ``GradeEntryQuerySession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_grade_entry_query()`` is
        ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_entry_query()`` is ``true``.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the session can be read as an attribute.
grade_entry_query_session = property(fget=get_grade_entry_query_session)
def get_grade_entry_query_session_for_gradebook(self, gradebook_id=None):
    """Gets the ``OsidSession`` associated with the grade entry query service for the given gradebook.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of the gradebook
    return: (osid.grading.GradeEntryQuerySession) - ``a
        GradeEntryQuerySession``
    raise: NotFound - ``gradebook_id`` not found
    raise: NullArgument - ``gradebook_id`` is ``null``
    raise: OperationFailed - ``unable to complete request``
    raise: Unimplemented - ``supports_grade_entry_query()`` or
        ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_entry_query()`` and
    ``supports_visible_federation()`` are ``true``.*

    """
    if gradebook_id is None:
        # Raise an instance (not the bare class) for consistency with
        # the other session getters in this module.
        raise NullArgument()
    raise Unimplemented()
def get_grade_entry_search_session(self):
    """Gets the ``OsidSession`` associated with the grade entry search service.

    return: (osid.grading.GradeEntrySearchSession) - a
        ``GradeEntrySearchSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_grade_entry_search()`` is
        ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_entry_search()`` is ``true``.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the session can be read as an attribute.
grade_entry_search_session = property(fget=get_grade_entry_search_session)
def get_grade_entry_search_session_for_gradebook(self, gradebook_id=None):
    """Gets the ``OsidSession`` associated with the grade entry search service for the given gradebook.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of the gradebook
    return: (osid.grading.GradeEntrySearchSession) - ``a
        GradeEntrySearchSession``
    raise: NotFound - ``gradebook_id`` not found
    raise: NullArgument - ``gradebook_id`` is ``null``
    raise: OperationFailed - ``unable to complete request``
    raise: Unimplemented - ``supports_grade_entry_search()`` or
        ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_entry_search()`` and
    ``supports_visible_federation()`` are ``true``.*

    """
    if gradebook_id is None:
        # Raise an instance (not the bare class) for consistency with
        # the other session getters in this module.
        raise NullArgument()
    raise Unimplemented()
def get_grade_entry_admin_session(self):
    """Gets the ``OsidSession`` associated with the grade entry administration service.

    return: (osid.grading.GradeEntryAdminSession) - a
        ``GradeEntryAdminSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_grade_entry_admin()`` is
        ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_entry_admin()`` is ``true``.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the session can be read as an attribute.
grade_entry_admin_session = property(fget=get_grade_entry_admin_session)
def get_grade_entry_admin_session_for_gradebook(self, gradebook_id=None):
    """Gets the ``OsidSession`` associated with the grade entry admin service for the given gradebook.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of the gradebook
    return: (osid.grading.GradeEntryAdminSession) - ``a
        GradeEntryAdminSession``
    raise: NotFound - ``gradebook_id`` not found
    raise: NullArgument - ``gradebook_id`` is ``null``
    raise: OperationFailed - ``unable to complete request``
    raise: Unimplemented - ``supports_grade_entry_admin()`` or
        ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_entry_admin()`` and
    ``supports_visible_federation()`` are ``true``.*

    """
    if gradebook_id is None:
        # Raise an instance (not the bare class) for consistency with
        # the other session getters in this module.
        raise NullArgument()
    raise Unimplemented()
def get_grade_entry_notification_session(self, receiver=None):
    """Gets the notification session for notifications pertaining to grade entry changes.

    arg:    receiver (osid.grading.GradeEntryReceiver): the grade
        entry receiver
    return: (osid.grading.GradeEntryNotificationSession) - a
        ``GradeEntryNotificationSession``
    raise: NullArgument - ``receiver`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_grade_entry_notification()``
        is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_entry_notification()`` is ``true``.*

    """
    # The docstring promises NullArgument on a null receiver; enforce it
    # here like the *_for_gradebook variant already does.
    if receiver is None:
        raise NullArgument()
    raise Unimplemented()
def get_grade_entry_notification_session_for_gradebook(self, receiver=None, gradebook_id=None):
    """Gets the ``OsidSession`` associated with the grade entry notification service for the given gradebook.

    arg:    receiver (osid.grading.GradeEntryReceiver): the grade
        entry receiver
    arg:    gradebook_id (osid.id.Id): the ``Id`` of the gradebook
    return: (osid.grading.GradeEntryNotificationSession) - ``a
        GradeEntryNotificationSession``
    raise: NotFound - ``gradebook_id`` not found
    raise: NullArgument - ``receiver`` or ``gradebook_id`` is
        ``null``
    raise: OperationFailed - ``unable to complete request``
    raise: Unimplemented - ``supports_grade_entry_notification()``
        or ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_entry_notification()`` and
    ``supports_visible_federation()`` are ``true``.*

    """
    # Raise instances (not the bare class) for consistency with the
    # other session getters in this module.
    if receiver is None:
        raise NullArgument()
    if gradebook_id is None:
        raise NullArgument()
    raise Unimplemented()
def get_gradebook_column_lookup_session(self):
    """Gets the ``OsidSession`` associated with the gradebook column lookup service.

    return: (osid.grading.GradebookColumnLookupSession) - a
        ``GradebookColumnLookupSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_gradebook_column_lookup()``
        is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_column_lookup()`` is ``true``.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the session can be read as an attribute.
gradebook_column_lookup_session = property(fget=get_gradebook_column_lookup_session)
def get_gradebook_column_lookup_session_for_gradebook(self, gradebook_id=None):
    """Gets the ``OsidSession`` associated with the gradebook column lookup service for the given gradebook.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of the gradebook
    return: (osid.grading.GradebookColumnLookupSession) - ``a
        GradebookColumnLookupSession``
    raise: NotFound - ``gradebook_id`` not found
    raise: NullArgument - ``gradebook_id`` is ``null``
    raise: OperationFailed - ``unable to complete request``
    raise: Unimplemented - ``supports_gradebook_column_lookup()``
        or ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_column_lookup()`` and
    ``supports_visible_federation()`` are ``true``.*

    """
    if gradebook_id is None:
        # Raise an instance (not the bare class) for consistency with
        # the other session getters in this module.
        raise NullArgument()
    raise Unimplemented()
def get_gradebook_column_query_session(self):
    """Gets the ``OsidSession`` associated with the gradebook column query service.

    return: (osid.grading.GradebookColumnQuerySession) - a
        ``GradebookColumnQuerySession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_gradebook_column_query()`` is
        ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_column_query()`` is ``true``.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the session can be read as an attribute.
gradebook_column_query_session = property(fget=get_gradebook_column_query_session)
def get_gradebook_column_query_session_for_gradebook(self, gradebook_id=None):
    """Gets the ``OsidSession`` associated with the gradebook column query service for the given gradebook.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of the gradebook
    return: (osid.grading.GradebookColumnQuerySession) - ``a
        GradebookColumnQuerySession``
    raise: NotFound - ``gradebook_id`` not found
    raise: NullArgument - ``gradebook_id`` is ``null``
    raise: OperationFailed - ``unable to complete request``
    raise: Unimplemented - ``supports_gradebook_column_query()`` or
        ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_column_query()`` and
    ``supports_visible_federation()`` are ``true``.*

    """
    if gradebook_id is None:
        # Raise an instance (not the bare class) for consistency with
        # the other session getters in this module.
        raise NullArgument()
    raise Unimplemented()
def get_gradebook_column_search_session(self):
    """Gets the ``OsidSession`` associated with the gradebook column search service.

    return: (osid.grading.GradebookColumnSearchSession) - a
        ``GradebookColumnSearchSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_gradebook_column_search()``
        is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_column_search()`` is ``true``.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the session can be read as an attribute.
gradebook_column_search_session = property(fget=get_gradebook_column_search_session)
def get_gradebook_column_search_session_for_gradebook(self, gradebook_id=None):
    """Gets the ``OsidSession`` associated with the gradebook column search service for the given gradebook.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of the gradebook
    return: (osid.grading.GradebookColumnSearchSession) - ``a
        GradebookColumnSearchSession``
    raise: NotFound - ``gradebook_id`` not found
    raise: NullArgument - ``gradebook_id`` is ``null``
    raise: OperationFailed - ``unable to complete request``
    raise: Unimplemented - ``supports_gradebook_column_search()``
        or ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_column_search()`` and
    ``supports_visible_federation()`` are ``true``.*

    """
    if gradebook_id is None:
        # Raise an instance (not the bare class) for consistency with
        # the other session getters in this module.
        raise NullArgument()
    raise Unimplemented()
def get_gradebook_column_admin_session(self):
    """Gets the ``OsidSession`` associated with the gradebook column administration service.

    return: (osid.grading.GradebookColumnAdminSession) - a
        ``GradebookColumnAdminSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_gradebook_column_admin()`` is
        ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_column_admin()`` is ``true``.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the session can be read as an attribute.
gradebook_column_admin_session = property(fget=get_gradebook_column_admin_session)
def get_gradebook_column_admin_session_for_gradebook(self, gradebook_id=None):
    """Gets the ``OsidSession`` associated with the gradebook column admin service for the given gradebook.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of the gradebook
    return: (osid.grading.GradebookColumnAdminSession) - ``a
        GradebookColumnAdminSession``
    raise: NotFound - ``gradebook_id`` not found
    raise: NullArgument - ``gradebook_id`` is ``null``
    raise: OperationFailed - ``unable to complete request``
    raise: Unimplemented - ``supports_gradebook_column_admin()`` or
        ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_column_admin()`` and
    ``supports_visible_federation()`` are ``true``.*

    """
    if gradebook_id is None:
        # Raise an instance (not the bare class) for consistency with
        # the other session getters in this module.
        raise NullArgument()
    raise Unimplemented()
def get_gradebook_column_notification_session(self, gradebook_column_receiver=None):
    """Gets the notification session for notifications pertaining to gradebook column changes.

    arg:    gradebook_column_receiver
        (osid.grading.GradebookColumnReceiver): the gradebook
        column receiver
    return: (osid.grading.GradebookColumnNotificationSession) - a
        ``GradebookColumnNotificationSession``
    raise: NullArgument - ``gradebook_column_receiver`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented -
        ``supports_gradebook_column_notification()`` is
        ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_column_notification()`` is ``true``.*

    """
    # The docstring promises NullArgument on a null receiver; enforce it
    # here like the *_for_gradebook variant already does.
    if gradebook_column_receiver is None:
        raise NullArgument()
    raise Unimplemented()
def get_gradebook_column_notification_session_for_gradebook(self, gradebook_column_receiver=None, gradebook_id=None):
    """Gets the ``OsidSession`` associated with the gradebook column notification service for the given gradebook.

    arg:    gradebook_column_receiver
        (osid.grading.GradebookColumnReceiver): the gradebook
        column receiver
    arg:    gradebook_id (osid.id.Id): the ``Id`` of the gradebook
    return: (osid.grading.GradebookColumnNotificationSession) - ``a
        GradebookColumnNotificationSession``
    raise: NotFound - ``gradebook_id`` not found
    raise: NullArgument - ``gradebook_column_receiver`` or
        ``gradebook_id`` is ``null``
    raise: OperationFailed - ``unable to complete request``
    raise: Unimplemented -
        ``supports_gradebook_column_notification()`` or
        ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_column_notification()`` and
    ``supports_visible_federation()`` are ``true``.*

    """
    # Raise instances (not the bare class) for consistency with the
    # other session getters in this module.
    if gradebook_column_receiver is None:
        raise NullArgument()
    if gradebook_id is None:
        raise NullArgument()
    raise Unimplemented()
def get_gradebook_column_gradebook_session(self):
    """Gets the session for retrieving gradebook column to gradebook mappings.

    return: (osid.grading.GradebookColumnGradebookSession) - a
        ``GradebookColumnGradebookSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented -
        ``supports_gradebook_column_gradebook()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_column_gradebook()`` is ``true``.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the session can be read as an attribute.
gradebook_column_gradebook_session = property(fget=get_gradebook_column_gradebook_session)
def get_gradebook_column_gradebook_assignment_session(self):
    """Gets the session for assigning gradebook column to gradebook mappings.

    return: (osid.grading.GradebookColumnGradebookAssignmentSession)
        - a ``GradebookColumnGradebookAssignmentSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented -
        ``supports_gradebook_column_gradebook_assignment()`` is
        ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_column_gradebook_assignment()`` is
    ``true``.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the session can be read as an attribute.
gradebook_column_gradebook_assignment_session = property(fget=get_gradebook_column_gradebook_assignment_session)
def get_gradebook_column_smart_gradebook_session(self, gradebook_id=None):
    """Gets the session for managing smart gradebooks of gradebook columns.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of the gradebook
    return: (osid.grading.GradebookColumnSmartGradebookSession) - a
        ``GradebookColumnSmartGradebookSession``
    raise: NotFound - ``gradebook_id`` not found
    raise: NullArgument - ``gradebook_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented -
        ``supports_gradebook_column_smart_gradebook()`` is
        ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_column_smart_gradebook()`` is ``true``.*

    """
    if gradebook_id is None:
        # Raise an instance (not the bare class) for consistency with
        # the other session getters in this module.
        raise NullArgument()
    raise Unimplemented()
def get_gradebook_lookup_session(self):
    """Gets the OsidSession associated with the gradebook lookup service.

    return: (osid.grading.GradebookLookupSession) - a
        ``GradebookLookupSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_gradebook_lookup() is false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_lookup()`` is true.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the session can be read as an attribute.
gradebook_lookup_session = property(fget=get_gradebook_lookup_session)
def get_gradebook_query_session(self):
    """Gets the OsidSession associated with the gradebook query service.

    return: (osid.grading.GradebookQuerySession) - a
        ``GradebookQuerySession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_gradebook_query() is false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_query()`` is true.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the session can be read as an attribute.
gradebook_query_session = property(fget=get_gradebook_query_session)
def get_gradebook_search_session(self):
    """Gets the OsidSession associated with the gradebook search service.

    return: (osid.grading.GradebookSearchSession) - a
        ``GradebookSearchSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_gradebook_search() is false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_search()`` is true.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the session can be read as an attribute.
gradebook_search_session = property(fget=get_gradebook_search_session)
def get_gradebook_admin_session(self):
    """Gets the OsidSession associated with the gradebook administration service.

    return: (osid.grading.GradebookAdminSession) - a
        ``GradebookAdminSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_gradebook_admin() is false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_admin()`` is true.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the session can be read as an attribute.
gradebook_admin_session = property(fget=get_gradebook_admin_session)
def get_gradebook_notification_session(self, gradebook_receiver=None):
    """Gets the notification session for notifications pertaining to gradebook service changes.

    arg:    gradebook_receiver (osid.grading.GradebookReceiver): the
        gradebook receiver
    return: (osid.grading.GradebookNotificationSession) - a
        ``GradebookNotificationSession``
    raise: NullArgument - ``gradebook_receiver`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_gradebook_notification() is
        false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_notification()`` is true.*

    """
    # The docstring promises NullArgument on a null receiver; enforce it
    # like the other notification-session getters in this module.
    if gradebook_receiver is None:
        raise NullArgument()
    raise Unimplemented()
def get_gradebook_hierarchy_session(self):
    """Gets the session traversing gradebook hierarchies.

    return: (osid.grading.GradebookHierarchySession) - a
        ``GradebookHierarchySession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_gradebook_hierarchy() is
        false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_hierarchy()`` is true.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the session can be read as an attribute.
gradebook_hierarchy_session = property(fget=get_gradebook_hierarchy_session)
def get_gradebook_hierarchy_design_session(self):
    """Gets the session designing gradebook hierarchies.

    return: (osid.grading.GradebookHierarchyDesignSession) - a
        ``GradebookHierarchyDesignSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_gradebook_hierarchy_design()
        is false``
    *compliance: optional -- This method must be implemented if
    ``supports_gradebook_hierarchy_design()`` is true.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the session can be read as an attribute.
gradebook_hierarchy_design_session = property(fget=get_gradebook_hierarchy_design_session)
def get_grading_batch_manager(self):
    """Gets the ``GradingBatchManager``.

    return: (osid.grading.batch.GradingBatchManager) - a
        ``GradingBatchManager``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_grading_batch() is false``
    *compliance: optional -- This method must be implemented if
    ``supports_grading_batch()`` is true.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the sub-manager can be read as an attribute.
grading_batch_manager = property(fget=get_grading_batch_manager)
def get_grading_calculation_manager(self):
    """Gets the ``GradingCalculationManager``.

    return: (osid.grading.calculation.GradingCalculationManager) - a
        ``GradingCalculationManager``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_grading_calculation() is
        false``
    *compliance: optional -- This method must be implemented if
    ``supports_grading_calculation()`` is true.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the sub-manager can be read as an attribute.
grading_calculation_manager = property(fget=get_grading_calculation_manager)
def get_grading_transform_manager(self):
    """Gets the ``GradingTransformManager``.

    return: (osid.grading.transform.GradingTransformManager) - a
        ``GradingTransformManager``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_grading_transform() is
        false``
    *compliance: optional -- This method must be implemented if
    ``supports_grading_transform()`` is true.*

    """
    # Abstract stub: a concrete manager implementation overrides this.
    raise Unimplemented()

# Property alias so the sub-manager can be read as an attribute.
grading_transform_manager = property(fget=get_grading_transform_manager)
class GradingProxyManager(abc_grading_managers.GradingProxyManager, osid_managers.OsidProxyManager, GradingProfile):
"""The grading manager provides access to grading sessions and provides interoperability tests for various aspects of this service.
Methods in this manager accept a ``Proxy`` for passing information
from server environments.The sessions included in this manager are:
* ``GradeSystemLookupSession:`` a session to look up grades and
grade systems
* ``GradeSystemQuerySession:`` a session to query grade systems
``None``
* ``GradeSystemSearchSession:`` a session to search grade systems
* ``GradeSystemAdminSession:`` a session to manage grade systems
* ``GradeSystemNotificationSession`` a session for subscribing to
new or deleted grades or grade systems
* ``GradeSystemGradebookSession:`` a session for retrieving grade
system to gradebook mappings
* ``GradeSystemGradebookAssignmentSession:`` a session for
managing grade system to gradebook mappings
* ``GradeSystemSmartGradebookSession:`` a session for managing
smart gradebooks of grade systems
* ``GradeEntryLookupSession:`` a session to look up grade entries
* ``GradeEntryQuerySession:`` a session to query grade entries
``None``
* ``GradeEntrySearchSession:`` a session to search grade entries
* ``GradeEntryAdminSession:`` a session to create, modify and
delete grade entries ``None``
* ``GradeEntryNotificationSession:`` a session to receive messages
pertaining to grade entry changes
* ``GradebookColumnLookupSession:`` a session to look up gradebook
columns
* ``GradebookColumnQuerySession:`` a session to query gradebook
columns ``None``
* ``GradebookColumnSearchSession:`` a session to search gradebook
columns
* ``GradebookColumnAdminSession:`` a session to manage gradebook
columns
* ``GradebookColumnDerivationSession:`` a session to manage
derived gradebook columns
* ``GradebookColumnNotificationSession`` a session for subscribing
to new or deleted gradebook columns
* ``GradebookColumnGradebookSession:`` a session for retrieving
gradebook column to gradebook mappings
* ``GradebookColumnGradebookAssignmentSession:`` a session for
managing gradebook column to gradebook mappings
* ``GradebookColumnSmartGradebookSession:`` a session for managing
smart gradebooks of gradebook columns
* ``GradebookLookupSession:`` a session to lookup gradebooks
* ``GradebookQuerySession:`` a session to query gradebooks
* ``GradebookSearchSession`` : a session to search gradebooks
* ``GradebookAdminSession`` : a session to create, modify and
delete gradebooks
* ``GradebookNotificationSession`` : a session to receive messages
pertaining to gradebook changes
* ``GradebookHierarchySession:`` a session to traverse the
gradebook hierarchy
* ``GradebookHierarchyDesignSession:`` a session to manage the
gradebook hierarchy
"""
def get_grade_system_lookup_session(self, proxy=None):
    """Gets the ``OsidSession`` associated with the grade system lookup service.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.grading.GradeSystemLookupSession) - a
        ``GradeSystemLookupSession``
    raise: NullArgument - ``proxy`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_grade_system_lookup()`` is
        ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_system_lookup()`` is ``true``.*

    """
    # Proxy managers require a proxy on every session getter.
    if proxy is None:
        raise NullArgument()
    # Abstract stub: a concrete proxy manager implementation overrides this.
    raise Unimplemented()
def get_grade_system_lookup_session_for_gradebook(self, gradebook_id=None, proxy=None):
    """Gets the ``OsidSession`` associated with the grade system lookup service for the given gradebook.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of the gradebook
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.grading.GradeSystemLookupSession) - ``a
        GradeSystemLookupSession``
    raise: NotFound - ``gradebook_id`` not found
    raise: NullArgument - ``gradebook_id`` or ``proxy`` is ``null``
    raise: OperationFailed - ``unable to complete request``
    raise: Unimplemented - ``supports_grade_system_lookup()`` or
        ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_system_lookup()`` and
    ``supports_visible_federation()`` are ``true``.*

    """
    if gradebook_id is None or proxy is None:
        # Raise an instance (not the bare class) for consistency with
        # get_grade_system_lookup_session and the other getters.
        raise NullArgument()
    raise Unimplemented()
def get_grade_system_query_session(self, proxy=None):
    """Gets the ``OsidSession`` associated with the grade system query service.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.grading.GradeSystemQuerySession) - a
        ``GradeSystemQuerySession``
    raise: NullArgument - ``proxy`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_grade_system_query()`` is
        ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_system_query()`` is ``true``.*

    """
    # Proxy managers require a proxy on every session getter.
    if proxy is None:
        raise NullArgument()
    # Abstract stub: a concrete proxy manager implementation overrides this.
    raise Unimplemented()
def get_grade_system_query_session_for_gradebook(self, gradebook_id=None, proxy=None):
    """Gets the ``OsidSession`` associated with the grade system query service for the given gradebook.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of the gradebook
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.grading.GradeSystemQuerySession) - ``a
        GradeSystemQuerySession``
    raise: NotFound - ``gradebook_id`` not found
    raise: NullArgument - ``gradebook_id`` or ``proxy`` is ``null``
    raise: OperationFailed - ``unable to complete request``
    raise: Unimplemented - ``supports_grade_system_query()`` or
        ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_system_query()`` and
    ``supports_visible_federation()`` are ``true``.*

    """
    if gradebook_id is None or proxy is None:
        # Raise an instance (not the bare class) for consistency with
        # get_grade_system_query_session and the other getters.
        raise NullArgument()
    raise Unimplemented()
def get_grade_system_search_session(self, proxy=None):
    """Gets the ``OsidSession`` associated with the grade system search service.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.grading.GradeSystemSearchSession) - a
        ``GradeSystemSearchSession``
    raise: NullArgument - ``proxy`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_grade_system_search()`` is
        ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_system_search()`` is ``true``.*

    """
    # Proxy managers require a proxy on every session getter.
    if proxy is None:
        raise NullArgument()
    # Abstract stub: a concrete proxy manager implementation overrides this.
    raise Unimplemented()
def get_grade_system_search_session_for_gradebook(self, gradebook_id=None, proxy=None):
    """Gets the ``OsidSession`` associated with the grade system search service for the given gradebook.

    arg:    gradebook_id (osid.id.Id): the ``Id`` of the gradebook
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.grading.GradeSystemSearchSession) - ``a
        GradeSystemSearchSession``
    raise: NotFound - ``gradebook_id`` not found
    raise: NullArgument - ``gradebook_id`` or ``proxy`` is ``null``
    raise: OperationFailed - ``unable to complete request``
    raise: Unimplemented - ``supports_grade_system_search()`` or
        ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_system_search()`` and
    ``supports_visible_federation()`` are ``true``.*

    """
    if gradebook_id is None or proxy is None:
        # Raise an instance (not the bare class) for consistency with
        # get_grade_system_search_session and the other getters.
        raise NullArgument()
    raise Unimplemented()
def get_grade_system_admin_session(self, proxy=None):
    """Gets the ``OsidSession`` associated with the grade system administration service.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.grading.GradeSystemAdminSession) - a
        ``GradeSystemAdminSession``
    raise: NullArgument - ``proxy`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_grade_system_admin()`` is
        ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_grade_system_admin()`` is ``true``.*

    """
    # Proxy managers require a proxy on every session getter.
    if proxy is None:
        raise NullArgument()
    # Abstract stub: a concrete proxy manager implementation overrides this.
    raise Unimplemented()
def get_grade_system_admin_session_for_gradebook(self, gradebook_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the grade system admin service for the given gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradeSystemAdminSession) - ``a
GradeSystemAdminSession``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``gradebook_id`` or ``proxy`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_grade_system_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_admin()`` and
``supports_visible_federation()`` are ``true``.*
"""
if gradebook_id is None or proxy is None:
raise NullArgument
raise Unimplemented()
def get_grade_system_notification_session(self, grade_system_receiver=None, proxy=None):
"""Gets the notification session for notifications pertaining to grade system changes.
arg: grade_system_receiver
(osid.grading.GradeSystemReceiver): the grade system
receiver
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradeSystemNotificationSession) - a
``GradeSystemNotificationSession``
raise: NullArgument - ``grade_system_receiver`` or ``proxy`` is
``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_system_notification()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_notification()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_grade_system_notification_session_for_gradebook(self, grade_system_receiver=None, gradebook_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the grade system notification service for the given gradebook.
arg: grade_system_receiver
(osid.grading.GradeSystemReceiver): the grade system
receiver
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradeSystemNotificationSession) - ``a
_grade_system_notification_session``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``grade_system_receiver, gradebook_id``
or ``porxy`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_grade_system_notification()``
or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_notification()`` and
``supports_visible_federation()`` are ``true``.*
"""
if grade_system_receiver is None or proxy is None:
raise NullArgument
raise Unimplemented()
def get_grade_system_gradebook_session(self, proxy=None):
"""Gets the session for retrieving grade system to gradebook mappings.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradeSystemGradebookSession) - a
``GradeSystemGradebookSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_system_gradebook()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_gradebook()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_grade_system_gradebook_assignment_session(self, proxy=None):
"""Gets the session for assigning grade system to gradebook mappings.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradeSystemGradebookSession) - a
``GradeSystemGradebookAssignmentSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_grade_system_gradebook_assignment()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_gradebook_assignment()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_grade_system_smart_gradebook_session(self, gradebook_id=None, proxy=None):
"""Gets the session for managing smart gradebooks of grade systems.
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradeSystemSmartGradebookSession) - a
``GradeSystemSmartGradebookSession``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``gradebook_id`` or ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_grade_system_smart_gradebook()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_smart_gradebook()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_grade_entry_lookup_session(self, proxy=None):
"""Gets the ``OsidSession`` associated with the grade entry lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradeEntryLookupSession) - a
``GradeEntryLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_entry_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_lookup()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_grade_entry_lookup_session_for_gradebook(self, gradebook_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the grade entry lookup service for the given gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradeEntryLookupSession) - ``a
GradeEntryLookupSession``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``gradebook_id`` or ``proxy`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_grade_entry_lookup()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_lookup()`` and
``supports_visible_federation()`` are ``true``.*
"""
if gradebook_id is None or proxy is None:
raise NullArgument
raise Unimplemented()
def get_grade_entry_query_session(self, proxy=None):
"""Gets the ``OsidSession`` associated with the grade entry query service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradeEntryQuerySession) - a
``GradeEntryQuerySession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_entry_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_query()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_grade_entry_query_session_for_gradebook(self, gradebook_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the grade entry query service for the given gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradeEntryQuerySession) - ``a
GradeEntryQuerySession``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``gradebook_id`` or ``proxy`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_grade_entry_query()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_query()`` and
``supports_visible_federation()`` are ``true``.*
"""
if gradebook_id is None or proxy is None:
raise NullArgument
raise Unimplemented()
def get_grade_entry_search_session(self, proxy=None):
"""Gets the ``OsidSession`` associated with the grade entry search service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradeEntrySearchSession) - a
``GradeEntrySearchSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_entry_search()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_search()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_grade_entry_search_session_for_gradebook(self, gradebook_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the grade entry search service for the given gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradeEntrySearchSession) - ``a
GradeEntrySearchSession``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``gradebook_id`` or ``proxy`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_grade_entry_search()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_search()`` and
``supports_visible_federation()`` are ``true``.*
"""
if gradebook_id is None or proxy is None:
raise NullArgument
raise Unimplemented()
def get_grade_entry_admin_session(self, proxy=None):
"""Gets the ``OsidSession`` associated with the grade entry administration service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradeEntryAdminSession) - a
``GradeEntryAdminSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_entry_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_admin()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_grade_entry_admin_session_for_gradebook(self, gradebook_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the grade entry admin service for the given gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradeEntryAdminSession) - ``a
GradeEntryAdminSession``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``gradebook_id`` or ``proxy`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_grade_entry_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_admin()`` and
``supports_visible_federation()`` are ``true``.*
"""
if gradebook_id is None or proxy is None:
raise NullArgument
raise Unimplemented()
def get_grade_entry_notification_session(self, grade_entry_receiver=None, proxy=None):
"""Gets the notification session for notifications pertaining to grade entry changes.
arg: grade_entry_receiver (osid.grading.GradeEntryReceiver):
the grade entry receiver
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradeEntryNotificationSession) - a
``GradeEntryNotificationSession``
raise: NullArgument - ``grade_entry_receiver`` or ``proxy`` is
``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grade_entry_notification()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_notification()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_grade_entry_notification_session_for_gradebook(self, grade_entry_receiver=None, gradebook_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the grade entry notification service for the given gradebook.
arg: grade_entry_receiver (osid.grading.GradeEntryReceiver):
the grade entry receiver
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradeEntryNotificationSession) - ``a
_grade_entry_notification_session``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``grade_entry_receiver, gradebook_id`` or
``porxy`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_grade_entry_notification()``
or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_notification()`` and
``supports_visible_federation()`` are ``true``.*
"""
if grade_entry_receiver is None or proxy is None:
raise NullArgument
raise Unimplemented()
def get_gradebook_column_lookup_session(self, proxy=None):
"""Gets the ``OsidSession`` associated with the gradebook column lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnLookupSession) - a
``GradebookColumnLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_gradebook_column_lookup()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_lookup()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_gradebook_column_lookup_session_for_gradebook(self, gradebook_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the gradebook column lookup service for the given gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnLookupSession) - ``a
_gradebook_column_lookup_session``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``gradebook_id`` or ``proxy`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_gradebook_column_lookup()``
or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_lookup()`` and
``supports_visible_federation()`` are ``true``.*
"""
if gradebook_id is None or proxy is None:
raise NullArgument
raise Unimplemented()
def get_gradebook_column_query_session(self, proxy=None):
"""Gets the ``OsidSession`` associated with the gradebook column query service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnQuerySession) - a
``GradebookColumnQuerySession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_gradebook_column_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_query()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_gradebook_column_query_session_for_gradebook(self, gradebook_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the gradebook column query service for the given gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnQuerySession) - a
``GradebookColumnQuerySession``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``gradebook_id`` or ``proxy`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_gradebook_column_query()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_query()`` and
``supports_visible_federation()`` are ``true``.*
"""
if gradebook_id is None or proxy is None:
raise NullArgument
raise Unimplemented()
def get_gradebook_column_search_session(self, proxy=None):
"""Gets the ``OsidSession`` associated with the gradebook column search service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnSearchSession) - a
``GradebookColumnSearchSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_gradebook_column_search()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_search()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_gradebook_column_search_session_for_gradebook(self, gradebook_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the gradebook column search service for the given gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnSearchSession) - ``a
_gradebook_column_search_session``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``gradebook_id`` or ``proxy`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_gradebook_column_search()``
or ``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_search()`` and
``supports_visible_federation()`` are ``true``.*
"""
if gradebook_id is None or proxy is None:
raise NullArgument
raise Unimplemented()
def get_gradebook_column_admin_session(self, proxy=None):
"""Gets the ``OsidSession`` associated with the gradebook column administration service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnAdminSession) - a
``GradebookColumnAdminSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_gradebook_column_admin()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_admin()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_gradebook_column_admin_session_for_gradebook(self, gradebook_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the gradebook column admin service for the given gradebook.
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnAdminSession) - ``a
GradebookColumnAdminSession``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``gradebook_id`` or ``proxy`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented - ``supports_gradebook_column_admin()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_admin()`` and
``supports_visible_federation()`` are ``true``.*
"""
if gradebook_id is None or proxy is None:
raise NullArgument
raise Unimplemented()
def get_gradebook_column_notification_session(self, gradebook_column_receiver=None, proxy=None):
"""Gets the notification session for notifications pertaining to gradebook column changes.
arg: gradebook_column_receiver
(osid.grading.GradebookColumnReceiver): the grade system
receiver
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnNotificationSession) - a
``GradebookColumnNotificationSession``
raise: NullArgument - ``gradebook_column_receiver`` or
``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_gradebook_column_notification()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_notification()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_gradebook_column_notification_session_for_gradebook(self, gradebook_column_receiver=None, gradebook_id=None, proxy=None):
"""Gets the ``OsidSession`` associated with the gradebook column notification service for the given gradebook.
arg: gradebook_column_receiver
(osid.grading.GradebookColumnReceiver): the gradebook
column receiver
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnNotificationSession) - ``a
_gradebook_column_notification_session``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``gradebook_column_receiver,
gradebook_id`` or ``proxy`` is ``null``
raise: OperationFailed - ``unable to complete request``
raise: Unimplemented -
``supports_gradebook_column_notification()`` or
``supports_visible_federation()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_notification()`` and
``supports_visible_federation()`` are ``true``.*
"""
if gradebook_column_receiver is None or proxy is None:
raise NullArgument
raise Unimplemented()
def get_gradebook_column_gradebook_session(self, proxy=None):
"""Gets the session for retrieving gradebook column to gradebook mappings.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnGradebookSession) - a
``GradebookColumnGradebookSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_gradebook_column_gradebook()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_gradebook()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_gradebook_column_gradebook_assignment_session(self, proxy=None):
"""Gets the session for assigning gradebook column to gradebook mappings.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnGradebookAssignmentSession)
- a ``GradebookColumnGradebookAssignmentSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_gradebook_column_gradebook_assignment()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_gradebook_assignment()`` is
``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_gradebook_column_smart_gradebook_session(self, gradebook_id=None, proxy=None):
"""Gets the session for managing smart gradebooks of gradebook columns.
arg: gradebook_id (osid.id.Id): the ``Id`` of the gradebook
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnSmartGradebookSession) - a
``GradebookColumnSmartGradebookSession``
raise: NotFound - ``gradebook_id`` not found
raise: NullArgument - ``gradebook_id`` or ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_gradebook_column_smart_gradebook()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_smart_gradebook()`` is ``true``.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_gradebook_lookup_session(self, proxy=None):
"""Gets the OsidSession associated with the gradebook lookup service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookLookupSession) - a
``GradebookLookupSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_gradebook_lookup() is false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_lookup()`` is true.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_gradebook_query_session(self, proxy=None):
"""Gets the OsidSession associated with the gradebook query service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookQuerySession) - a
``GradebookQuerySession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_gradebook_query() is false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_query()`` is true.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_gradebook_search_session(self, proxy=None):
"""Gets the OsidSession associated with the gradebook search service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookSearchSession) - a
``GradebookSearchSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_gradebook_search() is false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_search()`` is true.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_gradebook_admin_session(self, proxy=None):
"""Gets the OsidSession associated with the gradebook administration service.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookAdminSession) - a
``GradebookAdminSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_gradebook_admin() is false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_admin()`` is true.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_gradebook_notification_session(self, gradebook_receiver=None, proxy=None):
"""Gets the notification session for notifications pertaining to gradebook service changes.
arg: gradebook_receiver (osid.grading.GradebookReceiver): the
gradebook receiver
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookNotificationSession) - a
``GradebookNotificationSession``
raise: NullArgument - ``gradebook_receiver`` or ``proxy`` is
``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_gradebook_notification() is
false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_notification()`` is true.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_gradebook_hierarchy_session(self, proxy=None):
"""Gets the session traversing gradebook hierarchies.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookHierarchySession) - a
``GradebookHierarchySession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_gradebook_hierarchy() is
false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_hierarchy()`` is true.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_gradebook_hierarchy_design_session(self, proxy=None):
"""Gets the session designing gradebook hierarchies.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookHierarchyDesignSession) - a
``GradebookHierarchyDesignSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_gradebook_hierarchy_design()
is false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_hierarchy_design()`` is true.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
def get_grading_batch_proxy_manager(self):
"""Gets the ``GradingBatchProxyManager``.
return: (osid.grading.batch.GradingBatchProxyManager) - a
``GradingBatchProxyManager``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grading_batch() is false``
*compliance: optional -- This method must be implemented if
``supports_grading_batch()`` is true.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
grading_batch_proxy_manager = property(fget=get_grading_batch_proxy_manager)
def get_grading_calculation_proxy_manager(self):
"""Gets the ``GradingCalculationProxyManager``.
return:
(osid.grading.calculation.GradingCalculationProxyManager
) - a ``GradingCalculationProxyManager``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grading_calculation() is
false``
*compliance: optional -- This method must be implemented if
``supports_grading_calculation()`` is true.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
grading_calculation_proxy_manager = property(fget=get_grading_calculation_proxy_manager)
def get_grading_transform_proxy_manager(self):
"""Gets the ``GradingTransformProxyManager``.
return: (osid.grading.transform.GradingTransformProxyManager) -
a ``GradingTransformManager``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_grading_transform() is
false``
*compliance: optional -- This method must be implemented if
``supports_grading_transform()`` is true.*
"""
if proxy is None:
raise NullArgument()
raise Unimplemented()
grading_transform_proxy_manager = property(fget=get_grading_transform_proxy_manager)
|
import numpy as np
import pytest
from sklearn.utils._weight_vector import (
WeightVector32,
WeightVector64,
)
@pytest.mark.parametrize(
    "dtype, WeightVector",
    [
        (np.float32, WeightVector32),
        (np.float64, WeightVector64),
    ],
)
def test_type_invariance(dtype, WeightVector):
    """Check that `WeightVector` preserves the floating dtype of its buffers."""
    values = np.random.rand(100).astype(dtype)
    averages = np.random.rand(100).astype(dtype)
    vector = WeightVector(values, averages)
    expected = np.dtype(dtype)
    # np.dtype objects for builtin scalar types are singletons, so identity
    # comparison with `is` matches the original assertion semantics.
    assert np.asarray(vector.w).dtype is expected
    assert np.asarray(vector.aw).dtype is expected
|
import argparse

# Command-line interface for the MolGAN molecular-generation model.
# BUG FIX: several options lacked a ``type=`` converter, so values passed on
# the command line arrived as strings (e.g. ``--learning_rate 0.01`` gave the
# str "0.01"); the ``--mode`` help text also wrongly said "default is true".
parser = argparse.ArgumentParser(description='MolGAN model for molecular.')
parser.add_argument('--device', default=1, type=int, help='device index')
parser.add_argument("--mode", default="train", help="mode for model, default is 'train'")
parser.add_argument('--learning_rate', default=1e-3, type=float, help='learning rate')
parser.add_argument('--batch_dim', default=128, type=int, help='size of a batch')
parser.add_argument('--la', default=1.0, type=float)
parser.add_argument('--dropout_rate', default=0., type=float)
parser.add_argument('--z_dim', default=8, type=int, help='the sample dim')
parser.add_argument('--epochs', default=10, type=int)
parser.add_argument('--temperature', default=1.0, type=float)
parser.add_argument('--n_critic', default=5, type=int,
                    help='the ratio of train discriminator and generator')
parser.add_argument('--dataset', default='../data/', help='the data path')
parser.add_argument('--max_atom_num', default=29, type=int)
# Module-level ``args`` namespace preserved for downstream consumers.
args = parser.parse_args()
print(args)
|
"""Parse arguments in python-style
NOTE:
    1. You should be acquainted with the usage of Python's argparse module first.
2. Shell var '$' symbol can be used in 'default', if the order of arguments are right. e.g.
in the following example:
in the default of --output, '$input' will be replaced by <input>;
but note that you can't refer to '$output' in input parser, where
the '$output' is not initialized yet.
3. The line
'opts=$(python utils/parseopt.py $0 $*) && eval $opts || exit 1' in
       shell script cannot be replaced with
       'eval $(python utils/parseopt.py $0 $*) || exit 1'
the latter would produce some unexpected prompt with 'example.sh -h'
4. To flag the start of parser, following statements are all allowed:
4.1 <<"PARSER" 4.2 <<'PARSER' 4.3 <<PARSER 4.4 << "PARSER" ...
Usage: in shell script
example.sh:
<<"PARSER"
("input", type=str, help="Input file.")
("-o", "--output", type=str, default="${input}_out",
help="Output file. Default: <input>_out")
PARSER
opts=$(python parseopt.py $0 $*) && eval $opts || exit 1
"""
import re
import sys
import argparse
if __name__ == "__main__":
    # Usage: python parseopt.py <shell-script> [forwarded-args...]
    # Reads the <<PARSER heredoc from the shell script, builds an argparse
    # parser from it, and prints 'export name=value; ' pairs for the shell
    # to eval.
    if len(sys.argv) < 2:
        sys.stderr.write(
            "This script is used to parse options for shell script.\n"
            f"... read header of {sys.argv[0]} for the usage.\n")
        sys.exit(1)
    script = sys.argv[1]    # shell script containing the <<PARSER heredoc
    argsin = sys.argv[2:]   # options forwarded from the shell script
    # Capture everything between a '<<PARSER' line (PARSER optionally
    # quoted, per sh heredoc syntax) and the terminating 'PARSER' line.
    parser_pattern = re.compile(
        r"^<<\s*(?:\"PARSER\"|'PARSER'|PARSER)\s*$((?:.|\n)*?)^\s*PARSER\s*$",
        re.MULTILINE)
    # Each argument spec is one parenthesized group, possibly spanning lines.
    argument_pattern = re.compile(r"^[(]((?:.|\n)*?)[)]$", re.MULTILINE)
    with open(script, 'r') as fi:
        s = fi.read()
    # NOTE(review): parserinfo[0] raises IndexError if the script has no
    # PARSER block — confirm that is the intended failure mode.
    parserinfo = parser_pattern.findall(s)
    match = argument_pattern.findall(parserinfo[0])
    parser = argparse.ArgumentParser(prog=script)
    for arg in match:
        # SECURITY: eval() executes text taken verbatim from the shell
        # script; only run this tool on trusted scripts.
        eval(f"parser.add_argument({arg})")
    try:
        for arg, value in vars(parser.parse_args(argsin)).items():
            if isinstance(value, list):
                # deal with nargs='+' and nargs='*'
                value = f"\"{' '.join([str(x) for x in value])}\""
            sys.stdout.write(f"export {arg}={value}; ")
    except SystemExit as se:
        # argparse exits 0 for -h/--help: re-route the help text to stderr
        # and exit 1 so the caller's `&& eval $opts` path is not taken
        # (eval'ing help text would corrupt the shell).  For parse errors
        # argparse already wrote to stderr; exit 0 here — presumably so the
        # shell sees the '|| exit 1' branch via the empty output instead.
        if se.code == 0:
            parser.print_help(sys.stderr)
            sys.exit(1)
        else:
            sys.exit(0)
|
import json
import glob, os
import dash
import datetime
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
from django_plotly_dash import DjangoDash
from django.apps import apps
import plotly.graph_objects as go
from Lab_Misc.General import *
from dbfread import DBF
import pandas as pd
from django.http import HttpResponse
from django.utils import timezone
import numpy as np
import time
from Exp_Main.models import SEL
from Exp_Sub.models import LSP
from plotly.subplots import make_subplots
from Lab_Misc import General
def conv(x):
    """Normalize a decimal-comma string to a decimal-point byte string."""
    normalized = x.replace(',', '.')
    return normalized.encode()
def Gen_dash(dash_name):
    class Gen_fig():
        """Loads one SEL experiment (and its sub-experiments) and builds the
        plotly figures shown by this dash app."""
        def load_data(self, target_id):
            """Load the ellipsometry XLSX of SEL entry *target_id* into self.data.

            Returns (True, status message) on success, (False, message) on any
            failure.  Also populates self.sub_data via get_subs().
            """
            try:
                entry = SEL.objects.get(id = target_id)
                self.entry = entry
                file = os.path.join( rel_path, entry.Link_XLSX)
                df = pd.read_excel(file, 'Tabelle1')
                new_vals = df[df>1]/1000000#correct for wrong format
                Curr_Dash = self.entry.Dash
                df.update(new_vals)
                self.data = df
                # Convert the relative minute column into absolute, tz-aware
                # timestamps anchored at the experiment's start time.
                self.data["Time (min.)"] = Curr_Dash.Start_datetime_elli + pd.TimedeltaIndex(self.data["Time (min.)"], unit='m')
                self.data["Time (min.)"] = self.data["Time (min.)"].dt.tz_convert(timezone.get_current_timezone())
                return_str = 'The following data could be loaded: Ellisometry'
                self.sub_data, return_str_sub = self.get_subs()
                return_str += return_str_sub
                os.chdir(cwd)
                return True, return_str
            except:
                # NOTE(review): bare except maps every failure (missing file,
                # schema change, DB error) to 'No data found!' — consider
                # narrowing and logging.
                return False, 'No data found!'
        def has_sub(self):
            # NOTE(review): the probing code is commented out, so this
            # currently always returns True.
            try:
                #Sub_Exp = self.entry.Sub_Exp.all()
                #print(Sub_Exp)
                #self.get_subs()
                return True
            except:
                return False
        def get_subs(self):
            """Collect dataframes of all linked sub-experiments.

            Returns (dict keyed by kind, human-readable summary string).
            """
            Sub_Exps = self.entry.Sub_Exp.all()
            Sub_Exps_dic = {}
            return_str = ''
            for Sub_Exp in Sub_Exps:
                Device = Sub_Exp.Device
                # Resolve the concrete sub-experiment model by device abbreviation.
                model = apps.get_model('Exp_Sub', str(Device.Abbrev))
                Exp_in_Model = model.objects.get(id = Sub_Exp.id)
                if Device.Abbrev == 'MFL':
                    # Mass-flow controller: one CSV per gas stream (H2O / N2).
                    Gas = Exp_in_Model.Gas.first()
                    if Gas.Name == 'H2O':
                        MFL_H2O_data = self.get_sub_csv(Exp_in_Model)
                        MFL_H2O_data['Date_Time'] = pd.to_datetime(MFL_H2O_data['Date_Time'], format='%d.%m.%Y %H:%M:%S', errors="coerce")
                        MFL_H2O_data['Date_Time'] = MFL_H2O_data['Date_Time'].dt.tz_localize(timezone.get_current_timezone())
                        Sub_Exps_dic.update(MFL_H2O_data = MFL_H2O_data)
                        return_str += ', massflow of water stream'
                    if Gas.Name == 'N2':
                        MFL_N2_data = self.get_sub_csv(Exp_in_Model)
                        MFL_N2_data['Date_Time'] = pd.to_datetime(MFL_N2_data['Date_Time'], format='%d.%m.%Y %H:%M:%S', errors="coerce")
                        MFL_N2_data['Date_Time'] = MFL_N2_data['Date_Time'].dt.tz_localize(timezone.get_current_timezone())
                        Sub_Exps_dic.update(MFL_N2_data = MFL_N2_data)
                        return_str += ', massflow of nitrogen stream'
                if Device.Abbrev == 'HME':
                    # Humidity meter: only the 'Cell' environment is loaded.
                    for pos_env in Exp_in_Model.PossibleEnvironments:
                        if pos_env[0] == Exp_in_Model.Environments:
                            if pos_env[1] == 'Cell':
                                Humidity_data = self.get_sub_dbf(Exp_in_Model)
                                # DATUM (date) + UHRZEIT (time) combine into one timestamp column.
                                Humidity_data['UHRZEIT'] = pd.to_datetime(Humidity_data['DATUM'] + Humidity_data['UHRZEIT'], format='%d.%m.%Y %H:%M:%S', errors="coerce")
                                Humidity_data['UHRZEIT'] = Humidity_data['UHRZEIT'].dt.tz_localize(timezone.get_current_timezone())
                                Sub_Exps_dic.update(HME_data = Humidity_data)
                                return_str += ', humidity measurements'
            return Sub_Exps_dic, return_str
        def get_sub_dbf(self, model):
            """Read the sub-experiment's linked DBF file into a DataFrame."""
            file = os.path.join( rel_path, model.Link)
            table = DBF(file, load=True)
            df = pd.DataFrame(iter(table))
            return df
        def get_sub_csv(self, model):
            """Read the sub-experiment's linked semicolon/comma-decimal CSV."""
            file = os.path.join( rel_path, model.Link)
            #file_name = file[get_LastIndex(file, '\\')+1:get_LastIndex(file, '.')]
            # NOTE(review): error_bad_lines is deprecated/removed in pandas >= 2
            # (use on_bad_lines='skip').
            df = pd.read_csv(file, sep=';', error_bad_lines=False, decimal = ',', parse_dates=[['Date', 'Time']])#skips bad lines
            return df
        def slice_data(self, data):
            """Apply the per-dashboard filter limits (contact angle, base
            diameter, time) that are set as floats on the Dash record."""
            DashTab = self.entry.Dash
            if isinstance(DashTab.CA_high_degree, float):
                slice_CA_high = (data['CA_L']<DashTab.CA_high_degree) & (data['CA_R']<DashTab.CA_high_degree)
                data = data[slice_CA_high]
            if isinstance(DashTab.CA_low_degree, float):
                slice_CA_low = (data['CA_L']>DashTab.CA_low_degree) & (data['CA_R']>DashTab.CA_low_degree)
                data = data[slice_CA_low]
            if isinstance(DashTab.BD_high_mm, float):
                slice_BD = (data['BI_left']<DashTab.BD_high_mm) & (data['BI_right']<DashTab.BD_high_mm)
                data = data[slice_BD]
            if isinstance(DashTab.BD_low_mm, float):
                slice_BD = (data['BI_left']>DashTab.BD_low_mm) & (data['BI_right']>DashTab.BD_low_mm)
                data = data[slice_BD]
            if isinstance(DashTab.Time_high_sec, float):
                slice_time = data['Age']<DashTab.Time_high_sec
                data = data[slice_time]
            if isinstance(DashTab.Time_low_sec, float):
                slice_time = data['Age']>DashTab.Time_low_sec
                data = data[slice_time]
            return data
        def CA_Time(self):
            # NOTE(review): despite the name, this plots 'Thickness # 3' over
            # time with trace label 'CA left' — confirm naming.
            fig = go.Figure()
            fig.add_trace(go.Scattergl(x=self.data['Time (min.)'], y=self.data['Thickness # 3'],
                            mode='markers',
                            name='CA left')
                            )
            return fig
        def CA_BD(self):
            """Contact angle (left/right/average) versus base diameter."""
            fig = go.Figure()
            fig.add_trace(go.Scattergl(x=self.data['BD'], y=self.data['CA_L'],
                            mode='markers',
                            name='CA left')
                            )
            fig.add_trace(go.Scattergl(x=self.data['BD'], y=self.data['CA_R'],
                            mode='markers',
                            name='CA right')
                            )
            fig.add_trace(go.Scattergl(x=self.data['BD'], y=self.data['CA_M'],
                            mode='markers',
                            name='CA average')
                            )
            fig.update_layout( xaxis_title='Base diameter [mm]',
                            yaxis_title='Contact angle [°]')
            return fig
        def Plot_3(self):
            """Resample thickness and humidity onto a common 15 s grid and
            store the result in self.cal_dat (columns: times/thickness/humidities)."""
            Humidity_data = self.sub_data['HME_data']
            start_date = self.data['Time (min.)'][0]
            end_date = self.data['Time (min.)'][len(self.data)-1]
            date = start_date
            time_step = datetime.timedelta(seconds = 15)
            thickness = []
            humidities = []
            times = []
            while(date<end_date):
                # Mean of all samples falling inside the (date, date+15s] window.
                thicknes = np.mean(self.data['Thickness # 3'][(self.data['Time (min.)']<date+time_step)&(self.data['Time (min.)']>date)])
                thickness.append(thicknes)
                humidity = np.mean(Humidity_data['CHN1RH'][(Humidity_data['UHRZEIT']<date+time_step)&(Humidity_data['UHRZEIT']>date)])
                humidities.append(humidity)
                times.append(date)
                date = date + time_step
            self.cal_dat = pd.DataFrame(times, columns = ['times'])
            self.cal_dat["times"] = self.cal_dat["times"].dt.tz_convert('UTC')#time already shifted
            self.cal_dat ['thickness'] = thickness
            self.cal_dat ['humidities'] = humidities
def get_figure(df, x_col, y_col, selectedpoints, selectedpoints_local):
if selectedpoints_local and selectedpoints_local['range']:
ranges = selectedpoints_local['range']
selection_bounds = {'x0': ranges['x'][0], 'x1': ranges['x'][1],
'y0': ranges['y'][0], 'y1': ranges['y'][1]}
else:
selection_bounds = {'x0': np.min(df[x_col]), 'x1': np.max(df[x_col]),
'y0': np.min(df[y_col]), 'y1': np.max(df[y_col])}
# set which points are selected with the `selectedpoints` property
# and style those points with the `selected` and `unselected`
# attribute. see
# https://medium.com/@plotlygraphs/notes-from-the-latest-plotly-js-release-b035a5b43e21
# for an explanation
fig = px.scatter(df, x=df[x_col], y=df[y_col], text=df.index)
fig.update_traces(selectedpoints=selectedpoints,
customdata=df.index,
mode='markers+text', marker={ 'color': 'rgba(0, 116, 217, 0.7)', 'size': 5 }, unselected={'marker': { 'color': 'rgba(200, 116, 0, 0.1)', 'size': 5 }, 'textfont': { 'color': 'rgba(0, 0, 0, 0)' }})
fig.update_layout(margin={'l': 20, 'r': 0, 'b': 15, 't': 5}, dragmode='select', hovermode=False)
fig.add_shape(dict({'type': 'rect',
'line': { 'width': 1, 'dash': 'dot', 'color': 'darkgrey' }},
**selection_bounds))
return fig
    # NOTE(review): 'value' appears to be unused in the visible code.
    value = 'temp'
    app = DjangoDash(name=dash_name, id='target_id')
    cwd = os.getcwd()
    rel_path = General.get_BasePath()
    GenFig = Gen_fig()
    # Placeholder figure shown before any data is loaded.
    fig = {
        'data': [{
            'y': [1]
        }],
        'layout': {
            'height': 800
        }
    }
    # Three side-by-side graphs, a hidden input carrying the SEL id, a load
    # button, and a loading indicator for the status text.
    app.layout = html.Div(children=[
        html.Div([
            html.Div(
                dcc.Graph(id='g1', config={'displayModeBar': True}),
                className='four columns',
                style={'width': '33%', 'display': 'inline-block'},
            ),
            html.Div(
                dcc.Graph(id='g2', config={'displayModeBar': True}),
                className='four columns',
                style={'width': '33%', 'display': 'inline-block'},
            ),
            html.Div(
                dcc.Graph(id='g3', config={'displayModeBar': True}),
                className='four columns',
                style={'width': '33%', 'display': 'inline-block'},
            )
        ], style={"display": "block"}, className='row'),
        dcc.Input(id='target_id', type='hidden', value='1'),
        html.Button('Load data', id='Load_Data'),
        dcc.Loading(
            id="loading",
            children=[html.Div([html.Div(id="loading-output")])],
            type="default",
        ),
    ])
    @app.callback(
        Output("loading-output", "children"),
        [Input('Load_Data', 'n_clicks'),
        Input('target_id', 'value'),])
    def update_output(n_clicks, target_id, *args,**kwargs):
        """Load the experiment's data on button click and report the result;
        on success also precompute the resampled cal_dat used by the graphs."""
        data_was_loaded, return_str = GenFig.load_data(target_id)
        if data_was_loaded:
            return_str += '\n Select the desired plot at the dropdown.'
            GenFig.Plot_3()
        return return_str
    # this callback defines 3 figures
    # as a function of the intersection of their 3 selections
    @app.callback(
        [Output('g1', 'figure'),
        Output('g2', 'figure'),
        Output('g3', 'figure')],
        [Input('target_id', 'value'),
        Input('g1', 'selectedData'),
        Input('g2', 'selectedData'),
        Input('g3', 'selectedData')]
    )
    def callback(target_id, selection1, selection2, selection3):
        """Redraw all three linked scatter plots with the intersection of the
        user's selections across them."""
        # NOTE(review): busy-waits (1 s sleep per retry) until update_output has
        # produced GenFig.cal_dat — blocks the worker indefinitely if loading
        # fails; consider a timeout or dash.no_update.
        no_data = True
        while no_data:
            try:
                GenFig.cal_dat
                no_data = False
            except:
                time.sleep(1)
        selectedpoints = GenFig.cal_dat.index
        for selected_data in [selection1, selection2, selection3]:
            if selected_data and selected_data['points']:
                selectedpoints = np.intersect1d(selectedpoints,
                    [p['customdata'] for p in selected_data['points']])
        return [get_figure(GenFig.cal_dat, "times", "thickness", selectedpoints, selection1),
                get_figure(GenFig.cal_dat, "times", "humidities", selectedpoints, selection2),
                get_figure(GenFig.cal_dat, "humidities", "thickness", selectedpoints, selection3)]
import csv
import main
# Parse disease/symptom records from main.funct() and export them to data.csv.
# Record layout (fixed width): chars 5-13 hold the code, chars 14+ the name.
diseaseCode=[]
diseaseName=[]
symptomCode=[]
symptomName=[]
tempCode=[]
tempName=[]
# including diseaseName and diseaseCode to the database
disease,occurances,symptoms = main.funct()
for i in range(len(disease)):
    diseaseCode.append(disease[i][5:13])
    diseaseName.append(disease[i][14:])
    for j in range(len(symptoms[i])):
        tempCode.append(symptoms[i][j][5:13])
        tempName.append(symptoms[i][j][14:])
    symptomCode.append(tempCode)
    symptomName.append(tempName)
    tempName=[]
    tempCode=[]
# newline='' is required by the csv module: without it csv.writer emits an
# extra blank row after every record on Windows.
with open('data.csv',mode='w',newline='') as data:
    fieldnames=["DiseaseCode","DiseaseName","Occurances","SymptomsCode","SymptomsName"]
    writer=csv.DictWriter(data,fieldnames=fieldnames)
    writer.writeheader()
    # NOTE(review): range(1, len-1) skips the first and the last disease —
    # confirm whether that is intentional.
    for i in range(1,len(diseaseName)-1):
        for j in range(len(symptoms[i])):
            writer.writerow({'DiseaseCode':diseaseCode[i],'DiseaseName':diseaseName[i],'Occurances':occurances[i],'SymptomsCode':symptomCode[i][j],'SymptomsName':symptomName[i][j]})
from repos.models import Repo
from rest_framework import viewsets, permissions
from .serializers import RepoSerializer
# Repo Viewset
class RepoViewSet(viewsets.ModelViewSet):
    """CRUD REST endpoint for Repo objects, serialized by RepoSerializer."""
    queryset = Repo.objects.all()
    # Open access: anonymous requests may read AND write.
    permission_classes = [
        permissions.AllowAny
    ]
    serializer_class = RepoSerializer
# python_version >= '3.7'
# pep8-naming test fixture: each '#:' line declares the expected check result
# (N816 = mixed-case variable in global scope) for the statements that follow.
#: Okay
async with expr as Γ:
    pass
#: N816
async with expr as γΓ:
    pass
#: Okay
async for Γ1 in iterator:
    pass
#: N816
async for γΓ1 in iterator:
    pass
from minitf import kernel as K
class Tensor(object):
    """Wraps a raw value and routes Python operators to the minitf kernel K."""

    def __init__(self, value):
        # TODO: cast to numpy array
        self._value = get_val(value)

    # Arithmetic — every operator is forwarded to the kernel.
    def __neg__(self):
        return K.negative(self)

    def __add__(self, other):
        return K.add(self, other)

    def __radd__(self, other):
        return K.add(other, self)

    def __sub__(self, other):
        return K.subtract(self, other)

    def __rsub__(self, other):
        return K.subtract(other, self)

    def __mul__(self, other):
        return K.multiply(self, other)

    def __rmul__(self, other):
        return K.multiply(other, self)

    def __truediv__(self, other):
        return K.divide(self, other)

    def __rtruediv__(self, other):
        return K.divide(other, self)

    # Comparisons are kernel ops too, so '==' yields a kernel result, not bool.
    def __eq__(self, other):
        return K.equal(self, other)

    def __ne__(self, other):
        return K.not_equal(self, other)

    def __gt__(self, other):
        return K.greater(self, other)

    def __ge__(self, other):
        return K.greater_equal(self, other)

    def __lt__(self, other):
        return K.less(self, other)

    def __le__(self, other):
        return K.less_equal(self, other)

    # __eq__ is overridden, so hash by identity to stay usable as a dict key.
    def __hash__(self):
        return id(self)

    def __str__(self):
        numpy_obj = self.numpy()
        return "tf.Tensor(id=%s, shape=%s, dtype=%s, numpy=%s)" % (
            id(self), K.shape(numpy_obj), self.dtype, str(numpy_obj))

    def __repr__(self):
        numpy_obj = self.numpy()
        return "<tf.Tensor: id=%s, shape=%s, dtype=%s, numpy=%s>" % (
            id(self), K.shape(numpy_obj), self.dtype, numpy_obj)

    def numpy(self):
        """Return the wrapped value converted through K.asnumpy."""
        return K.asnumpy(self._value)

    @property
    def data(self):
        # Raw wrapped value, without conversion.
        return self._value

    @property
    def dtype(self):
        # dtype of the wrapped value, or None when it has none (e.g. plain int).
        return getattr(self._value, 'dtype', None)


def is_tensor(x):
    """Return True when *x* is a minitf Tensor."""
    return isinstance(x, Tensor)


def get_val(x):
    """Unwrap a Tensor to its raw value; non-tensors pass through unchanged."""
    if is_tensor(x):
        return x.data
    return x
import enum
import inspect
import pycspr
def _has_class(mod, cls):
    """Asserts that a container exposes a class."""
    _has_member(mod, cls)
    attr = getattr(mod, cls)
    assert inspect.isclass(attr), '{} is not a class'.format(cls)


def _has_constant(mod, constant):
    """Asserts that a container exposes a constant."""
    _has_member(mod, constant)


def _has_enum(mod, enm):
    """Asserts that a container exposes an enumeration."""
    _has_member(mod, enm)
    attr = getattr(mod, enm)
    assert issubclass(attr, enum.Enum), '{} is not an enum'.format(enm)


def _has_exception(mod, err):
    """Asserts that a container exposes an exception."""
    _has_class(mod, err)
    attr = getattr(mod, err)
    assert issubclass(attr, Exception), \
        'Exception type does not inherit from builtin Exception class.'


def _has_function(mod, func):
    """Asserts that a container exposes a function."""
    _has_member(mod, func)
    attr = getattr(mod, func)
    assert inspect.isfunction(attr), '{} is not a function'.format(func)


def _has_member(mod, member):
    """Asserts that a module exposes a member."""
    assert inspect.ismodule(mod)
    assert hasattr(mod, member), 'Missing member: {}'.format(member)
# Expected interface.
# Maps assertor callable -> set of member names that must satisfy it on the
# top-level pycspr package.
_INTERFACE_OF_LIBRARY = {
    _has_class: {
        "NodeClient",
        "NodeConnectionInfo",
    },
    _has_enum: {
        "NodeSseChannelType",
        "NodeSseEventType",
        "HashAlgorithm",
        "KeyAlgorithm",
    },
    # Currently no exported constants or exceptions are pinned.
    _has_constant: set(),
    _has_exception: set(),
    _has_function: {
        "create_deploy",
        "create_deploy_approval",
        "create_deploy_parameters",
        "create_deploy_argument",
        "create_standard_payment",
        "create_native_transfer",
        "create_validator_auction_bid",
        "create_validator_auction_bid_withdrawal",
        "create_validator_delegation",
        "create_validator_delegation_withdrawal",
        "parse_public_key",
        "parse_private_key",
        "get_account_hash",
        "get_account_key",
        "get_account_key_algo",
        "read_deploy",
        "read_wasm",
        "write_deploy",
    },
    _has_member: {
        "crypto",
        "factory",
        "serialisation",
        "types",
    }
}
# Expected interface of factory methods.
_INTERFACE_OF_FACTORY = {
    _has_member: {
        "accounts",
        "cl",
        "deploys",
    },
}
def test_version_of_library():
    """Pins the published package version; update when cutting a release."""
    assert pycspr.__version__ == "0.7.5"
def test_exports_of_library():
    """Verify the top-level pycspr namespace exposes the expected interface."""
    # Delegate to the shared walker instead of duplicating its loop, for
    # consistency with test_exports_of_factory.
    _test_exports(pycspr, _INTERFACE_OF_LIBRARY)
def test_exports_of_factory():
    """Verify pycspr.factory exposes the expected sub-modules."""
    _test_exports(pycspr.factory, _INTERFACE_OF_FACTORY)
def _test_exports(module, interface):
    """Run each (assertor -> member names) pair in *interface* against *module*."""
    pairs = ((assertor, name)
             for assertor, members in interface.items()
             for name in members)
    for assertor, name in pairs:
        assertor(module, name)
|
# Exercise: read a student's two grades, compute and print the average.
#nota1=float(input('digite sua primeira nota: '))
#nota2=float(input('digite sua segunda nota: '))
#media=(nota1+nota2)/2
#print('sua media é igual a: {}'.format(media))
# Optional variant: read four grades, compute the average, and print
# "aprovado" (passed) when the average is >= 7, otherwise "reprovado" (failed).
nota1=float(input('digite sua primeira nota:'))
nota2=float(input('digite sua segunda nota: '))
nota3=float(input('digite sua terceira nota:'))
nota4=float(input('digite sua quarta nota:'))
media=(nota1+nota2+nota3+nota4)/4
if media>=7:
    print('sua media é igual a: {} aprovado!!!'.format(media))
else:
    print('sua media é igual a: {} reprovado!!!'.format(media))
def closest_row(dataframe, column, value):
    """
    Return the row of *dataframe* whose entry in *column* is nearest to *value*.
    :param dataframe: Dataframe object
    :param column: Name of the column in which to search for the nearest value.
    :param value: Target value to compare against.
    :return: One-row dataframe holding the closest match (first one on ties).
    """
    distances = (dataframe[column] - value).abs()
    nearest_position = distances.argsort()[:1]
    return dataframe.iloc[nearest_position]
|
#! /usr/bin/env python3
# std imports
import logging
import typing as T
import pathlib
import functools
import glob
import fnmatch
from pyscooper.tex_utils import sanitize_tex
from pyscooper import deps
from pyscooper.cli_utils import debug, info, warning, error
class TOCFile:
    """One file entry in the table of contents.

    keypath addresses the entry's PARENT directory; ext_key is the EXT_MAP
    glob that selects the scooper for this file.
    """

    def __init__(self, filepath: pathlib.Path, keypath: T.Tuple[str], ext_key: T.Optional[str] = None):
        self.filepath = filepath
        # Only sniff the extension when the caller did not supply a key.
        if ext_key is None:
            ext_key = ext_match(filepath)
        self.ext_key = ext_key
        self.keypath = keypath  # To the PARENT dir!

    def __repr__(self):
        return (f"<{self.__class__}"
                f" filepath={self.filepath}"
                f", ext_key={self.ext_key}"
                f", keypath={self.keypath}"
                ">")
def sanitize_path(in_str: T.Union[pathlib.Path, str], ) -> str:
    """Escape spaces so the path survives inclusion as a TeX macro argument."""
    return str(in_str).replace(' ', r'\space ')
def parametrized(dec):
    """Turn *dec* into a decorator factory that accepts arguments.

    ``@parametrized`` lets you write ``@my_dec(arg)``, where ``my_dec``
    receives the decorated function first, followed by the arguments.
    """
    def with_args(*args, **kwargs):
        return lambda func: dec(func, *args, **kwargs)
    return with_args
@parametrized
def tex_centering(fcn, add_pagebreak: bool = False):
    """Wrap *fcn*'s output in a vertically centered TeX block, optionally
    followed by a page break."""
    def wrapper(p):
        lines = [
            "",
            r"\begin{centering}",
            r" \vspace*{\fill}",
            f" {fcn(p)}",
            r" \vspace*{\fill}",
            r"\end{centering}",
            "",
        ]
        out_str = "\n".join(lines)
        if add_pagebreak:
            out_str += "\n" + r"\pagebreak" + "\n"
        return out_str
    return wrapper
def verbatim(fcn):
    """Wrap *fcn*'s output in a TeX verbatim environment."""
    def wrapper(p):
        body = fcn(p)
        parts = ["", r"\begin{verbatim}", f"{body}", r"\end{verbatim}", ""]
        return "\n".join(parts)
    return wrapper
def blank_pad(fcn):
    """Surround *fcn*'s output with two blank lines on each side."""
    @functools.wraps(fcn)
    def wrapper(*args, **kwargs):
        return f"\n\n{fcn(*args, **kwargs)}\n\n"
    return wrapper
def centering(fcn):
    """Wrap *fcn*'s output in a TeX centering environment."""
    @functools.wraps(fcn)
    def wrapper(*args, **kwargs):
        inner = fcn(*args, **kwargs)
        return "\n" + r"\begin{centering}" + f"\n{inner}\n" + r"\end{centering}" + "\n"
    return wrapper
def vspace(fcn):
    """Pad *fcn*'s output with vertical fills above and below."""
    @functools.wraps(fcn)
    def wrapper(*args, **kwargs):
        inner = fcn(*args, **kwargs)
        return "\n\t" + r"\vspace*{\fill}" + f"\n{inner}\n\t" + r"\vspace*{\fill}" + "\n"
    return wrapper
def pagebreak_after(fcn):
    """Append a TeX page break after *fcn*'s output."""
    @functools.wraps(fcn)
    def wrapper(*args, **kwargs):
        return f"{fcn(*args, **kwargs)}\n" + r"\pagebreak" + "\n"
    return wrapper
@blank_pad
@pagebreak_after
@centering
@vspace
def scoop_img(file: pathlib.Path, width: None = None, ) -> str:
    """Emit an \\includegraphics command filling the line width.

    *width* is accepted for interface compatibility but currently unused.
    """
    escaped = sanitize_path(file.absolute())
    return r'\includegraphics[width=\linewidth]{' + escaped + r'}'
@blank_pad
@pagebreak_after
@centering
@verbatim
def scoop_text(file: pathlib.Path) -> str:
    """Return the file's text, TeX-sanitized, for a verbatim block."""
    with open(file, 'r') as fp:
        # NOTE(review): readlines() keeps each trailing '\n', so joining with
        # '\n' doubles every line break — confirm whether that is intended.
        contents = sanitize_tex('\n'.join(fp.readlines()))
    # Ensure the encodings are OK
    # encoding = 'ascii' # Safe for TeX
    encoding = 'utf-8'  # Should work... might be riskier for TeX
    contents = contents.encode(encoding, 'ignore').decode(encoding)
    return contents
@blank_pad
def scoop_pdf(file: pathlib.Path, ) -> str:
    """Inline every page of *file* via pdfpages' \\includepdf."""
    escaped = sanitize_path(file.absolute())
    return (r'\includepdf[pages=-,pagecommand={},width=\linewidth]{'
            + escaped
            + r'}')
@blank_pad
@pagebreak_after
@centering
def scoop_vid(file: pathlib.Path) -> str:
    # Placeholder: video embedding is not implemented yet.
    return "TODO"
@blank_pad
@pagebreak_after
@centering
def scoop_song(file: pathlib.Path) -> str:
    # Placeholder: audio embedding is not implemented yet.
    return "TODO"
@blank_pad
@pagebreak_after
@centering
def scoop_3d(file: pathlib.Path) -> str:
    # Placeholder: 3D model embedding is not implemented yet.
    return "TODO "
@blank_pad
@pagebreak_after
@centering
@vspace
def pandas_scoop_csv(file: pathlib.Path) -> str:
    """Render a delimited text file as a LaTeX table via pandas."""
    import pandas as pd
    # NOTE(review): despite the name this reads *whitespace*-delimited data
    # (delim_whitespace=True), not commas — confirm this is intended.
    # delim_whitespace is also deprecated in pandas >= 2.1 (use sep=r'\s+').
    df = pd.read_csv(file,
                     delim_whitespace=True,
                     skipinitialspace=True,
                     )
    # delimiter=','
    # sep=','
    return df.to_latex()
# TSV files are handled by the same whitespace-based reader.
pandas_scoop_tsv = pandas_scoop_csv
# Minted scoopers -> All the same with different types
def scoop_minted_fcn(lexer: str) -> T.Callable[[pathlib.Path], str]:
    """Build a scooper that renders a file through minted with *lexer*."""
    @blank_pad
    @pagebreak_after
    def wrapped(file: pathlib.Path, ) -> str:
        escaped = sanitize_path(file.absolute())
        return r'\inputminted{' + str(lexer) + r"}{" + escaped + r'}'
    return wrapped
# Lowercased filename glob -> scooper callable; ext_match returns the first
# key that matches a file's name.
EXT_MAP = {
    '*.jpg': scoop_img,
    '*.jpeg': scoop_img,
    '*.png': scoop_img,
    '*.txt': scoop_text,
    '*.log': scoop_text,
    # '*.svg': 'todo',
    # '*.eps': 'todo',
    '*.pdf': scoop_pdf,
    # '*.mp4': scoop_vid,
    # '*.gif': 'todo',
    # '*.mp3': scoop_song,
}
# Filled from the pygments tables below when pygmentize is available.
MINTED_LEXERS = dict()
MINTED_EXTS = set()
if deps.PYGMENTIZE_OK:
# deps.get_pygmentize_lexers
unique_ext2lexer = {
'*.abap': 'abap',
'*.abnf': 'abnf',
'*.ada': 'ada',
'*.adb': 'ada',
'*.ads': 'ada',
'*.adl': 'adl',
'*.adlf': 'adl',
'*.adls': 'adl',
'*.adlx': 'adl',
'*.agda': 'agda',
'*.aheui': 'aheui',
'*.als': 'alloy',
'*.at': 'ambienttalk',
'*.isa': 'amdgpu',
'*.run': 'ampl',
'*.ans': 'ansys',
'.htaccess': 'apacheconf',
'apache.conf': 'apacheconf',
'apache2.conf': 'apacheconf',
'*.apl': 'apl',
'*.aplc': 'apl',
'*.aplf': 'apl',
'*.apli': 'apl',
'*.apln': 'apl',
'*.aplo': 'apl',
'*.dyalog': 'apl',
'*.applescript': 'applescript',
'*.ino': 'arduino',
'*.arw': 'arrow',
'*.aj': 'aspectj',
'*.asy': 'asymptote',
'*.aug': 'augeas',
'*.ahk': 'autohotkey',
'*.ahkl': 'autohotkey',
'*.au3': 'autoit',
'*.awk': 'awk',
'*.bare': 'bare',
'*.bash': 'bash',
'*.ebuild': 'bash',
'*.eclass': 'bash',
'*.exheres-0': 'bash',
'*.exlib': 'bash',
'*.ksh': 'bash',
'*.sh': 'bash',
'*.zsh': 'bash',
'.bash_*': 'bash',
'.bashrc': 'bash',
'.zshrc': 'bash',
'bash_*': 'bash',
'bashrc': 'bash',
'pkgbuild': 'bash',
'zshrc': 'bash',
'*.bat': 'batch',
'*.cmd': 'batch',
'*.bbc': 'bbcbasic',
'*.bc': 'bc',
'*.befunge': 'befunge',
'*.bib': 'bibtex',
'*.bb': 'blitzbasic',
'*.decls': 'blitzbasic',
'*.bmx': 'blitzmax',
'*.bnf': 'bnf',
'*.boa': 'boa',
'*.boo': 'boo',
'*.bpl': 'boogie',
'*.bf': 'brainfuck',
'*.bst': 'bst',
'*.c-objdump': 'c-objdump',
'*.idc': 'c',
'*.cadl': 'cadl',
'*.camkes': 'camkes',
'*.idl4': 'camkes',
'*.cdl': 'capdl',
'*.capnp': 'capnp',
'*.cddl': 'cddl',
'*.ceylon': 'ceylon',
'*.cfc': 'cfc',
'*.cf': 'cfengine3',
'*.cfm': 'cfm',
'*.cfml': 'cfm',
'*.chai': 'chaiscript',
'*.chpl': 'chapel',
'*.ci': 'charmci',
'*.spt': 'cheetah',
'*.tmpl': 'cheetah',
'*.cirru': 'cirru',
'*.clay': 'clay',
'*.dcl': 'clean',
'*.icl': 'clean',
'*.clj': 'clojure',
'*.cljs': 'clojurescript',
'*.cmake': 'cmake',
'cmakelists.txt': 'cmake',
'*.cob': 'cobol',
'*.cpy': 'cobol',
'*.cbl': 'cobolfree',
'*.coffee': 'coffeescript',
'*.cl': 'common-lisp',
'*.lisp': 'common-lisp',
'*.cps': 'componentpascal',
'*.sh-session': 'console',
'*.shell-session': 'console',
'*.c++': 'cpp',
'*.cc': 'cpp',
'*.cpp': 'cpp',
'*.cxx': 'cpp',
'*.h++': 'cpp',
'*.hpp': 'cpp',
'*.hxx': 'cpp',
'*.c++-objdump': 'cpp-objdump',
'*.cpp-objdump': 'cpp-objdump',
'*.cxx-objdump': 'cpp-objdump',
'*.cpsa': 'cpsa',
'*.cr': 'cr',
'*.crmsh': 'crmsh',
'*.pcmk': 'crmsh',
'*.croc': 'croc',
'*.cry': 'cryptol',
'*.cs': 'csharp',
'*.orc': 'csound',
'*.udo': 'csound',
'*.csd': 'csound-document',
'*.sco': 'csound-score',
'*.css.in': 'css+mozpreproc',
'*.css': 'css',
'*.cu': 'cuda',
'*.cuh': 'cuda',
'*.cyp': 'cypher',
'*.cypher': 'cypher',
'*.pxd': 'cython',
'*.pxi': 'cython',
'*.pyx': 'cython',
'*.d-objdump': 'd-objdump',
'*.d': 'd',
'*.di': 'd',
'*.dart': 'dart',
'*.dasm': 'dasm16',
'*.dasm16': 'dasm16',
'control': 'debcontrol',
'sources.list': 'debsources',
'*.dpr': 'delphi',
'*.pas': 'delphi',
'*.dts': 'devicetree',
'*.dtsi': 'devicetree',
'*.dg': 'dg',
'*.diff': 'diff',
'*.patch': 'diff',
'*.docker': 'docker',
'dockerfile': 'docker',
'*.darcspatch': 'dpatch',
'*.dpatch': 'dpatch',
'*.dtd': 'dtd',
'*.duel': 'duel',
'*.jbst': 'duel',
'*.dylan-console': 'dylan-console',
'*.hdp': 'dylan-lid',
'*.lid': 'dylan-lid',
'*.dyl': 'dylan',
'*.dylan': 'dylan',
'*.intr': 'dylan',
'*.eg': 'earl-grey',
'*.ezt': 'easytrieve',
'*.mac': 'easytrieve',
'*.ebnf': 'ebnf',
'*.ec': 'ec',
'*.eh': 'ec',
'*.e': 'eiffel',
'*.eex': 'elixir',
'*.ex': 'elixir',
'*.exs': 'elixir',
'*.leex': 'elixir',
'*.elm': 'elm',
'*.el': 'emacs-lisp',
'*.eml': 'email',
'*.erl-sh': 'erl',
'*.erl': 'erlang',
'*.es': 'erlang',
'*.escript': 'erlang',
'*.hrl': 'erlang',
'*.evoque': 'evoque',
'*.exec': 'execline',
'*.xtm': 'extempore',
'*.factor': 'factor',
'*.fan': 'fan',
'*.fancypack': 'fancy',
'*.fy': 'fancy',
'*.flx': 'felix',
'*.flxh': 'felix',
'*.fnl': 'fennel',
'*.fish': 'fish',
'*.load': 'fish',
'*.flo': 'floscript',
'*.frt': 'forth',
'*.f03': 'fortran',
'*.f90': 'fortran',
'*.f': 'fortranfixed',
'*.prg': 'foxpro',
'*.edp': 'freefem',
'*.fsi': 'fsharp',
'*.fst': 'fstar',
'*.fsti': 'fstar',
'*.fut': 'futhark',
'*.gap': 'gap',
'*.gi': 'gap',
'*.gcode': 'gcode',
'*.kid': 'genshi',
'*.feature': 'gherkin',
'*.frag': 'glsl',
'*.geo': 'glsl',
'*.vert': 'glsl',
'*.plot': 'gnuplot',
'*.plt': 'gnuplot',
'*.go': 'go',
'*.golo': 'golo',
'*.gdc': 'gooddata-cl',
'*.gs': 'gosu',
'*.gsp': 'gosu',
'*.gsx': 'gosu',
'*.vark': 'gosu',
'*.dot': 'graphviz',
'*.gv': 'graphviz',
'*.[1234567]': 'groff',
'*.man': 'groff',
'*.gradle': 'groovy',
'*.groovy': 'groovy',
'*.gst': 'gst',
'*.haml': 'haml',
'*.hs': 'haskell',
'*.hx': 'haxe',
'*.hxsl': 'haxe',
'*.hxml': 'haxeml',
'*.hlsl': 'hlsl',
'*.hlsli': 'hlsl',
'*.hsail': 'hsail',
'*.handlebars': 'html+handlebars',
'*.hbs': 'html+handlebars',
'*.ng2': 'html+ng2',
'*.phtml': 'html+php',
'*.twig': 'html+twig',
'*.htm': 'html',
'*.xhtml': 'html',
'*.hyb': 'hybris',
'*.i6t': 'i6t',
'*.icon': 'icon',
'*.idr': 'idris',
'*.ipf': 'igor',
'*.i7x': 'inform7',
'*.ni': 'inform7',
'*.cfg': 'ini',
'*.ini': 'ini',
'*.io': 'io',
'*.ik': 'ioke',
'*.weechatlog': 'irc',
'*.thy': 'isabelle',
'*.ijs': 'j',
'*.jag': 'jags',
'*.java': 'java',
'*.js.in': 'javascript+mozpreproc',
'*.cjs': 'javascript',
'*.js': 'javascript',
'*.jsm': 'javascript',
'*.mjs': 'javascript',
'*.jcl': 'jcl',
'*.jsgf': 'jsgf',
'*.json': 'json',
'pipfile.lock': 'json',
'*.jsonld': 'jsonld',
'*.jsp': 'jsp',
'*.jl': 'julia',
'*.juttle': 'juttle',
'*.kal': 'kal',
'*config.in*': 'kconfig',
'external.in*': 'kconfig',
'kconfig*': 'kconfig',
'standard-modules.in': 'kconfig',
'*.dmesg': 'kmsg',
'*.kmsg': 'kmsg',
'*.kk': 'koka',
'*.kki': 'koka',
'*.kt': 'kotlin',
'*.kts': 'kotlin',
'*.kn': 'kuin',
'*.lasso': 'lasso',
'*.lasso[89]': 'lasso',
'*.lean': 'lean',
'*.less': 'less',
'lighttpd.conf': 'lighttpd',
'*.liquid': 'liquid',
'*.lagda': 'literate-agda',
'*.lcry': 'literate-cryptol',
'*.lhs': 'literate-haskell',
'*.lidr': 'literate-idris',
'*.ls': 'livescript',
'*.mir': 'llvm-mir',
'*.ll': 'llvm',
'*.x': 'logos',
'*.xi': 'logos',
'*.xm': 'logos',
'*.xmi': 'logos',
'*.lgt': 'logtalk',
'*.logtalk': 'logtalk',
'*.lsl': 'lsl',
'*.lua': 'lua',
'*.wlua': 'lua',
'*.mak': 'make',
'*.mk': 'make',
'gnumakefile': 'make',
'makefile': 'make',
'makefile.*': 'make',
'*.mao': 'mako',
'*.maql': 'maql',
'*.markdown': 'markdown',
'*.md': 'markdown',
'*.mask': 'mask',
'*.mc': 'mason',
'*.mhtml': 'mason',
'*.mi': 'mason',
'autohandler': 'mason',
'dhandler': 'mason',
'*.cdf': 'mathematica',
'*.ma': 'mathematica',
'*.nb': 'mathematica',
'*.nbp': 'mathematica',
'*.ms': 'miniscript',
'*.mo': 'modelica',
'*.mod': 'modula2',
'*.monkey': 'monkey',
'*.mt': 'monte',
'*.moo': 'moocode',
'*.moon': 'moonscript',
'*.mos': 'mosel',
'*.mq4': 'mql',
'*.mq5': 'mql',
'*.mqh': 'mql',
'*.msc': 'mscgen',
'*.mu': 'mupad',
'*.mxml': 'mxml',
'*.myt': 'myghty',
'autodelegate': 'myghty',
'*.ncl': 'ncl',
'*.nc': 'nesc',
'*.nt': 'nestedtext',
'*.kif': 'newlisp',
'*.lsp': 'newlisp',
'*.nl': 'newlisp',
'*.ns2': 'newspeak',
'nginx.conf': 'nginx',
'*.nim': 'nimrod',
'*.nimrod': 'nimrod',
'*.nit': 'nit',
'*.nix': 'nixos',
'*.nsh': 'nsis',
'*.nsi': 'nsis',
'*.smv': 'nusmv',
'*.objdump-intel': 'objdump-nasm',
'*.objdump': 'objdump',
'*.mm': 'objective-c++',
'*.ml': 'ocaml',
'*.mli': 'ocaml',
'*.mll': 'ocaml',
'*.mly': 'ocaml',
'*.odin': 'odin',
'*.idl': 'omg-idl',
'*.pidl': 'omg-idl',
'*.ooc': 'ooc',
'*.opa': 'opa',
'*.cls': 'openedge',
'pacman.conf': 'pacmanconf',
'*.pan': 'pan',
'*.psi': 'parasail',
'*.psl': 'parasail',
'*.pwn': 'pawn',
'*.peg': 'peg',
'*.perl': 'perl',
'*.6pl': 'perl6',
'*.6pm': 'perl6',
'*.nqp': 'perl6',
'*.p6': 'perl6',
'*.p6l': 'perl6',
'*.p6m': 'perl6',
'*.pl6': 'perl6',
'*.pm6': 'perl6',
'*.raku': 'perl6',
'*.rakudoc': 'perl6',
'*.rakumod': 'perl6',
'*.rakutest': 'perl6',
'*.php': 'php',
'*.php[345]': 'php',
'*.pig': 'pig',
'*.pike': 'pike',
'*.pmod': 'pike',
'*.pc': 'pkgconfig',
'*.ptls': 'pointless',
'*.pony': 'pony',
'*.eps': 'postscript',
'*.ps': 'postscript',
'*.po': 'pot',
'*.pot': 'pot',
'*.pov': 'pov',
'*.ps1': 'powershell',
'*.psm1': 'powershell',
'*.praat': 'praat',
'*.proc': 'praat',
'*.psc': 'praat',
'*.prolog': 'prolog',
'*.promql': 'promql',
'*.properties': 'properties',
'*.proto': 'protobuf',
'*.jade': 'pug',
'*.pug': 'pug',
'*.pp': 'puppet',
'*.py2tb': 'py2tb',
'*.pypylog': 'pypylog',
'*.py3tb': 'pytb',
'*.pytb': 'pytb',
'*.bzl': 'python',
'*.jy': 'python',
'*.py': 'python',
'*.pyw': 'python',
'*.sage': 'python',
'*.tac': 'python',
'buck': 'python',
'build': 'python',
'build.bazel': 'python',
'sconscript': 'python',
'sconstruct': 'python',
'workspace': 'python',
'*.qbs': 'qml',
'*.qml': 'qml',
'*.qvto': 'qvto',
'*.rkt': 'racket',
'*.rktd': 'racket',
'*.rktl': 'racket',
'*.rout': 'rconsole',
'*.rd': 'rd',
'*.re': 'reasonml',
'*.rei': 'reasonml',
'*.r3': 'rebol',
'*.reb': 'rebol',
'*.red': 'red',
'*.reds': 'red',
'*.cw': 'redcode',
'*.reg': 'registry',
'*.rest': 'restructuredtext',
'*.rst': 'restructuredtext',
'*.arexx': 'rexx',
'*.rex': 'rexx',
'*.rexx': 'rexx',
'*.rx': 'rexx',
'*.rhtml': 'rhtml',
'*.ride': 'ride',
'*.rnc': 'rng-compact',
'*.graph': 'roboconf-graph',
'*.instances': 'roboconf-instances',
'*.robot': 'robotframework',
'*.rql': 'rql',
'*.rsl': 'rsl',
'*.duby': 'ruby',
'*.gemspec': 'ruby',
'*.rake': 'ruby',
'*.rb': 'ruby',
'*.rbw': 'ruby',
'*.rbx': 'ruby',
'gemfile': 'ruby',
'rakefile': 'ruby',
'*.rs': 'rust',
'*.rs.in': 'rust',
'*.sarl': 'sarl',
'*.sas': 'sas',
'*.sass': 'sass',
'*.scala': 'scala',
'*.scaml': 'scaml',
'*.scdoc': 'scdoc',
'*.scm': 'scheme',
'*.ss': 'scheme',
'*.sce': 'scilab',
'*.sci': 'scilab',
'*.tst': 'scilab',
'*.scss': 'scss',
'*.sgf': 'sgf',
'*.shen': 'shen',
'*.shex': 'shexc',
'*.sieve': 'sieve',
'*.siv': 'sieve',
'*.sil': 'silver',
'*.vpr': 'silver',
'singularity': 'singularity',
'*.sla': 'slash',
'*.slim': 'slim',
'*.sl': 'slurm',
'*.smali': 'smali',
'*.st': 'smalltalk',
'*.tpl': 'smarty',
'*.fun': 'sml',
'*.sig': 'sml',
'*.sml': 'sml',
'*.snobol': 'snobol',
'*.sbl': 'snowball',
'*.sol': 'solidity',
'*.sp': 'sp',
'*.rq': 'sparql',
'*.sparql': 'sparql',
'*.spec': 'spec',
'.renviron': 'splus',
'.rhistory': 'splus',
'.rprofile': 'splus',
'*.sqlite3-console': 'sqlite3',
'squid.conf': 'squidconf',
'*.ssp': 'ssp',
'*.stan': 'stan',
'*.ado': 'stata',
'*.do': 'stata',
'*.swift': 'swift',
'*.i': 'swig',
'*.swg': 'swig',
'*.sv': 'systemverilog',
'*.svh': 'systemverilog',
'*.tap': 'tap',
'*.tasm': 'tasm',
'*.rvt': 'tcl',
'*.tcl': 'tcl',
'*.csh': 'tcsh',
'*.tcsh': 'tcsh',
'*.tea': 'tea',
'*.teal': 'teal',
'termcap': 'termcap',
'termcap.src': 'termcap',
'terminfo': 'terminfo',
'terminfo.src': 'terminfo',
'*.tf': 'terraform',
'*.aux': 'tex',
'*.tex': 'tex',
'*.toc': 'tex',
'*.txt': 'text',
'*.thrift': 'thrift',
'*.ti': 'ti',
'*.tid': 'tid',
'*.tnt': 'tnt',
'*.todotxt': 'todotxt',
'todo.txt': 'todotxt',
'*.toml': 'toml',
'pipfile': 'toml',
'poetry.lock': 'toml',
'*.rts': 'trafficscript',
'*.treetop': 'treetop',
'*.tt': 'treetop',
'*.ts': 'typescript',
'*.tsx': 'typescript',
'*.typoscript': 'typoscript',
'*.u1': 'ucode',
'*.u2': 'ucode',
'*.icn': 'unicon',
'*.usd': 'usd',
'*.usda': 'usd',
'*.vala': 'vala',
'*.vapi': 'vala',
'*.vb': 'vb.net',
'*.vbs': 'vbscript',
'*.vcl': 'vcl',
'*.fhtml': 'velocity',
'*.vm': 'velocity',
'*.rpf': 'vgl',
'*.vhd': 'vhdl',
'*.vhdl': 'vhdl',
'*.vim': 'vim',
'.exrc': 'vim',
'.gvimrc': 'vim',
'.vimrc': 'vim',
'_exrc': 'vim',
'_gvimrc': 'vim',
'_vimrc': 'vim',
'gvimrc': 'vim',
'vimrc': 'vim',
'*.wast': 'wast',
'*.wat': 'wast',
'*.wdiff': 'wdiff',
'*.webidl': 'webidl',
'*.whiley': 'whiley',
'*.x10': 'x10',
'*.rss': 'xml',
'*.wsdl': 'xml',
'*.wsf': 'xml',
'*.xsd': 'xml',
'xorg.conf': 'xorg.conf',
'*.xq': 'xquery',
'*.xql': 'xquery',
'*.xqm': 'xquery',
'*.xquery': 'xquery',
'*.xqy': 'xquery',
'*.xpl': 'xslt',
'*.xtend': 'xtend',
'*.xul.in': 'xul+mozpreproc',
'*.sls': 'yaml+jinja',
'*.yaml': 'yaml',
'*.yml': 'yaml',
'*.yang': 'yang',
'*.bro': 'zeek',
'*.zeek': 'zeek',
'*.zep': 'zephir',
'*.zig': 'zig',
}
ambiguous_ext2lexer = {
'*.c': 'c',
'*.h': 'c',
'*.cp': 'cpp',
'*.hh': 'cpp',
'*.html': 'cpp',
'*.xml': 'xml',
'*.xsl': 'xslt',
'*.xslt': 'xslt',
'*.sql': 'sql',
'*.r': 'rebol',
}
    # Hook for site-local overrides; merged last, so its entries win.
    manual_tuning = dict()
    # Merge all tables (later dicts win on duplicate globs).  Globs that
    # already have a dedicated scooper in EXT_MAP are left untouched.
    for ext, lexer in {**unique_ext2lexer, **ambiguous_ext2lexer, **manual_tuning}.items():
        if ext in EXT_MAP:
            debug(f"Skipping {ext} ...")
            continue
        MINTED_EXTS.add(ext)
        EXT_MAP[ext] = scoop_minted_fcn(lexer)
# CSV/TSV rendering needs pandas; register those scoopers only when available.
PANDAS_EXT_MAP = dict()
if deps.PANDAS_OK:
    PANDAS_EXT_MAP = {
        '*.csv': pandas_scoop_csv,
        '*.tsv': pandas_scoop_tsv,
    }
    EXT_MAP.update(PANDAS_EXT_MAP)
PANDAS_EXTS = set(PANDAS_EXT_MAP.keys())
# Matching & scooping logic
# -------------------------------------------------------------------------------------------------------------------- #
def ext_match(file: pathlib.Path) -> T.Optional[str]:
    """Return the first extension pattern (key in EXT_MAP) matching *file*, or None if unknown.

    Matching is case-insensitive: both the file name and each pattern are
    lowercased before ``fnmatch`` is applied.  Patterns are tried in
    EXT_MAP insertion order; the first hit wins.
    """
    l_name = file.name.lower()
    return next((pat for pat in EXT_MAP if fnmatch.fnmatch(name=l_name, pat=pat.lower())),
                None)
def scoop(file: T.Union[pathlib.Path, TOCFile]) -> str:
    """Build the command that loads *file*'s contents into the PDF.

    *file* must either be a plain path whose extension matches a key in
    EXT_MAP, or a TOCFile whose ``ext_key`` was resolved earlier.

    Raises TypeError for any other argument type.
    """
    if isinstance(file, pathlib.Path):
        loader = EXT_MAP[ext_match(file)]
        return loader(file)
    if isinstance(file, TOCFile):
        loader = EXT_MAP[file.ext_key]
        return loader(file.filepath)
    raise TypeError(f"Invalid *file* type {file}")
if __name__ == '__main__':
    # Demo run.  BUG FIX: scoop() accepts pathlib.Path (or TOCFile) objects
    # only -- passing bare strings hit the final `raise TypeError` branch.
    ext_match(pathlib.Path('/ho/hi/ho/Dockerfile'))
    print(scoop(pathlib.Path('/path/to/your/image.jpg')))
    print(scoop(pathlib.Path('/path/to/your/doc.pdf')))
|
#dependencies
import pandas as pd
import os
import requests
from bs4 import BeautifulSoup as bs
from splinter import Browser
from webdriver_manager.chrome import ChromeDriverManager
def scrape_all():
    """Scrape all Mars data (news, featured image, facts table, hemispheres)
    and return it as a single dictionary."""
    # FIX: let webdriver_manager locate/download a matching chromedriver
    # instead of hard-coding a user-specific absolute path (the import was
    # already present but unused).
    executable_path = {"executable_path": ChromeDriverManager().install()}
    browser = Browser('chrome', **executable_path, headless=False)
    news_title, news_p = news(browser)
    # storing data into a dictionary
    mars_data_dict = {
        "news_title": news_title,
        "news_p": news_p,
        "featured_image_url": featured_image_url(browser),
        "df_table": mars_facts(),
        "hemisphere_img": mars_hemi(browser)
    }
    browser.quit()
    return mars_data_dict
#news
def news(browser):
    """Return (title, teaser) of the latest article, or (None, None) when
    the expected elements are missing from the page."""
    browser.visit("https://redplanetscience.com/")
    soup = bs(browser.html, "html.parser")
    try:
        slide = soup.select_one('div.list_text')
        title = slide.find('div', class_="content_title").get_text()
        teaser = slide.find('div', class_="article_teaser_body").get_text()
    except AttributeError:
        # select_one/find returned None -> page layout changed or empty.
        return None, None
    return title, teaser
#featured photo
def featured_image_url(browser):
    """Return the absolute URL of the currently featured Mars image."""
    base = "https://spaceimages-mars.com"
    browser.visit(base)
    soup = bs(browser.html, "html.parser")
    # The thumbnail anchor's href is relative to the site root.
    relative_href = soup.find("a", class_='showimg fancybox-thumbs')['href']
    return f"{base}/{relative_href}"
#mars facts
def mars_facts():
    """Return the Mars/Earth comparison table as an HTML string, or None
    when the table cannot be fetched/parsed."""
    try:
        mars_facts_df = pd.read_html("https://galaxyfacts-mars.com")[0]
    except Exception:
        # FIX: was `except BaseException`, which also swallowed
        # KeyboardInterrupt/SystemExit.  Network or parse errors -> no table.
        return None
    mars_facts_df.columns = ["Label", "Mars", "Earth"]
    # FIX: set_index returns a new frame; the original discarded the result.
    mars_facts_df = mars_facts_df.set_index("Label")
    return mars_facts_df.to_html(classes="table table-bordered")
#mars hemispheres
def mars_hemi(browser):
    """Return a list of {"Title", "Image_URL"} dicts, one per Mars hemisphere.

    FIXES: the original (a) ignored the *browser* argument and opened a
    second hard-coded browser, (b) quit that browser even though the caller
    owns browser lifetime, and (c) never returned `hemi_img_urls`, so
    scrape_all stored None under "hemisphere_img".
    """
    hemi_main_url = 'https://marshemispheres.com/'
    browser.visit(hemi_main_url)
    soup = bs(browser.html, 'html.parser')
    items = soup.find_all('div', class_='item')
    hemi_img_urls = []
    for item in items:
        title = item.find('h3').text
        # Follow the item link to the detail page holding the full-size image.
        detail_href = item.find('a', class_='itemLink product-item')['href']
        browser.visit(hemi_main_url + detail_href)
        detail_soup = bs(browser.html, 'html.parser')
        image_url = hemi_main_url + detail_soup.find('img', class_='wide-image')['src']
        hemi_img_urls.append({"Title": title, "Image_URL": image_url})
    # The caller (scrape_all) quits the browser; just return the data.
    return hemi_img_urls
if __name__ == "__main__":
    # If running as script, print scraped data
    print(scrape_all())
"""Create a new book template."""
import sys
import os
import os.path as op
import shutil as sh
from ruamel.yaml import YAML
import tempfile
from pathlib import Path
from .utils import print_message_box, _error
from .toc import build_toc
from . import __version__
# Locations (inside the installed package) of the full demo book template and
# of the minimal starter book used when the user supplies no content.
TEMPLATE_PATH = op.join(op.dirname(__file__), 'book_template')
MINIMAL_PATH = op.join(op.dirname(__file__), 'minimal')
def _final_message(path_out, notes):
msg = ["",
"Finished creating a new book at `{}`".format(path_out),
""
"- Your content is in `{}` ".format(op.join(path_out, 'content')),
"",
"- A Table of Contents file is at `{}`.".format(
op.join(path_out, '_data', 'toc.yml')),
" You should check its contents, make sure it references your",
" content correctly, and ensure it has the correct order.",
"",
"- Your configuration file is at `{}`.".format(
op.join(path_out, '_config.yml')),
" You should check its contents and double-check that the values"
" are correct for your site.",
""]
if len(notes) > 0:
msg += ["", "Notes", "====="] + notes
return '\n'.join(msg)
def _check_file_exists(path):
if not op.exists(path):
raise FileNotFoundError("Couldn't find file: {}".format(path))
def update_config(path_to_config, new_config):
    """Overwrite values in the template config with those from a user config.

    Keys present in *new_config* but absent from the template are reported
    and skipped; everything else is copied over, then the template file is
    rewritten in place.
    """
    _check_file_exists(new_config)
    print("Updating template configuration file with the values in {}".format(new_config))
    yaml = YAML()
    with open(path_to_config, 'r') as ff:
        template_data = yaml.load(ff)
    with open(new_config, 'r') as ff:
        user_data = yaml.load(ff)
    # Only fields the template already knows about are carried over.
    for key in user_data.keys():
        if key not in template_data:
            print(
                "Not using config key with no corresponding template key: {}".format(key))
            continue
        template_data[key] = user_data[key]
    with open(path_to_config, 'w') as ff:
        yaml.dump(template_data, ff)
def new_book(path_out, content_folder, toc=None,
             license=None, custom_css=None, custom_js=None, config=None,
             extra_files=None, demo=False, verbose=True,
             overwrite=None):
    """Create a new Jupyter Book.

    Parameters
    ----------
    path_out : str
        The location where your book will be placed
    content_folder : str
        A path to a folder that holds your book content
    toc : str
        A path to a yaml file that contains a Table of Contents
        for your Jupyter Book. This will overwrite parts of the book
        template's default toc.yml configuration
    license : str
        A path to a LICENSE.md file if you have created one
    custom_css : str
        A path to a CSS file that defines some custom CSS rules for
        your book
    custom_js : str
        A path to a JS file that defines some custom JS rules for
        your book
    config : str
        A path to a configuration YAML file that contains
        configuration for your Jupyter Book. This will overwrite
        parts of the book template's default _config.yml configuration
    extra_files : str
        A list of extra files / folders to copy into your book's directory
    demo : bool
        Whether to build the book with demo content instead of your own
        content
    verbose : bool
        Whether to display output information. [yes/no]
    overwrite : bool | None
        Whether to overwrite a pre-existing book if it exists
    """
    notes = []

    # Check folder exists and overwrite if necessary
    if op.isdir(path_out):
        if overwrite:
            sh.rmtree(path_out)
    if op.isdir(path_out):
        raise ValueError(
            "A book already exists with this name / output"
            " directory. Delete it, or use `--overwrite` if"
            " you'd like to replace it")

    # Copy the book structure to the new folder
    print("Copying new book to: {}".format(path_out))
    ignore_folders = ['_build', 'content']
    sh.copytree(TEMPLATE_PATH, path_out,
                ignore=sh.ignore_patterns('.git', *ignore_folders))

    ####################################################################
    # Copying over book files
    ####################################################################

    # If the Demo argument is provided, copy over a couple demo files and stop
    if demo is True:
        print("Copying over demo repository content")
        sh.copytree(op.join(TEMPLATE_PATH, 'content'),
                    op.join(path_out, 'content'))
        # Remove extra files we don't want
        _remove_extra_files(path_out)
        message = [
            "- You've chosen to copy over the demo Jupyter Book. This"
            " contains",
            " the content shown at https://jupyterbook.org.\n"
            " Use it to get acquainted with the Jupyter-Book structure"
            " and build ",
            " system. When you're ready, try re-running"
            " `jupyter-book create` using ",
            " your own content!"]
        notes += message
        # BUG FIX: the final message was built but discarded (and the demo
        # notes were dropped); display it like the non-demo path does.
        if verbose:
            print_message_box(_final_message(path_out, notes))
        sys.exit()

    # Create empty folders for build files if they don't exist
    if not op.exists(op.join(path_out, '_build')):
        os.makedirs(op.join(path_out, '_build'))

    # Copy over content
    if content_folder is None:
        # No content given: fall back on the bundled minimal starter book.
        content_folder = op.join(MINIMAL_PATH, 'content')
        toc = op.join(MINIMAL_PATH, '_data', 'toc.yml')
        sh.rmtree(op.join(path_out, '_build'))
        notes.append(("- Add your own content to your book. You haven't provided any content (`--content-folder`)\n"
                      " so we've added a couple files to get you started."))
    _check_file_exists(content_folder)
    print("Copying over your content folder...")
    sh.copytree(content_folder, op.join(path_out, 'content'))

    # Copy over TOC file
    if toc is None:
        # No TOC supplied: auto-generate one from the content folder layout.
        toc = build_toc(content_folder)
        with open(op.join(path_out, '_data', 'toc.yml'), 'w') as ff:
            ff.write(toc)
        # FIX: typos in user-facing note ("foler", "Conents", "file file").
        notes.append(("- Check your Table of Contents file (`_data/toc.yml`). Because you specified a content folder\n"
                      " but no Table of Contents (`--toc`), we auto-generated a TOC file using folder and file\n"
                      " names. You should check its contents and clean it up so that it has the structure you want!\n"))
    else:
        _check_file_exists(toc)
        print("Copying over your TOC file...\n")
        sh.copy2(toc, op.join(path_out, '_data', 'toc.yml'))

    ####################################################################
    # Configuration file
    ####################################################################
    if config is None:
        # Use the minimal configuration, which has some
        # placeholders for users to change.
        update_config(op.join(path_out, '_config.yml'),
                      op.join(MINIMAL_PATH, '_config.yml'))
    else:
        update_config(op.join(path_out, '_config.yml'), config)

    # Update config values for a new book
    yaml = YAML()
    with open(op.join(path_out, '_config.yml'), 'r') as ff:
        data = yaml.load(ff)

    # Add the Jupyter Book version to the config
    data['jupyter_book_version'] = __version__

    # Remove the GA tracking code for the docs if the user didn't overwrite it
    jb_ga_code = "UA-52617120-7"
    if data['google_analytics']['mytrackingcode'] == jb_ga_code:
        data['google_analytics']['mytrackingcode'] = ''
    with open(op.join(path_out, '_config.yml'), 'w') as ff:
        yaml.dump(data, ff)

    ####################################################################
    # Extra files (CSS/JS/etc) and license
    ####################################################################
    if custom_css is not None:
        if not os.path.exists(custom_css):
            raise ValueError(
                "Could not find custom CSS file: {}".format(custom_css))
        sh.copy2(custom_css, op.join(
            path_out, 'assets', 'custom', 'custom.css'))
    if custom_js is not None:
        if not os.path.exists(custom_js):
            raise ValueError(
                "Could not find custom JS file: {}".format(custom_js))
        sh.copy2(custom_js, op.join(
            path_out, 'assets', 'custom', 'custom.js'))

    # Ask user to add a license if they wish
    if license is not None:
        if not os.path.exists(license):
            raise ValueError(
                "Could not find license file: {}".format(license))
        sh.copy2(license, op.join(path_out, 'content', 'LICENSE.md'))
    else:
        notes.append(("- We've added a CC-BY-SA license for you in {}\n"
                      " This is a reasonable license for most book content, though feel free\n"
                      " to change it if you like!".format(op.join(path_out, 'content', 'LICENSE.md'))))
        sh.copy2(op.join(MINIMAL_PATH, 'LICENSE.md'),
                 op.join(path_out, 'content', 'LICENSE.md'))

    # Copy over extra files / folders to the root of the content folder
    if isinstance(extra_files, (list, str)):
        if isinstance(extra_files, str):
            extra_files = [extra_files]
        print('Copying over extra files: {}'.format(extra_files))
        for ipath in extra_files:
            if op.isdir(ipath):
                # Walk the directory and copy individual
                # files respecting directory structure
                for ifolder, _, ifiles in os.walk(ipath):
                    last_folder = ipath.rsplit(os.sep)[-1]
                    rel_to_last_folder = op.join(
                        last_folder, ifolder.split(last_folder, 1)[-1].strip(os.sep))
                    rel_to_out_path = op.join(path_out, rel_to_last_folder)
                    if not op.isdir(rel_to_out_path):
                        os.makedirs(rel_to_out_path)
                    for ifile in ifiles:
                        new_path = op.join(rel_to_out_path, ifile)
                        sh.copy2(op.join(ifolder, ifile), new_path)
                        print(new_path)
            else:
                # Copy the file to the root of the out path directly
                sh.copy2(ipath, op.join(path_out, op.basename(ipath)))

    # Remove extra files we don't want
    _remove_extra_files(path_out)

    # Cleanup messages
    if verbose:
        print_message_box(_final_message(path_out, notes))
def upgrade_book(path_book, extra_files=None):
    """Upgrade a book to the latest Jupyter Book version.

    Parameters
    ----------
    path_book : str
        Path to the root of the book repository you'd like to upgrade.
    extra_files : list of paths | None
        Paths to extra files to include in the upgrade. If these files
        would normally be over-written by an update to Jupyter Book,
        they will not. If these files are not part of the default
        Jupyter Book structure, they will be included in the
        updated book.
    """
    if not isinstance(path_book, Path):
        path_book = Path(path_book)
    if not path_book.joinpath('_config.yml').exists():
        raise ValueError(
            "This does not appear to be a valid Jupyter Book. Searched in location: {}".format(path_book))

    # Now create a new book from the old one
    try:
        print("Creating new book from your original one...")
        # Double check for pre-existing environment files as special cases
        if extra_files is None:
            extra_files = []
        if not isinstance(extra_files, list):
            extra_files = [extra_files]
        extra_files = [Path(ii).absolute() for ii in extra_files]

        # These are files we always check for as they're commonly over-written
        for ifile in ['requirements.txt', 'environment.yml', '_bibliography']:
            path_extra = path_book.joinpath(ifile)
            if path_extra.exists():
                extra_files.append(str(path_extra))
        if len(extra_files) == 0:
            extra_files = None

        # A few optional files that are not strictly required
        optional_files = {
            "license": path_book / 'content' / 'LICENSE.md',
            "custom_css": path_book / 'assets' / 'custom' / 'custom.css',
            "custom_js": path_book / 'assets' / 'custom' / 'custom.js'
        }
        for key, path in optional_files.items():
            if not path.exists():
                optional_files[key] = None

        # Now delete everything in the current folder and replace with new book
        with tempfile.TemporaryDirectory(prefix="jb_") as path_book_temp:
            path_book_temp = Path(path_book_temp)
            # Now create the new book.
            # BUG FIX: `extra_files` may have been normalised to None above;
            # iterating it unconditionally raised TypeError.
            new_book(path_book_temp, toc=path_book.joinpath('_data', 'toc.yml'),
                     content_folder=path_book.joinpath('content'),
                     license=optional_files['license'],
                     config=path_book.joinpath('_config.yml'),
                     custom_css=optional_files['custom_css'],
                     custom_js=optional_files['custom_js'],
                     extra_files=(None if extra_files is None
                                  else [str(ifile) for ifile in extra_files]),
                     overwrite=True, verbose=False)

            # Delete the files in the current folder
            skip_folders = [".git", ".gitignore"]
            for ipath in path_book.glob('*'):
                if any(sfolder in str(ipath) for sfolder in skip_folders):
                    continue
                if ipath.is_dir():
                    sh.rmtree(ipath)
                else:
                    os.remove(ipath)

            # Now move the new book into our folder
            print("Replacing current book with upgraded book...")
            for ipath in path_book_temp.glob('*'):
                newpath = Path(str(ipath).replace(str(path_book_temp), str(path_book)))
                if ipath.is_dir():
                    sh.copytree(ipath, newpath)
                else:
                    sh.copy2(ipath, newpath)
        print_message_box(("Finished upgrading your book at: {}\n\n"
                           "Your content, configuration, etc should not have changed, but all surrounding book\n"
                           "files should be upgraded. You should double-check that this is the case by running \n"
                           "a `git diff` on your book to see what has changed:\n"
                           "\n"
                           " cd {}\n"
                           " git diff\n"
                           "\n"
                           "Don't forget to commit these changes to git!".format(path_book, path_book)))
    except Exception as ex:
        raise _error(("There was an error in upgrading your Jupyter Book!\n\n"
                      "Don't worry, your content, configuration, etc should not have changed.\n"
                      "If it did, reset your repository with\n"
                      "\n"
                      " cd {}\n"
                      " rm -rf ./*\n"
                      " git reset --hard HEAD`.\n"
                      "\n"
                      "Here is the error:\n\n {}".format(path_book, ex)))
def _remove_extra_files(path_out):
"""Remove some extra files we don't want in a created book."""
# Remove some files/folders that may confuse users
files_to_remove = ['Gemfile.lock']
for ifile in files_to_remove:
if Path(ifile).exists():
os.remove(op.join(path_out, ifile))
|
from setuptools import setup
# Packaging metadata for the `capture` visualization tool.
setup(
    name='capture',
    version='0.0.2',
    description='tool for visualization',
    #author='shiannn',
    #author_email='foomail@foo.com',
    packages=['visualizer'], #same as name
    #install_requires=['wheel', 'bar', 'greek'], #external packages as dependencies
)
""" Test boto3 client ovverride """
import unittest
import os
import sys
from moto import mock_ssm
import boto3
import placebo
from . import TestBase
from ssm_cache import SSMParameter, SSMParameterGroup
class TestClientOverride(unittest.TestCase):
    """ Refreshable.set_ssm_client tests """
    # Value that every recorded/mocked parameter resolves to.
    PARAM_VALUE = "abc123"
    # Folder with recorded boto3 responses replayed by placebo.
    PLACEBO_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), 'placebo/override'))

    def test_with_placebo(self):
        """ Test that set_ssm_client works fine with Placebo """
        session = boto3.Session()
        pill = placebo.attach(session, data_path=self.PLACEBO_PATH)
        pill.playback()
        client = session.client('ssm')
        SSMParameter.set_ssm_client(client)
        param = SSMParameter("my_param")
        self.assertEqual(param.value, self.PARAM_VALUE)

    def test_with_illegal_client(self):
        """ Test invalid client (without required methods) """
        # Non-object values are rejected outright.
        with self.assertRaises(TypeError):
            SSMParameter.set_ssm_client(42)

        # pylint: disable=too-few-public-methods
        class MyInvalidClient(object):
            """ This client only has get_parameters """
            def get_parameters(self):
                """ Empty method """

        # Missing get_parameters_by_path -> rejected too.
        with self.assertRaises(TypeError):
            client = MyInvalidClient()
            SSMParameter.set_ssm_client(client)

    @mock_ssm
    def test_with_valid_client(self):
        """ Test a valid duck-typed client (with all required methods) """
        # pylint: disable=unused-argument,no-self-use
        class MyValidClient(object):
            """ This client has all the required methods """
            def get_parameters(self, *args, **kwargs):
                """ Mock method """
                return {
                    'InvalidParameters': [],
                    'Parameters': [
                        {
                            "Type": "String",
                            "Name": "my_param",
                            "Value": "abc123",
                            "Version": 1
                        },
                    ],
                }
            def get_parameters_by_path(self, *args, **kwargs):
                """ Mock method """
                return {
                    "Parameters": [
                        {
                            "Type": "String",
                            "Name": "/foo/bar/1",
                            "Value": "abc123",
                            "Version": 1
                        },
                        {
                            "Type": "String",
                            "Name": "/foo/bar/2",
                            "Value": "abc123",
                            "Version": 1
                        },
                    ]
                }
        client = MyValidClient()
        SSMParameter.set_ssm_client(client)
        param = SSMParameter("my_param")
        self.assertEqual(param.value, self.PARAM_VALUE)
        group = SSMParameterGroup()
        param = group.parameter("my_param")
        self.assertEqual(param.value, self.PARAM_VALUE)
        params = group.parameters("/foo/bar/")
        self.assertEqual(len(params), 2)
        for param in params:
            self.assertEqual(param.value, self.PARAM_VALUE)
|
import pymysql
import psycopg2
import logging
from airflow.hooks.base_hook import BaseHook
from dateutil.parser import parse
from datetime import timezone, timedelta
from exceptions import ErrorMySqlParameters, ErrorStartDateParser, ErrorPostgreParameters
from full_incidents.replication_otrs_to_dwh.postgresql.upload import UploadIntoTicketStateType, UploadIntoTicketState, \
UploadIntoTicketType, \
UploadIntoTicketPriority, UploadIntoTicketLockType, UploadIntoQueue, UploadIntoService, UploadIntoSla, \
UploadIntoUsers, UploadIntoOrganizations, UploadIntoCustomerUser, UploadIntoTicketHistoryType, UploadIntoTicket, \
UploadIntoTicketHistory, UploadClosureCode, UploadTicketCode, UploadArticle, UploadArticleDataMime
logger = logging.getLogger('replication_otrs_to_dwh')
def connect_to_mysql(mysql_args):
    """Open a pymysql connection from an Airflow-style connection object.

    *mysql_args* must expose ``host``, ``login``, ``password`` and ``schema``
    attributes.

    Raises
    ------
    ErrorMySqlParameters
        If any required connection attribute is missing.
    """
    try:
        host = mysql_args.host
        user = mysql_args.login
        password = mysql_args.password
        db = mysql_args.schema
    except AttributeError as e:
        # BUG FIX: attribute access raises AttributeError, not KeyError --
        # the original `except KeyError` could never trigger.
        raise ErrorMySqlParameters(f"No needed parameters for mysql connection: {e}")
    mysql_conn = pymysql.connect(
        host=host,
        user=user,
        password=password,
        db=db
    )
    return mysql_conn
def connect_to_pg(postgre_args):
    """Open a psycopg2 connection from an Airflow-style connection object.

    Raises
    ------
    ErrorPostgreParameters
        If any required connection attribute is missing.
    """
    try:
        host = postgre_args.host
        database = postgre_args.schema
        user = postgre_args.login
        password = postgre_args.password
    except AttributeError as e:
        # BUG FIX: attribute access raises AttributeError, not KeyError --
        # the original `except KeyError` was dead code.
        raise ErrorPostgreParameters(f"No needed parameters for postgre connection: {e}")
    conn = psycopg2.connect(host=host,
                            database=database,
                            user=user,
                            password=password)
    return conn
def pars_start_time(t: str):
    """Parse *t* into a datetime, wrapping any failure in ErrorStartDateParser."""
    try:
        return parse(t)
    except Exception as err:
        raise ErrorStartDateParser(f"Error in start date parser: {err}")
class GetData:
    """Reads rows from the OTRS MySQL database for one daily replication window."""

    def __init__(self, mysql_conn, mysql_cur, execution_start_date):
        # mysql_conn / mysql_cur: open pymysql connection and cursor.
        self.mysql_conn = mysql_conn
        self.mysql_cur = mysql_cur
        self.extraction_start_date = execution_start_date.strftime("%Y-%m-%d %H:%M:%S.%f")
        # 24 hours + 30 minutes of the next day, so that tickets created
        # right after 00:00 are not lost
        self.extraction_end_date = (execution_start_date + timedelta(hours=24, minutes=30)).\
            strftime("%Y-%m-%d %H:%M:%S.%f")

    def get_data_from_table(self, table: str, items: list, query: str = None, is_logging_enabled: bool = True) -> list:
        """Fetch *items* columns of *table* rows changed inside the window.

        A caller-supplied *query* overrides the default window-filtered SELECT.
        Returns a list of dicts keyed by *items* (positional zip with the row).
        NOTE(review): table/column names and dates are interpolated straight
        into the SQL text; all call sites pass literals -- keep it that way.
        (Annotation fix: is_logging_enabled is a bool, not a str.)
        """
        if not query:
            columns = items_to_str(items)
            query = f"""
            select {columns}
            from {table}
            where change_time >= '{self.extraction_start_date}'
            and change_time < '{self.extraction_end_date}'
            """
        self.mysql_cur.execute(query)
        rows = self.mysql_cur.fetchall()
        data_from_table = []
        for row in rows:
            row_dict = dict(zip(items, row))
            data_from_table.append(row_dict)
        if is_logging_enabled:
            logger.info("Select %d rows for table %s where change_time >= %s and change_time < %s", len(data_from_table),
                        table, self.extraction_start_date, self.extraction_end_date)
        return data_from_table
def items_to_str(items):
    """Join column names into a comma-separated SQL select list."""
    separator = ', '
    return separator.join(items)
def ticket_state_type(pg_conn, pg_cur, get_data):
    """Replicate the ticket_state_type dictionary table into the DWH."""
    logger.info("Start replication ticket_state_type")
    fields = ['id', 'name']
    uploader = UploadIntoTicketStateType(pg_conn, pg_cur)
    # Empty target table -> initial full load; otherwise only the window delta.
    if uploader.is_table_empty('ticket_state_type'):
        full_load_query = f"""
        select {items_to_str(fields)}
        from ticket_state_type
        """
        rows = get_data.get_data_from_table(items=fields, table='ticket_state_type', query=full_load_query)
    else:
        rows = get_data.get_data_from_table(items=fields, table='ticket_state_type')
    uploader.upload(rows)
def ticket_state(pg_conn, pg_cur, get_data):
    """Replicate the ticket_state dictionary table into the DWH."""
    logger.info("Start replication ticket_state")
    fields = ['id', 'name', 'type_id']
    uploader = UploadIntoTicketState(pg_conn, pg_cur)
    # Empty target table -> initial full load; otherwise only the window delta.
    if uploader.is_table_empty('ticket_state'):
        full_load_query = f"""
        select {items_to_str(fields)}
        from ticket_state
        """
        rows = get_data.get_data_from_table(items=fields, table='ticket_state', query=full_load_query)
    else:
        rows = get_data.get_data_from_table(items=fields, table='ticket_state')
    uploader.upload(rows)
def ticket_type(pg_conn, pg_cur, get_data):
    """Replicate the ticket_type dictionary table into the DWH."""
    logger.info("Start replication ticket_type")
    fields = ['id', 'name']
    uploader = UploadIntoTicketType(pg_conn, pg_cur)
    # Empty target table -> initial full load; otherwise only the window delta.
    if uploader.is_table_empty('ticket_type'):
        full_load_query = f"""
        select {items_to_str(fields)}
        from ticket_type
        """
        rows = get_data.get_data_from_table(items=fields, table='ticket_type', query=full_load_query)
    else:
        rows = get_data.get_data_from_table(items=fields, table='ticket_type')
    uploader.upload(rows)
def ticket_priority(pg_conn, pg_cur, get_data):
    """Replicate ticket_priority rows changed inside the current window."""
    logger.info("Start replication ticket_priority")
    uploader = UploadIntoTicketPriority(pg_conn, pg_cur)
    rows = get_data.get_data_from_table(items=['id', 'name'], table='ticket_priority')
    uploader.upload(rows)
def ticket_lock_type(pg_conn, pg_cur, get_data):
    """Replicate the ticket_lock_type dictionary table into the DWH."""
    logger.info("Start replication ticket_lock_type")
    fields = ['id', 'name']
    uploader = UploadIntoTicketLockType(pg_conn, pg_cur)
    # Empty target table -> initial full load; otherwise only the window delta.
    if uploader.is_table_empty('ticket_lock_type'):
        full_load_query = f"""
        select {items_to_str(fields)}
        from ticket_lock_type
        """
        rows = get_data.get_data_from_table(items=fields, table='ticket_lock_type', query=full_load_query)
    else:
        rows = get_data.get_data_from_table(items=fields, table='ticket_lock_type')
    uploader.upload(rows)
def queue(pg_conn, pg_cur, get_data):
    """Replicate the queue dictionary table into the DWH."""
    logger.info("Start replication queue")
    fields = ['id', 'name']
    uploader = UploadIntoQueue(pg_conn, pg_cur)
    # Empty target table -> initial full load; otherwise only the window delta.
    if uploader.is_table_empty('queue'):
        full_load_query = f"""
        select {items_to_str(fields)}
        from queue
        """
        rows = get_data.get_data_from_table(items=fields, table='queue', query=full_load_query)
    else:
        rows = get_data.get_data_from_table(items=fields, table='queue')
    uploader.upload(rows)
def service(pg_conn, pg_cur, get_data):
    """Replicate the service dictionary table into the DWH."""
    logger.info("Start replication service")
    fields = ['id', 'name', 'criticality']
    uploader = UploadIntoService(pg_conn, pg_cur)
    # Empty target table -> initial full load; otherwise only the window delta.
    if uploader.is_table_empty('service'):
        full_load_query = f"""
        select {items_to_str(fields)}
        from service
        """
        rows = get_data.get_data_from_table(items=fields, table='service', query=full_load_query)
    else:
        rows = get_data.get_data_from_table(items=fields, table='service')
    uploader.upload(rows)
def sla(pg_conn, pg_cur, get_data):
    """Replicate the sla dictionary table into the DWH."""
    logger.info("Start replication sla")
    fields = ['id', 'name', 'first_response_time', 'first_response_notify',
              'update_time', 'update_notify', 'solution_time', 'solution_notify']
    uploader = UploadIntoSla(pg_conn, pg_cur)
    # Empty target table -> initial full load; otherwise only the window delta.
    if uploader.is_table_empty('sla'):
        full_load_query = f"""
        select {items_to_str(fields)}
        from sla
        """
        rows = get_data.get_data_from_table(items=fields, table='sla', query=full_load_query)
    else:
        rows = get_data.get_data_from_table(items=fields, table='sla')
    uploader.upload(rows)
def users(pg_conn, pg_cur, get_data):
    """Replicate the users (agents) table into the DWH."""
    logger.info("Start replication users")
    fields = ['id', 'login', 'first_name', 'last_name']
    uploader = UploadIntoUsers(pg_conn, pg_cur)
    # Empty target table -> initial full load; otherwise only the window delta.
    if uploader.is_table_empty('users'):
        full_load_query = f"""
        select {items_to_str(fields)}
        from users
        """
        rows = get_data.get_data_from_table(items=fields, table='users', query=full_load_query)
    else:
        rows = get_data.get_data_from_table(items=fields, table='users')
    uploader.upload(rows)
def customer_company(pg_conn, pg_cur, get_data):
    """Replicate customer organizations changed inside the current window."""
    logger.info("Start replication customer_company")
    uploader = UploadIntoOrganizations(pg_conn, pg_cur)
    rows = get_data.get_data_from_table(items=['customer_id', 'name'], table='customer_company')
    uploader.upload(rows)
def customer_user(pg_conn, pg_cur, get_data):
    """Replicate customer_user contacts changed inside the current window."""
    # BUG FIX: the log line said "sla" (copy-paste); log the real step name.
    logger.info("Start replication customer_user")
    fields = ['login', 'email', 'first_name', 'last_name', 'phone', 'mobile', 'id', 'customer_id']
    data = get_data.get_data_from_table(items=fields, table='customer_user')
    upl_cust_user = UploadIntoCustomerUser(pg_conn, pg_cur)
    upl_cust_user.upload(data)
def ticket_history_type(pg_conn, pg_cur, get_data):
    """Replicate the ticket_history_type dictionary table into the DWH."""
    logger.info("Start replication ticket_history_type")
    fields = ['id', 'name']
    uploader = UploadIntoTicketHistoryType(pg_conn, pg_cur)
    # Empty target table -> initial full load; otherwise only the window delta.
    if uploader.is_table_empty('ticket_history_type'):
        full_load_query = f"""
        select {items_to_str(fields)}
        from ticket_history_type
        """
        rows = get_data.get_data_from_table(items=fields, table='ticket_history_type', query=full_load_query)
    else:
        rows = get_data.get_data_from_table(items=fields, table='ticket_history_type')
    uploader.upload(rows)
def ticket(pg_conn, pg_cur, get_data):
    """Replicate changed tickets and return the fetched rows for later steps."""
    logger.info("Start replication ticket")
    fields = ['title', 'queue_id', 'ticket_lock_id', 'type_id', 'service_id', 'sla_id', 'ticket_priority_id',
              'ticket_state_id', 'customer_id', 'timeout', 'until_time', 'escalation_time', 'escalation_update_time',
              'escalation_response_time', 'escalation_solution_time', 'id', 'tn', 'create_time', 'change_time',
              'customer_user_id']
    rows = get_data.get_data_from_table(items=fields, table='ticket')
    uploader = UploadIntoTicket(pg_conn, pg_cur)
    uploader.upload(rows)
    # Later steps (root ids, time accounting, closure codes) reuse these rows.
    return rows
def ticket_history(pg_conn, pg_cur, get_data):
    """Replicate ticket_history rows changed inside the current window."""
    logger.info("Start replication ticket_history")
    fields = ['id', 'ticket_id', 'name', 'history_type_id', 'queue_id', 'owner_id', 'create_time', 'change_time']
    rows = get_data.get_data_from_table(items=fields, table='ticket_history')
    uploader = UploadIntoTicketHistory(pg_conn, pg_cur)
    uploader.upload(rows)
def closure_codes(pg_conn, pg_cur, get_data, data_with_tickets):
    """Replicate closure-code reference data and each ticket's closure code."""
    logger.info("Start replication closure_codes")

    def insert_and_update_code_name():
        # Reference table: full load when the DWH table is empty,
        # window-filtered delta afterwards.
        logger.info("Start replication closure_codes.code_name")
        fields = ['id', 'name', 'valid_id', 'comments', 'create_time', 'create_by', 'change_time', 'change_by',
                  'type_id']
        fields_str = items_to_str(fields)
        upl_queue = UploadClosureCode(pg_conn, pg_cur)
        is_table_empty = upl_queue.is_table_empty('closure_code')
        if is_table_empty:
            query = f"""
            select {fields_str}
            from RS_closure_code
            """
            data = get_data.get_data_from_table(items=fields, table='RS_closure_code', query=query)
        else:
            data = get_data.get_data_from_table(items=fields, table='RS_closure_code')
        upl_queue.upload(data)

    def insert_ticket_code(ticket_id: int):
        # NOTE(review): ticket_id is interpolated straight into the SQL text.
        # Callers pass ids read from the ticket table (ints), but
        # parameterising would be safer -- confirm before reusing elsewhere.
        data = get_data.get_data_from_table(
            items=['id', 'ticket_id', 'code_id'], is_logging_enabled=False,
            table='dynamic_field_value', query=f"""
            select id, object_id as ticket_id, value_text as code_id
            from dynamic_field_value
            where field_id=(select id from dynamic_field where name = 'ClosureCode')
            and object_id={ticket_id}""")
        upl_queue = UploadTicketCode(pg_conn, pg_cur)
        upl_queue.upload(data)

    insert_and_update_code_name()
    logger.info("Start replication closure_codes.ticket_code")
    for one_ticket in data_with_tickets:
        insert_ticket_code(one_ticket["id"])
def action_performed(pg_conn, pg_cur, get_data):
    """Replicate ticket articles (article + article_data_mime) into the DWH."""
    logger.info("Start replication action_performed")

    article_columns = ['id', 'ticket_id', 'article_sender_type_id', 'communication_channel_id', 'is_visible_for_customer',
                      'search_index_needs_rebuild', 'insert_fingerprint', 'create_time', 'create_by', 'change_time',
                      'change_by']
    mime_columns = ['id', 'article_id', 'a_from', 'a_reply_to', 'a_to', 'a_cc', 'a_bcc', 'a_subject', 'a_message_id',
                    'a_message_id_md5', 'a_in_reply_to', 'a_references', 'a_content_type', 'a_body', 'incoming_time',
                    'content_path', 'create_time', 'create_by', 'change_time', 'change_by']

    logger.info("Start replication action_performed.article")
    article_rows = get_data.get_data_from_table(items=article_columns, table='article')
    UploadArticle(pg_conn, pg_cur).upload(article_rows)

    logger.info("Start replication action_performed.article_data_mime")
    mime_rows = get_data.get_data_from_table(items=mime_columns, table='article_data_mime')
    UploadArticleDataMime(pg_conn, pg_cur).upload(mime_rows)
def transport_root_id(data_with_tickets, mysql_cur, pg_cur, pg_conn):
    """Copy each ticket's 'IncidentRootId' dynamic field from OTRS into
    the DWH ticket.root_id column."""

    def get_root_id_from_otrs(object_id: int):
        # Parameterised lookup of the dynamic-field value for one ticket.
        select_root_id = """
        select value_text from dynamic_field_value
        where field_id = (select id from dynamic_field where name = 'IncidentRootId')
        and object_id = %s
        """
        mysql_cur.execute(select_root_id, object_id)
        try:
            root_id = mysql_cur.fetchone()[0]
        except TypeError:
            # fetchone() returned None -> the ticket has no root id set.
            root_id = None
        return root_id

    def insert_root_id(ticket_id: int, root_id: str):
        update_query = """
        update ticket
        set root_id = %s
        where ticket_id = %s
        """
        pg_cur.execute(update_query, (root_id, ticket_id))
        # Commit per ticket so a late failure keeps earlier updates.
        pg_conn.commit()

    def transport_id(ticket_id):
        root_id = get_root_id_from_otrs(ticket_id)
        if root_id:
            insert_root_id(ticket_id, root_id)

    logger.info("Start replication transport_root_id")
    for one_ticket in data_with_tickets:
        transport_id(one_ticket["id"])
def time_accounting(tickets, mysql_cur, pg_cur, pg_conn):
    """Copy per-ticket time_accounting units from OTRS into DWH ticket rows."""

    def _fetch_time_unit(tid):
        # Returns the ticket's time_unit as int, or None when no row exists.
        mysql_cur.execute(
            """
            select time_unit
            from time_accounting
            where ticket_id = %s
            """,
            tid)
        try:
            return int(mysql_cur.fetchone()[0])
        except TypeError:
            return None

    def _store_time_unit(tid, unit):
        pg_cur.execute(
            """
            update ticket
            set ticket_time_unit = %s
            where ticket_id = %s
            """,
            (unit, tid))
        # Commit per ticket so a late failure keeps earlier updates.
        pg_conn.commit()

    time_unit_numbers = 0
    for one_ticket in tickets:
        ticket_id = one_ticket["id"]
        unit = _fetch_time_unit(ticket_id)
        if unit:
            _store_time_unit(ticket_id, unit)
            time_unit_numbers += 1
    logger.info("Get %d time unit numbers", time_unit_numbers)
def replication_otrs_to_dwh(execution_date, replication_otrs_to_dwh_settings):
    """Replicate OTRS reference and ticket data from MySQL into the DWH.

    Args:
        execution_date: datetime string marking the replication window start
            (parsed and normalized to UTC).
        replication_otrs_to_dwh_settings: dict with "dwh_connection_id" and
            "otrs_connection_id" Airflow connection ids.
    """
    execution_start_date = parse(execution_date).replace(tzinfo=timezone.utc)
    execution_date = execution_start_date.strftime("%Y-%m-%d %H:%M:%S.%f")
    # Use the module logger for consistency with the rest of this module
    # (the original called logging.info on the root logger here).
    logger.info("Start time to extract the otrs data: %s", execution_date)
    dwh_connection_id = replication_otrs_to_dwh_settings["dwh_connection_id"]
    dwh_connection_settings = BaseHook.get_connection(dwh_connection_id)
    pg_conn = connect_to_pg(dwh_connection_settings)
    otrs_connection_id = replication_otrs_to_dwh_settings["otrs_connection_id"]
    otrs_connection_settings = BaseHook.get_connection(otrs_connection_id)
    mysql_conn = connect_to_mysql(otrs_connection_settings)
    # Close both connections even when a replication step raises
    # (the original leaked them on any exception).
    try:
        pg_cur = pg_conn.cursor()
        mysql_cur = mysql_conn.cursor()
        get_data = GetData(mysql_conn, mysql_cur, execution_start_date)
        # Reference tables first, then tickets and their dependent data.
        ticket_state_type(pg_conn, pg_cur, get_data)
        ticket_state(pg_conn, pg_cur, get_data)
        ticket_type(pg_conn, pg_cur, get_data)
        ticket_priority(pg_conn, pg_cur, get_data)
        ticket_lock_type(pg_conn, pg_cur, get_data)
        queue(pg_conn, pg_cur, get_data)
        service(pg_conn, pg_cur, get_data)
        sla(pg_conn, pg_cur, get_data)
        users(pg_conn, pg_cur, get_data)
        customer_company(pg_conn, pg_cur, get_data)
        customer_user(pg_conn, pg_cur, get_data)
        ticket_history_type(pg_conn, pg_cur, get_data)
        data_with_tickets = ticket(pg_conn, pg_cur, get_data)
        transport_root_id(data_with_tickets, mysql_cur, pg_cur, pg_conn)
        time_accounting(data_with_tickets, mysql_cur, pg_cur, pg_conn)
        ticket_history(pg_conn, pg_cur, get_data)
        closure_codes(pg_conn, pg_cur, get_data, data_with_tickets)
        action_performed(pg_conn, pg_cur, get_data)
    finally:
        pg_conn.close()
        mysql_conn.close()
|
from pycoingecko import CoinGeckoAPI
import csv
import time
import sys

# CSV whose header row lists the coin symbols of interest.
# Default filter.csv, overridable via the first command-line argument.
filter_file = 'filter.csv'
if len(sys.argv) > 1:
    filter_file = sys.argv[1]

cg = CoinGeckoAPI()

# Read the symbols from the CSV header row.
# (Renamed from `filter`, which shadowed the builtin.)
symbols = []
with open(filter_file, mode='r') as csv_file:
    csv_reader = csv.DictReader(csv_file)
    for field_name in csv_reader.fieldnames:
        symbols.append(field_name)

# Map symbol -> CoinGecko coin id using the top 500 coins by market cap.
id_map = {}
markets_snapshot = cg.get_coins_markets(vs_currency='usd', order="market_cap_desc", per_page=250, page=1)
markets_snapshot += cg.get_coins_markets(vs_currency='usd', order="market_cap_desc", per_page=250, page=2)
for coin in markets_snapshot:
    if coin['symbol'] in symbols:
        id_map[coin['symbol']] = coin['id']

# Write one row per symbol: the symbol followed by its last 30 days of
# market-cap values.
with open('mcap_hist.csv', mode='w', newline='') as csv_file:
    writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    for symb in symbols:
        # Skip symbols not found in the top-500 snapshot instead of
        # crashing with a KeyError.
        if symb not in id_map:
            continue
        coin_hist = cg.get_coin_market_chart_by_id(id=id_map[symb], vs_currency='usd', days=30)
        row = [symb]
        for entry in coin_hist["market_caps"]:
            row.append(entry[1])
        writer.writerow(row)
        # Throttle requests to respect the public API rate limit.
        time.sleep(1)
|
# Copyright (c) 2015, Dataent Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import dataent
no_cache = True
def get_context(context):
    """Populate the payment-success page context.

    Loads the document named by the request's form_dict and, when that
    doctype defines get_payment_success_message(), exposes its message as
    context.payment_message (empty string otherwise).
    """
    # NOTE(review): 'token' is read but never used below — presumably kept to
    # document the expected query parameter; confirm before removing.
    token = dataent.local.form_dict.token
    doc = dataent.get_doc(dataent.local.form_dict.doctype, dataent.local.form_dict.docname)
    context.payment_message = ''
    if hasattr(doc, 'get_payment_success_message'):
        context.payment_message = doc.get_payment_success_message()
|
# -*- coding: utf-8 -*-
#
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import budou
from lxml import html
from mock import MagicMock
import unittest
# Sample Japanese sentence used across the tests ("Today is sunny.").
DEFAULT_SENTENCE = u'今日は晴れ。'
# Canned syntax-analysis response for DEFAULT_SENTENCE, in the shape returned
# by the Cloud Natural Language API; used to mock Budou._get_annotations.
DEFAULT_TOKENS = [
    {
        u'text': {u'content': u'今日', u'beginOffset': 0},
        u'dependencyEdge': {u'headTokenIndex': 2, u'label': u'NN'},
        u'partOfSpeech': {u'tag': u'NOUN'},
        u'lemma': u'今日'
    },
    {
        u'text': {u'content': u'は', u'beginOffset': 2},
        u'dependencyEdge': {u'headTokenIndex': 0, u'label': u'PRT'},
        u'partOfSpeech': {u'tag': u'PRT'},
        u'lemma': u'は'
    },
    {
        u'text': {u'content': u'晴れ', u'beginOffset': 3},
        u'dependencyEdge': {u'headTokenIndex': 2, u'label': u'ROOT'},
        u'partOfSpeech': {u'tag': u'NOUN'},
        u'lemma': u'晴れ'
    },
    {
        u'text': {u'content': u'。', u'beginOffset': 5},
        u'dependencyEdge': {u'headTokenIndex': 2, u'label': u'P'},
        u'partOfSpeech': {u'tag': u'PUNCT'},
        u'lemma': u'。'
    }]
class TestBudouMethods(unittest.TestCase):
    """Unit tests for the Budou chunk-based line-break segmenter.

    The external Cloud Natural Language API call is replaced in setUp with a
    MagicMock returning DEFAULT_TOKENS, so the tests run without network
    access or credentials.
    """

    def setUp(self):
        # Budou(None): no API service object is needed because the only
        # network-touching method is mocked below.
        self.parser = budou.Budou(None)
        # Mocks external API request.
        self.parser._get_annotations = MagicMock(
            return_value=DEFAULT_TOKENS)

    def tearDown(self):
        pass

    def test_process(self):
        """Demonstrates standard usage."""
        expected_chunks = [
            budou.Chunk(u'今日は', u'NOUN', u'NN', True),
            budou.Chunk(u'晴れ。', u'NOUN', u'ROOT', False)
        ]
        expected_html_code = (u'<span class="ww">今日は</span>'
                              u'<span class="ww">晴れ。</span>')
        result = self.parser.parse(DEFAULT_SENTENCE, use_cache=False)
        self.assertIn(
            'chunks', result,
            'Processed result should include chunks.')
        self.assertIn(
            'html_code', result,
            'Processed result should include organized html code.')
        self.assertEqual(
            expected_chunks, result['chunks'],
            'Processed result should include expected chunks.')
        self.assertEqual(
            expected_html_code, result['html_code'],
            'Processed result should include expected html code.')

    def test_process_with_aria(self):
        """Demonstrates advanced usage considering accessibility."""
        expected_chunks = [
            budou.Chunk(u'今日は', u'NOUN', u'NN', True),
            budou.Chunk(u'晴れ。', u'NOUN', u'ROOT', False)
        ]
        expected_html_code = (
            u'<span aria-describedby="parent" class="text-chunk">今日は</span>'
            u'<span aria-describedby="parent" class="text-chunk">晴れ。</span>')
        result = self.parser.parse(DEFAULT_SENTENCE, {
            'aria-describedby': 'parent',
            'class': 'text-chunk'
        }, use_cache=False)
        self.assertIn(
            'chunks', result,
            'Processed result should include chunks.')
        self.assertIn(
            'html_code', result,
            'Processed result should include organized html code.')
        self.assertEqual(
            expected_chunks, result['chunks'],
            'Processed result should include expected chunks.')
        self.assertEqual(
            expected_html_code, result['html_code'],
            'Processed result should include expected html code.')

    def test_preprocess(self):
        # Input mixes leading spaces, a newline, a BR tag, and double spaces.
        source = u' a\nb<br> c  d'
        expected = u'ab c d'
        result = self.parser._preprocess(source)
        self.assertEqual(
            expected, result,
            'BR tags, line breaks, and unnecessary spaces should be removed.')

    def test_get_source_chunks(self):
        # One chunk per token of the mocked API response, before any
        # concatenation passes run.
        expected = [
            budou.Chunk(u'今日', u'NOUN', u'NN', True),
            budou.Chunk(u'は', u'PRT', u'PRT', False),
            budou.Chunk(u'晴れ', u'NOUN', u'ROOT', False),
            budou.Chunk(u'。', u'PUNCT', u'P', False),
        ]
        result = self.parser._get_source_chunks(DEFAULT_SENTENCE)
        self.assertEqual(
            expected, result,
            'Input sentence should be processed into source chunks.')

    def test_migrate_html(self):
        # A chunk overlapping an existing element boundary should be merged
        # into a single HTML_POS chunk preserving the markup.
        source = u'こ<a>ちらを</a>クリック'
        dom = html.fragment_fromstring(source, create_parent='body')
        chunks = [
            budou.Chunk(u'こちら', u'PRON', u'NSUBJ', True),
            budou.Chunk(u'を', u'PRT', u'PRT', False),
            budou.Chunk(u'クリック', u'NOUN', u'ROOT', False),
        ]
        expected = [
            budou.Chunk(
                u'こ<a>ちらを</a>', budou.HTML_POS,
                budou.HTML_POS, True),
            budou.Chunk(u'クリック', u'NOUN', u'ROOT', False),
        ]
        result = self.parser._migrate_html(chunks, dom)
        self.assertEqual(
            expected, result,
            'The HTML source code should be migrated into the chunk list.')

    def test_get_elements_list(self):
        source = u'<a>こちら</a>をクリック'
        dom = html.fragment_fromstring(source, create_parent='body')
        expected = [
            budou.Element(u'こちら', 'a', u'<a>こちら</a>', 0)
        ]
        result = self.parser._get_elements_list(dom)
        self.assertEqual(
            result, expected,
            'The input DOM should be processed to an element list.')

    def test_spanize(self):
        chunks = [
            budou.Chunk(u'a', None, None, None),
            budou.Chunk(u'b', None, None, None),
            budou.Chunk(u'c', None, None, None),
        ]
        attributes = {
            'class': 'foo'
        }
        expected = (
            u'<span class="foo">a</span>'
            '<span class="foo">b</span>'
            '<span class="foo">c</span>')
        result = self.parser._spanize(chunks, attributes)
        self.assertEqual(
            result, expected,
            'The chunks should be compiled to a HTML code.')

    def test_concatenate_punctuations(self):
        # A PUNCT chunk should be glued onto the chunk before it.
        chunks = [
            budou.Chunk(u'a', None, None, None),
            budou.Chunk(u'b', u'PUNCT', None, None),
            budou.Chunk(u'c', None, None, None),
        ]
        expected_forward_concat = [
            budou.Chunk(u'ab', None, None, None),
            budou.Chunk(u'c', None, None, None),
        ]
        result = self.parser._concatenate_punctuations(chunks)
        self.assertEqual(
            result, expected_forward_concat,
            'Punctuation marks should be concatenated backward.')

    def test_concatenate_by_label(self):
        chunks = [
            budou.Chunk(u'a', None, budou.TARGET_LABEL[0], True),
            budou.Chunk(u'b', None, budou.TARGET_LABEL[1], False),
            budou.Chunk(u'c', None, budou.TARGET_LABEL[2], True),
        ]
        expected_forward_concat = [
            budou.Chunk(u'ab', None, budou.TARGET_LABEL[1], False),
            budou.Chunk(u'c', None, budou.TARGET_LABEL[2], True),
        ]
        result = self.parser._concatenate_by_label(chunks, True)
        self.assertEqual(
            result, expected_forward_concat,
            'Forward directional chunks should be concatenated to following '
            'chunks.')
        expected_backward_concat = [
            budou.Chunk(u'ab', None, budou.TARGET_LABEL[0], True),
            budou.Chunk(u'c', None, budou.TARGET_LABEL[2], True),
        ]
        result = self.parser._concatenate_by_label(chunks, False)
        self.assertEqual(
            result, expected_backward_concat,
            'Backward directional chunks should be concatenated to preceding '
            'chunks.')

    def test_get_attribute_dict(self):
        # Exercises every combination of attributes (None / str / dict, with
        # and without a 'class' key) and the optional classname argument.
        result = self.parser._get_attribute_dict({})
        self.assertEqual(
            result, {'class': budou.DEFAULT_CLASS_NAME},
            'When attributes is none and classname is not provided, the output '
            'should have the default class name in it.')
        result = self.parser._get_attribute_dict('foo')
        self.assertEqual(
            result, {'class': 'foo'},
            'When attributes is a string and classname is not provided, the output '
            'should have the specified class name in it.')
        result = self.parser._get_attribute_dict({'bizz': 'buzz'})
        self.assertEqual(
            result, {
                'bizz': 'buzz',
                'class': budou.DEFAULT_CLASS_NAME,
            }, 'When attributes is a dictionary but class property is not '
            'included, the output should have the default class name in it.')
        result = self.parser._get_attribute_dict({'bizz': 'buzz', 'class': 'foo'})
        self.assertEqual(
            result, {
                'bizz': 'buzz',
                'class': 'foo',
            }, 'When attribute is a dictionary and class property is included, '
            'the output should have the specified class name in it.')
        result = self.parser._get_attribute_dict({}, 'foo')
        self.assertEqual(
            result, {'class': 'foo'},
            'When attributes is none and classname is provided, the output should '
            'have classname as the class name.')
        result = self.parser._get_attribute_dict('bar', 'foo')
        self.assertEqual(
            result, {'class': 'bar'},
            'When attributes is a string and classname is provided, the output '
            'should use the class property in attributes over classname.')
        result = self.parser._get_attribute_dict({'bizz': 'buzz'}, 'foo')
        self.assertEqual(
            result, {
                'bizz': 'buzz',
                'class': 'foo',
            }, 'When attributes is a dictionary without class property and '
            'classname is provided, the output should have classname as the class '
            'name.')
        result = self.parser._get_attribute_dict(
            {'bizz': 'buzz', 'class': 'bar'}, 'foo')
        self.assertEqual(
            result, {
                'bizz': 'buzz',
                'class': 'bar',
            }, 'When attributes is a dictionary with class property and classname '
            'is provided, the output should use the class property in attributes '
            'over classname.')
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
|
import numpy as np
def Sigmoid(z):
    """Numerically stable logistic function, 1 / (1 + exp(-z)).

    Uses exp(-logaddexp(0, -z)), which avoids the overflow that the naive
    1/(1+exp(-z)) form hits for large negative z.
    """
    return np.exp(-np.logaddexp(0.0, -z))
def SigmoidGradient(z):
    """Derivative of the logistic function: s(z) * (1 - s(z)).

    Evaluates Sigmoid(z) once instead of twice as the original did, and
    drops the stray trailing semicolon.
    """
    s = Sigmoid(z)
    return s * (1.0 - s)
def InverseSigmoid(a):
    """Logit function: the inverse of Sigmoid, i.e. log(a / (1 - a))."""
    value = np.float64(a)
    return -np.log(1.0 / value - 1.0)
def InverseSigmoidGradient(a):
    """Sigmoid gradient evaluated at the pre-activation whose output is a."""
    return SigmoidGradient(InverseSigmoid(a))
|
# Copyright 2021 MosaicML. All Rights Reserved.
from composer.algorithms.curriculum_learning.curriculum_learning import CurriculumLearning as CurriculumLearning
from composer.algorithms.curriculum_learning.curriculum_learning import \
CurriculumLearningHparams as CurriculumLearningHparams
# Registry metadata describing the Curriculum Learning method for composer's
# method-card / documentation tooling.
_name = 'Curriculum Learning'
_class_name = 'CurriculumLearning'
_functional = 'apply_curriculum'
_tldr = 'Using sequence length as a proxy for example difficulty, it warms up the sequence length for a specified duration of training.'
_attribution = '(Li et al, 2021)'
_link = 'https://arxiv.org/abs/2108.06084'
_method_card = ''
|
from time import sleep

# New Year countdown: print 10 down to 1, one second apart, then celebrate.
count = 10
while count > 0:
    print(count)
    sleep(1)
    count -= 1
print('FELIZ ANO NOVO!!')
# Three firework emoji (U+1F386).
for _ in range(3):
    print('\U0001F386')
|
# split into train and test set
from os import listdir
from xml.etree import ElementTree
from numpy import zeros
from numpy import asarray
from mrcnn.utils import Dataset
# class that defines and loads the kangaroo dataset
from constant import classes
from util.xml_parser import extract_boxes
class ImageDataset(Dataset):
    """mrcnn Dataset backed by a directory laid out as
    <dataset_dir>/images/* plus matching <dataset_dir>/annots/<id>.xml files.
    """

    # load the dataset definitions
    def load_dataset(self, dataset_dir, is_train=True):
        """Register classes and images for either the train or test split.

        The first 15 files in directory order form the test/val split; all
        remaining files form the train split. (The original comments said
        "150" but the code has always used 15.)
        """
        # Register one class per name, ids starting at 1 (0 is background).
        for class_id, class_name in enumerate(classes, 1):
            self.add_class("dataset", class_id, class_name)
        # define data locations
        images_dir = dataset_dir + '/images/'
        annotations_dir = dataset_dir + '/annots/'
        # List the directory once; the original called listdir() again for
        # every file inside the loop (O(n^2) directory scans).
        filenames = listdir(images_dir)
        holdout = set(filenames[:15])
        for filename in filenames:
            # extract image id (strip the 4-char extension, e.g. ".jpg")
            image_id = filename[:-4]
            # Train split skips the first 15 files ...
            if is_train and filename in holdout:
                continue
            # ... and the test/val split keeps only them.
            if not is_train and filename not in holdout:
                continue
            img_path = images_dir + filename
            ann_path = annotations_dir + image_id + '.xml'
            # add to dataset
            self.add_image('dataset', image_id=image_id, path=img_path, annotation=ann_path)

    # load the masks for an image
    def load_mask(self, image_id):
        """Return (masks, class_ids): one HxW mask channel per bounding box."""
        # get details of image
        info = self.image_info[image_id]
        # define box file location
        path = info['annotation']
        # Load XML. Renamed the second result to box_classes so it no longer
        # shadows the module-level `classes` import.
        boxes, box_classes, w, h = extract_boxes(path)
        # create one array for all masks, each on a different channel
        masks = zeros([h, w, len(boxes)], dtype='uint8')
        # create masks
        class_ids = list()
        for i, (box, class_name) in enumerate(zip(boxes, box_classes)):
            # box layout assumed [xmin, ymin, xmax, ymax] — matches the
            # original row/column slicing; confirm against extract_boxes.
            row_s, row_e = box[1], box[3]
            col_s, col_e = box[0], box[2]
            masks[row_s:row_e, col_s:col_e, i] = 1
            class_ids.append(self.class_names.index(class_name))
        return masks, asarray(class_ids, dtype='int32')

    # load an image reference
    def image_reference(self, image_id):
        """Return the path of the source image file."""
        info = self.image_info[image_id]
        return info['path']
# if __name__ == '__main__':
# # train set
# train_set = KangarooDataset()
# train_set.load_dataset('dataset', is_train=True)
# train_set.prepare()
# print('Classes: ', train_set.class_names)
# print('Train: %d' % len(train_set.image_ids))
#
# # test/val set
# test_set = KangarooDataset()
# test_set.load_dataset('dataset', is_train=False)
# test_set.prepare()
# print('Test: %d' % len(test_set.image_ids))
# if __name__ == '__main__':
# # train set
# train_set = KangarooDataset()
# train_set.load_dataset('dataset', is_train=True)
# train_set.prepare()
# # enumerate all images in the dataset
# for image_id in train_set.image_ids:
# # load image info
# info = train_set.image_info[image_id]
# # display on the console
# print(info)
# if __name__ == '__main__':
# # Show 9 Photos with Mask
# train_set = KangarooDataset()
# train_set.load_dataset('dataset', is_train=True)
# train_set.prepare()
#
# # load an image
# from matplotlib import pyplot
# for i in range(9):
# # define subplot
# pyplot.subplot(330 + 1 + i)
# # plot raw pixel data
# image = train_set.load_image(i)
# pyplot.imshow(image)
# # plot all masks
# mask, _ = train_set.load_mask(i)
# for j in range(mask.shape[2]):
# pyplot.imshow(mask[:, :, j], cmap='gray', alpha=0.3)
# # show the figure
# pyplot.show()
# if __name__ == '__main__':
# from mrcnn.visualize import display_instances
# from mrcnn.utils import extract_bboxes
# # train set
# train_set = KangarooDataset()
# train_set.load_dataset('dataset', is_train=True)
# train_set.prepare()
# # define image id
# image_id = 1
# # load the image
# image = train_set.load_image(image_id)
# # load the masks and the class ids
# mask, class_ids = train_set.load_mask(image_id)
# # extract bounding boxes from the masks
# bbox = extract_bboxes(mask)
# # display image with masks and bounding boxes
# display_instances(image, bbox, mask, class_ids, train_set.class_names)
|
import string
import math
# Positional inverted index, postings in form:
# <document id, term frequency in document, term positions in document>
class InvertedIndex:
    """Positional inverted index.

    Each term maps to a list of postings of the form
    [document id, term frequency in document, [term positions in document]].
    Documents in the source file are separated by blank lines; term positions
    restart at 1 for every document. Positions are compared as
    (docid, position) tuples; the strings "infinity" / "-infinity" act as
    sentinels past either end of a term's posting list.
    """

    def __init__(self):
        # Per-instance state. The original declared these as *class*
        # attributes, so every InvertedIndex instance shared one index and
        # one set of caches — a latent cross-instance bug.
        self.num_documents = 0
        self.inverted_index = {}
        # Cached flat-position indices last returned by prev/next for a term,
        # used to seed the galloping searches.
        self.prev_cache = {}
        self.next_cache = {}

    # Read from a file and build the inverted index
    def build_index(self, filename):
        """Build the index from a text file of newline-separated documents."""
        # 'with' guarantees the file handle is closed even if parsing fails
        # (the original used open()/close() with a stray semicolon).
        with open(filename, 'r') as f:
            # Documents separated by newline
            current_document = 1
            # Term position within document
            current_position = 1
            # Read file line by line
            for line in f:
                # Check if line is only newline.
                # If so, new document and reset term position.
                if line == "\n":
                    current_document += 1
                    current_position = 1
                else:
                    # Read line word by word ignoring whitespace
                    for word in line.split():
                        # Strip punctuation and convert to lowercase
                        word = word.translate(
                            str.maketrans("", "", string.punctuation))
                        word = word.lower()
                        # Case when stripping punctuation leaves the empty string
                        if word == "":
                            continue
                        # First occurrence of the word:
                        # add an entry in the dictionary
                        if word not in self.inverted_index:
                            # <docid, term frequency in doc, occurrences in doc>
                            self.inverted_index[word] = [
                                [current_document, 1, [current_position]]]
                        # Word seen before: add occurrence
                        else:
                            postings = self.inverted_index[word]
                            # Check if first occurrence of this document by
                            # checking last document posting.
                            # If so, new posting
                            if (postings[-1][0] != current_document):
                                postings += [
                                    [current_document, 1, [current_position]]]
                            # Same document, increment freq, add occurrence
                            else:
                                postings[-1][1] += 1
                                postings[-1][2] += [current_position]
                        # Increment current_position.
                        current_position += 1
            self.num_documents = current_document

    # Returns the first occurrence of term t in the index
    def first(self, t):
        """Return (docid, position) of t's first occurrence, or "infinity"."""
        if t in self.inverted_index:
            postings = self.inverted_index[t]
            # (docid, position)
            return (postings[0][0], postings[0][2][0])
        else:
            return "infinity"

    # Returns the last occurrence of term t in the index
    def last(self, t):
        """Return (docid, position) of t's last occurrence, or "infinity"."""
        if t in self.inverted_index:
            postings = self.inverted_index[t]
            # (docid, position)
            return (postings[-1][0], postings[-1][2][-1])
        else:
            return "infinity"

    # Returns the previous occurrence of term t before position current
    # Uses galloping search
    def prev(self, t, current):
        """Return t's last occurrence strictly before `current`, or "-infinity"."""
        if t not in self.inverted_index:
            return "-infinity"
        # Check if current is before the first position in postings,
        # thus no previous occurrence exists.
        first_position = (
            self.inverted_index[t][0][0], self.inverted_index[t][0][2][0])
        # first_position >= current
        if self.compare_positions(first_position, current) >= 0:
            return "-infinity"
        # Last position in postings is less than current, return.
        last_position = (
            self.inverted_index[t][-1][0], self.inverted_index[t][-1][2][-1])
        # last_position < current
        if self.compare_positions(last_position, current) < 0:
            self.prev_cache[t] = self.num_positions(t) - 1
            return last_position
        # Initialize high after cached position from the last time prev was
        # called if >= current, else start at last position in postings
        high = self.num_positions(t) - 1
        if (t in self.prev_cache and
                self.prev_cache[t] < self.num_positions(t) - 1):
            cache_position = self.index_to_position(t, self.prev_cache[t] + 1)
            # cache_position >= current
            if self.compare_positions(cache_position, current) >= 0:
                high = self.prev_cache[t] + 1
        jump = 1
        low = high - jump
        # Begin galloping search, increase size of jumps until low
        # passes current or end of positions.
        if low >= 0:
            low_position = self.index_to_position(t, low)
            while (low >= 0 and
                    self.compare_positions(low_position, current) >= 0):
                high = low
                jump = 2*jump
                low = high - jump
                if low >= 0:
                    low_position = self.index_to_position(t, low)
        # Jumped past 0, cap at first position
        if low < 0:
            low = 0
        # Binary search interval that current is contained in.
        self.prev_cache[t] = self.binary_search(t, low, high, current, False)
        return self.index_to_position(t, self.prev_cache[t])

    # Returns the next occurrence of term t after position current
    # Uses galloping search
    def next(self, t, current):
        """Return t's first occurrence strictly after `current`, or "infinity"."""
        if t not in self.inverted_index:
            return "infinity"
        # Check if current is past all positions in postings,
        # thus no next occurrence exists.
        last_position = (
            self.inverted_index[t][-1][0], self.inverted_index[t][-1][2][-1])
        # last_position <= current
        if self.compare_positions(last_position, current) <= 0:
            return "infinity"
        # First position in postings is greater than current, return.
        first_position = (
            self.inverted_index[t][0][0], self.inverted_index[t][0][2][0])
        # first_position > current
        if self.compare_positions(first_position, current) > 0:
            self.next_cache[t] = 0
            return first_position
        # Initialize low before cached position from the last time next was
        # called if <= current, else start at first position in postings
        low = 0
        if t in self.next_cache and self.next_cache[t] > 0:
            cache_position = self.index_to_position(t, self.next_cache[t] - 1)
            # cache_position <= current
            if self.compare_positions(cache_position, current) <= 0:
                low = self.next_cache[t] - 1
        jump = 1
        high = low + jump
        # Begin galloping search, increase size of jumps until high
        # passes current or end of positions.
        high_position = self.index_to_position(t, high)
        while (high < self.num_positions(t) and
                self.compare_positions(high_position, current) <= 0):
            low = high
            jump = 2*jump
            high = low + jump
            high_position = self.index_to_position(t, high)
        # Jumped past last position, cap high at last position
        if high >= self.num_positions(t):
            high = self.num_positions(t) - 1
        # Binary search interval that current is contained in.
        self.next_cache[t] = self.binary_search(t, low, high, current, True)
        return self.index_to_position(t, self.next_cache[t])

    # Binary search through postings of term t in the index.
    # Returns the next biggest or smallest posting after current
    # depending on is_next
    def binary_search(self, t, low, high, current, is_next):
        """Return the flat index of the position just after (is_next=True) or
        just before (is_next=False) `current` within [low, high]."""
        # Loop until current is either low or high, or low and high exactly
        # surround current.
        while high - low > 1:
            mid = low + math.floor((high - low)/2)
            mid_position = self.index_to_position(t, mid)
            # If looking for position bigger than current,
            # keep value at high larger than current.
            if is_next:
                # mid_position <= current
                if self.compare_positions(mid_position, current) <= 0:
                    low = mid
                else:
                    high = mid
            # Looking for position smaller than current,
            # keep value at low smaller than current.
            else:
                # mid_position < current
                if self.compare_positions(mid_position, current) < 0:
                    low = mid
                else:
                    high = mid
        if is_next:
            return high
        else:
            return low

    # Helper function that compares two term positions of form (doc, position)
    # pos1 < pos2 == -1
    # pos1 == pos2 == 0
    # pos1 > pos2 == 1
    def compare_positions(self, pos1, pos2):
        """Three-way compare of (doc, position) pairs with infinity sentinels."""
        if pos1 == "infinity":
            return 1
        if pos1 == "-infinity":
            return -1
        if pos2 == "infinity":
            return -1
        if pos2 == "-infinity":
            return 1
        # pos1's doc is less than pos2's
        if pos1[0] < pos2[0]:
            return -1
        # same documents, check document positions
        elif pos1[0] == pos2[0]:
            if pos1[1] < pos2[1]:
                return -1
            elif pos1[1] == pos2[1]:
                return 0
            else:
                return 1
        else:
            return 1

    # Helper function that returns size of a term's total positions in the
    # inverted index
    def num_positions(self, t):
        """Return the total number of occurrences of t across all documents."""
        result = 0
        postings = self.inverted_index[t]
        for posting in postings:
            result += len(posting[2])
        return result

    # Helper function that takes a term and an index and finds the
    # corresponding position in the term's postings, as if all the term's
    # document positions were in one list.
    def index_to_position(self, t, index):
        """Map a flat occurrence index to its (docid, position) pair."""
        postings = self.inverted_index[t]
        for posting in postings:
            positions = posting[2]
            # index is contained in this posting's positions
            if len(positions) > index:
                # (docid, doc_position)
                return (posting[0], positions[index])
            # index refers to position in a further posting
            else:
                index -= len(positions)
        # Index greater than total positions
        return "infinity"
|
#!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
from .base import hash, hash_next
from typing import List, Set, Mapping
from task.util import utils
from tqdm import tqdm
import gzip
def make_hash(emb_path):
    """Map the hash of every head word in a gzipped embedding file to its
    line id, and return that mapping plus the longest token length seen.
    """
    vocab = {}
    longest_len = 0
    longest_word = None
    with gzip.open(emb_path, mode='rt', compresslevel=6) as file:
        # First line is the "<word count> <dimension>" header.
        word_size, dim = [int(i) for i in file.readline().rstrip().split()]
        for line_no, line in tqdm(enumerate(file)):
            head, *_ = line.split()
            if ' ' in head.strip():
                print(head)
                continue
            hash_id, token_len = hash(head)
            vocab[hash_id] = line_no
            if token_len > longest_len:
                longest_len = token_len
                longest_word = head
    print('word vector size = ', len(vocab))
    print('{}, token_len={}'.format([w for t, w in utils.replace_entity(longest_word)], longest_len))
    return vocab, longest_len
def filter(dataset: List[str], vocab: Mapping[int, int], max_tok_len):
    """Collect the subset of `vocab` whose hashed token n-grams (up to
    max_tok_len tokens long) occur in the given tagged corpus files.

    Returns a mapping from n-gram hash id to embedding line id.
    """
    small = {}
    for path in dataset:
        with open(path) as file:
            for line in tqdm(file):
                items = line.strip().split()
                # Skip empty and overly long sentences.
                if not 0 < len(items) < 150:
                    continue
                pairs = [t.rsplit('#', maxsplit=1) for t in items]
                tokens = [p[0] for p in pairs]
                tags = [p[1] for p in pairs]
                for start in range(len(tokens)):
                    hash_id = None
                    # Grow the n-gram one token at a time, reusing the
                    # rolling hash of the shorter prefix.
                    for offset in range(min(max_tok_len, len(tokens) - start)):
                        hash_id = hash_next(hash_id, tokens[start + offset])
                        line_id = vocab.get(hash_id)
                        if line_id is not None:
                            small[hash_id] = line_id
    return small
def extract(big_emb: str, small_emb: str, small: Mapping[int, int]):
    """Copy the embedding rows whose word hash appears in `small` from the
    big gzipped embedding file into a new, smaller gzipped file.
    """
    with gzip.open(big_emb, mode='rt', compresslevel=6) as reader,\
            gzip.open(small_emb, mode='wt', compresslevel=6) as writer:
        # Header: "<word count> <dimension>"; rewrite it with the new count.
        word_size, dim = [int(i) for i in reader.readline().rstrip().split()]
        writer.write('{} {}\n'.format(len(small), dim))
        for line_no, line in tqdm(enumerate(reader)):
            word, weights = line.rstrip().split(maxsplit=1)
            hash_id, _token_len = hash(word)
            if hash_id in small:
                writer.write('{}\t{}\t{}\n'.format(line_no, word, weights))
if __name__ == '__main__':
    # Shrink the Tencent embedding file to just the n-grams that actually
    # occur in the POS train/valid/gold data.
    big_path = 'wordvec/Tencent_AILab_ChineseEmbedding.txt.gz'
    small_path = 'wordvec/Tencent_AILab_ChineseEmbedding.small.txt.gz'
    big_vocab, max_len = make_hash(big_path)
    small_vocab = filter(['./pos/data/std.train', './pos/data/std.valid', './pos/data/std.gold'], big_vocab, max_len)
    extract(big_path, small_path, small_vocab)
|
from opportunities.api.serializers.opportunity_serializers import (
OpportunitySerializer,
)
from opportunities.models import Opportunity
from rest_framework import generics
from rest_framework.permissions import IsAuthenticated
class OpportunityAPIView(generics.ListAPIView):
    """Authenticated list endpoint returning all opportunities, newest first."""
    serializer_class = OpportunitySerializer
    queryset = Opportunity.objects.all().order_by('-created_on')
    permission_classes = [IsAuthenticated]
class CreateOpportunity(generics.CreateAPIView):
    """Authenticated endpoint for creating a new opportunity."""
    serializer_class = OpportunitySerializer
    queryset = Opportunity.objects.all()
    permission_classes = [IsAuthenticated]
class UpdateOpportunity(generics.UpdateAPIView):
    """Authenticated endpoint for updating an existing opportunity."""
    serializer_class = OpportunitySerializer
    queryset = Opportunity.objects.all()
    permission_classes = [IsAuthenticated]
class RetrieveOpportunity(generics.RetrieveAPIView):
    """Authenticated endpoint for retrieving a single opportunity."""
    serializer_class = OpportunitySerializer
    queryset = Opportunity.objects.all()
    permission_classes = [IsAuthenticated]
class DestroyOpportunity(generics.DestroyAPIView):
    """Authenticated endpoint for deleting an opportunity."""
    serializer_class = OpportunitySerializer
    queryset = Opportunity.objects.all()
    permission_classes = [IsAuthenticated]
|
'''-------------------------------------------------------------------------------
Tool Name: CreateInflowFileFromHighResECMWFRunoff
Source Name: CreateInflowFileFromHighResECMWFRunoff.py
Version: ArcGIS 10.3
Author: Alan Snow (adapted from CreateInflowFileFromECMWFRunoff.py)
Description: Creates RAPID inflow file based on the WRF_Hydro land model output
and the weight table previously created.
History: Initial coding - 6/22/2015, version 1.0 (adapted from CreateInflowFileFromECMWFRunoff.py)
-------------------------------------------------------------------------------'''
import csv
import os
import netCDF4 as NET
import numpy as NUM
import re
class CreateInflowFileFromHighResECMWFRunoff(object):
    def __init__(self):
        """Define the tool (tool name is the name of the class)."""
        self.label = "Create Inflow File From ECMWF Runoff"
        self.description = ("Creates RAPID NetCDF input of water inflow "
                            "based on ECMWF high resoulution runoff results "
                            "and previously created weight table.")
        self.canRunInBackground = False
        # Expected column headers of the weight-table CSV (the first column
        # name may vary; it is read back from the file in execute()).
        self.header_wt = ['StreamID', 'area_sqm', 'lon_index', 'lat_index', 'npoints', 'weight', 'Lon', 'Lat']
        # Required dimensions and variables of the input runoff NetCDF file.
        self.dims_oi = ['lon', 'lat', 'time']
        self.vars_oi = ["lon", "lat", "time", "RO"]
        # Expected number of time steps in a high-resolution forecast file.
        self.length_time = {"HighRes": 125}
        # Output step counts per requested aggregation interval.
        self.length_time_opt = {"HighRes-1hr": 91, "HighRes-3hr": 49, "HighRes-6hr": 41}
        # Indexed error messages used by the validation helpers below.
        self.errorMessages = ["Missing Variable 'time'",
                              "Incorrect dimensions in the input ECMWF runoff file.",
                              "Incorrect variables in the input ECMWF runoff file.",
                              "Incorrect time variable in the input ECMWF runoff file",
                              "Incorrect number of columns in the weight table",
                              "No or incorrect header in the weight table",
                              "Incorrect sequence of rows in the weight table"]
def dataValidation(self, in_nc):
"""Check the necessary dimensions and variables in the input netcdf data"""
data_nc = NET.Dataset(in_nc)
dims = data_nc.dimensions.keys()
if dims != self.dims_oi:
raise Exception(self.errorMessages[1])
vars = data_nc.variables.keys()
if vars != self.vars_oi:
raise Exception(self.errorMessages[2])
return
def dataIdentify(self, in_nc):
"""Check if the data is Ensemble 1-51 (low resolution) or 52 (high resolution)"""
data_nc = NET.Dataset(in_nc)
name_time = self.vars_oi[2]
time = data_nc.variables[name_time][:]
diff = NUM.unique(NUM.diff(time))
data_nc.close()
time_interval_highres = NUM.array([1.0,3.0,6.0],dtype=float)
if (diff == time_interval_highres).all():
return "HighRes"
else:
return None
    def execute(self, in_sorted_nc_files, in_weight_table, out_nc, in_time_interval="6hr"):
        """Convert cumulative ECMWF high-resolution runoff into incremental
        per-stream inflow volumes (m3) and write them to ``out_nc``.

        The weight table maps each stream ID to one or more runoff grid
        cells, each with a contributing area in square metres.

        NOTE(review): ``in_time_interval`` is accepted but never used in
        this method — confirm whether any caller relies on it.
        """
        ''' Read the weight table '''
        print "Reading the weight table..."
        # One accumulator list per weight-table column, keyed by header name.
        dict_list = {self.header_wt[0]:[], self.header_wt[1]:[], self.header_wt[2]:[],
                     self.header_wt[3]:[], self.header_wt[4]:[], self.header_wt[5]:[],
                     self.header_wt[6]:[], self.header_wt[7]:[]}
        streamID = ""
        with open(in_weight_table, "rb") as csvfile:
            reader = csv.reader(csvfile)
            count = 0
            for row in reader:
                if count == 0:
                    #check number of columns in the weight table
                    if len(row) != len(self.header_wt):
                        raise Exception(self.errorMessages[4])
                    #check header (column 0 is skipped: its name becomes the
                    #river-ID dimension below and may vary between tables)
                    if row[1:len(self.header_wt)] != self.header_wt[1:len(self.header_wt)]:
                        raise Exception(self.errorMessages[5])
                    streamID = row[0]
                    count += 1
                else:
                    for i in range(0,8):
                        dict_list[self.header_wt[i]].append(row[i])
                    count += 1
        # Number of distinct stream IDs = size of the output river dimension.
        size_streamID = len(set(dict_list[self.header_wt[0]]))
        # Each input file contributes 12 incremental time steps; the very
        # first file also contributes the single Hour-0 value (+1).
        size_time = len(in_sorted_nc_files) * 12 + 1
        print size_time
        # Create output inflow netcdf data
        # data_out_nc = NET.Dataset(out_nc, "w") # by default format = "NETCDF4"
        data_out_nc = NET.Dataset(out_nc, "w", format = "NETCDF3_CLASSIC")
        dim_Time = data_out_nc.createDimension('Time', size_time)
        dim_RiverID = data_out_nc.createDimension(streamID, size_streamID)
        var_m3_riv = data_out_nc.createVariable('m3_riv', 'f4', ('Time', streamID))
        data_temp = NUM.empty(shape = [size_time, size_streamID])
        # Grid indices of every contributing runoff cell, one entry per
        # weight-table data row (`long` is the Python 2 integer type).
        lon_ind_all = [long(i) for i in dict_list[self.header_wt[2]]]
        lat_ind_all = [long(j) for j in dict_list[self.header_wt[3]]]
        # Obtain a subset of runoff data based on the indices in the weight
        # table: the bounding box of all contributing cells keeps reads small.
        min_lon_ind_all = min(lon_ind_all)
        max_lon_ind_all = max(lon_ind_all)
        min_lat_ind_all = min(lat_ind_all)
        max_lat_ind_all = max(lat_ind_all)
        # Row offset into data_temp where the next file's time steps start.
        index_pointer = 0
        for file_index, in_nc in enumerate(in_sorted_nc_files):
            # Validate the netcdf dataset
            self.dataValidation(in_nc)
            # identify if the input netcdf data is the High Resolution data with three different time intervals
            id_data = self.dataIdentify(in_nc)
            if id_data is None:
                raise Exception(self.errorMessages[3])
            ''' Read the netcdf dataset'''
            data_in_nc = NET.Dataset(in_nc)
            time = data_in_nc.variables[self.vars_oi[2]][:]
            # Check the size of time variable in the netcdf data
            if len(time) != self.length_time[id_data]:
                raise Exception(self.errorMessages[3])
            '''Calculate water inflows'''
            print "Calculating water inflows for", os.path.basename(in_nc), "..."
            # Read only the bounding-box subset of the runoff variable.
            data_subset_all = data_in_nc.variables[self.vars_oi[3]][:, min_lat_ind_all:max_lat_ind_all+1, min_lon_ind_all:max_lon_ind_all+1]
            len_time_subset_all = data_subset_all.shape[0]
            len_lat_subset_all = data_subset_all.shape[1]
            len_lon_subset_all = data_subset_all.shape[2]
            # Flatten (lat, lon) into one axis so each cell is addressable
            # by a single computed index.
            data_subset_all = data_subset_all.reshape(len_time_subset_all, (len_lat_subset_all * len_lon_subset_all))
            # compute new indices based on the data_subset_all
            # (count - 1 == number of data rows read from the weight table)
            index_new = []
            for r in range(0,count-1):
                ind_lat_orig = lat_ind_all[r]
                ind_lon_orig = lon_ind_all[r]
                index_new.append((ind_lat_orig - min_lat_ind_all)*len_lon_subset_all + (ind_lon_orig - min_lon_ind_all))
            # obtain a new subset of data: one column per weight-table row,
            # in weight-table order
            data_subset_new = data_subset_all[:,index_new]
            # start compute inflow
            # `pointer` walks the weight-table rows; each stream owns a
            # contiguous run of `npoints` rows.
            pointer = 0
            for s in range(0, size_streamID):
                npoints = int(dict_list[self.header_wt[4]][pointer])
                # Check if all npoints points correspond to the same streamID
                if len(set(dict_list[self.header_wt[0]][pointer : (pointer + npoints)])) != 1:
                    print "ROW INDEX", pointer
                    print "COMID", dict_list[self.header_wt[0]][pointer]
                    raise Exception(self.errorMessages[2])
                # Contributing area (m^2) of each of this stream's cells,
                # shaped (1, npoints) for broadcasting against time steps.
                area_sqm_npoints = [float(k) for k in dict_list[self.header_wt[1]][pointer : (pointer + npoints)]]
                area_sqm_npoints = NUM.array(area_sqm_npoints)
                area_sqm_npoints = area_sqm_npoints.reshape(1, npoints)
                data_goal = data_subset_new[:, pointer:(pointer + npoints)]
                ''''IMPORTANT NOTE: runoff variable in ECMWF dataset is cumulative through time'''
                if "HighRes" in id_data:
                    #For data with High Resolution, from Hour 0 to 90 (the first 91 time points) are of 1 hr time interval,
                    # then from Hour 90 to 144 (19 time points) are of 3 hour time interval, and from Hour 144 to 240 (15 time points)
                    # are of 6 hour time interval
                    # get hourly incremental time series for first 12 hours:
                    # difference consecutive cumulative values -> increments
                    ro_6hr_b = NUM.subtract(data_goal[1:13,], data_goal[:12,])
                    if file_index == 0:
                        # Hour = 0 is a single data point
                        ro_6hr_a = data_goal[0:1,]
                        # concatenate all time series
                        ro_stream = NUM.concatenate([ro_6hr_a, ro_6hr_b]) * area_sqm_npoints
                    else:
                        #time zero from last file, so don't need it
                        ro_stream = ro_6hr_b * area_sqm_npoints
                num_data_points = len(ro_stream)
                # Sum over the stream's cells -> inflow volume per time step.
                data_temp[index_pointer:index_pointer + num_data_points,s] = ro_stream.sum(axis = 1)
                pointer += npoints
            # Advance past the rows this file filled (every stream filled the
            # same number of rows, so the last loop's num_data_points is valid).
            index_pointer += num_data_points
        '''Write inflow data'''
        print "Writing inflow data..."
        var_m3_riv[:] = data_temp
        # close the input and output netcdf datasets
        # NOTE(review): only the dataset from the final loop iteration is
        # closed here; per-file handles from earlier iterations stay open.
        data_in_nc.close()
        data_out_nc.close()
        return
if __name__ == "__main__":
    # Driver: collect the sorted high-resolution runoff files matching the
    # ECMWF naming pattern, then run the inflow tool with hard-coded paths.
    runoff_dir = '/Users/Alan/Documents/RESEARCH/RAPID/ECMWF/'
    runoff_pattern = re.compile(r'.*\.52\.205\.runoff\.grib\.runoff\.netcdf', re.IGNORECASE)
    matching_files = []
    for entry in os.listdir(runoff_dir):
        if runoff_pattern.search(entry):
            matching_files.append(os.path.join(runoff_dir, entry))
    matching_files.sort()
    tool = CreateInflowFileFromHighResECMWFRunoff()
    tool.execute(in_sorted_nc_files=matching_files,
                 in_weight_table='/Users/Alan/Documents/RESEARCH/RAPID/input/nfie_texas_gulf_region/rapid_updated/weight_high_res.csv',
                 out_nc='/Users/Alan/Documents/RESEARCH/RAPID/input/nfie_texas_gulf_region/rapid_updated/m3_high_res.nc')
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import httpretty
import pytest
from datahub.access.tests import utils
from datahub.access.tests.fixture import conftest
from datahub.access.tests.fixture.access import * # noqa
from datahub.access.tests.fixture.db import ( # noqa
init_access_manager_config,
init_host_config_type,
)
from datahub.access.tests.mock_api.cc import get_biz_location_ok
from datahub.access.tests.modules.collector.conftest import (
common_deploy_success,
common_failure,
common_no_host_config_failure,
)
from datahub.access.tests.utils import post
# Shared request payload for the collector HTTP deploy tests: one linux
# deploy plan for a single raw data id, pushed to a single host.
# NOTE(review): "x.x.x.x" is a placeholder IP consumed only by mocked GSE
# calls, not a reachable host.
param = {
    "bk_biz_id": 2,
    "bk_username": "admin",
    "version": "V1.3.0.X",
    "deploy_plans": [
        {
            "system": "linux",
            "config": [{"raw_data_id": 123, "deploy_plan_id": [11]}],
            "host_list": [{"bk_cloud_id": 1, "ip": "x.x.x.x"}],
        }
    ],
}
@pytest.mark.usefixtures("init_host_config_type")
@pytest.mark.usefixtures("init_access_get_http_resource")
@pytest.mark.usefixtures("init_access_manager_config")
@pytest.mark.django_db
@pytest.mark.skip
def test_push_config_file_error(mocker):
    """A deploy whose config-file push errors out must surface a failed check."""
    __mock_http_deploy(mocker)
    deploy_result = post("/v3/access/collector/http/deploy/", param)
    assert deploy_result["result"]
    task_id = deploy_result["data"]["task_id"]
    assert isinstance(task_id, int)
    check_result = utils.get("/v3/access/collector/check/?task_id=%d" % task_id)
    common_failure(check_result)
@pytest.mark.usefixtures("init_host_config_type")
@pytest.mark.usefixtures("init_access_manager_config")
@pytest.mark.usefixtures("init_access_get_http_resource")
@pytest.mark.usefixtures("init_http_access_raw_data")
@pytest.mark.django_db
@pytest.mark.skip
def test_deploy_exist_raw_data(mocker):
    """Deploying against an already-registered raw data source succeeds."""
    __mock_http_deploy(mocker)
    conftest.mock_get_file_result("http_2_mayi22")
    deploy_result = post("/v3/access/collector/http/deploy/", param)
    assert deploy_result["result"]
    task_id = deploy_result["data"]["task_id"]
    assert isinstance(task_id, int)
    check_result = utils.get("/v3/access/collector/check/?task_id=%d" % task_id)
    common_deploy_success(check_result)
@pytest.mark.django_db
@pytest.mark.skip
def test_no_host_config():
    """Without any host config registered, the deploy request must fail cleanly."""
    httpretty.enable()
    httpretty.reset()
    # Only the permission / data-id mocks are registered here — deliberately
    # no host-config fixture, so the deploy cannot find a host configuration.
    conftest.mock_user_perm("admin")
    conftest.mock_app_perm("bk_dataweb")
    conftest.mock_create_data_id()
    conftest.mock_get_data_id("http")
    response = post("/v3/access/collector/http/deploy/", param)
    common_no_host_config_failure(response)
def __mock_http_deploy(mocker):
    """Register every HTTP/API mock needed for a collector deploy round-trip."""
    httpretty.enable()
    httpretty.reset()
    conftest.mock_user_perm("admin")
    conftest.mock_app_perm("bk_dataweb")
    conftest.mock_create_data_id()
    conftest.mock_get_data_id("http")
    conftest.mock_collector_hub_deploy_plan()
    conftest.mock_gse_push_file()
    conftest.mock_get_task_ip_log_err()
    conftest.mock_get_file_result()
    conftest.mock_fast_execute_script()
    get_biz_location_ok()
    # Short-circuit permission and IP-rule validation so the deploy proceeds.
    for patch_target in ("common.auth.check_perm", "datahub.access.utils.forms.check_ip_rule"):
        mocker.patch(patch_target, return_value=True)
|
"""Add snapshot model
Revision ID: 2191c871434
Revises: 19168fe64c41
Create Date: 2014-07-17 17:21:42.915797
"""
# Revision identifiers, used by Alembic to order migrations:
# this migration applies on top of revision 19168fe64c41.
revision = '2191c871434'
down_revision = '19168fe64c41'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the ``snapshot`` table."""
    op.create_table(
        'snapshot',
        # sa.GUID is a project-defined custom column type, not stock SQLAlchemy.
        sa.Column('id', sa.GUID(), nullable=False),
        sa.Column('project_id', sa.GUID(), nullable=False),
        sa.Column('build_id', sa.GUID(), nullable=True),
        # NOTE(review): Enum() is declared without member values here —
        # presumably backed by an integer status defined in the model; confirm.
        sa.Column('status', sa.Enum(), server_default='0', nullable=False),
        sa.ForeignKeyConstraint(['build_id'], ['build.id'], ),
        # Deleting a project cascades to its snapshots.
        sa.ForeignKeyConstraint(['project_id'], ['project.id'], ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id')
    )
def downgrade():
    """Drop the ``snapshot`` table (reverse of :func:`upgrade`)."""
    op.drop_table('snapshot')
|
"""
Title: Projet IPT - Diffraction DI
Description: Ce programme permet de calculer la valeur (couleur)
de chaque point de l'écran. On utlise une méthode
de somme discrète.
"""
#pylint: disable=invalid-name
#=== Importation des modules ===
import numpy as np
def diffraction(Fente):
    """Return the diffraction pattern (screen intensity) of an aperture.

    Parameters
    ----------
    Fente : array-like, shape (n, n)
        Square matrix describing the aperture transmission.

    Returns
    -------
    numpy.ndarray, shape (n, n)
        Screen intensity: the squared modulus of the discrete sum
        E(x, y) = sum_{x', y'} Fente(x', y') * exp(-2j*pi*(x*x' + y*y') / lambda)
        with lambda = 1/n, evaluated on the same n x n grid of centred,
        normalised coordinates as the aperture.

    Notes
    -----
    The original quadruple Python loop did O(n^4) scalar work. The kernel
    factorises as exp(-2j*pi*y*y'/lambda) * exp(-2j*pi*x*x'/lambda), so the
    whole sum is the matrix product A @ Fente @ B with precomputed phase
    matrices. The result is numerically identical (same terms, reordered).
    """
    F = np.asarray(Fente, dtype=complex)
    n = F.shape[0]                 # aperture/screen grid size
    lambdaN = 1 / n                # beam wavelength (normalised units)
    idx = np.arange(n)
    y = (-idx + n / 2) / n         # row ordinates (screen and aperture)
    x = (idx - n / 2) / n          # column abscissae (screen and aperture)
    # A[i, i'] couples screen row i with aperture row i';
    # B[j', j] couples aperture column j' with screen column j.
    A = np.exp(-2j * np.pi * np.outer(y, y) / lambdaN)
    B = np.exp(-2j * np.pi * np.outer(x, x) / lambdaN)
    # Intensity = |complex amplitude|^2 at every screen point.
    ecran = np.abs(A @ F @ B) ** 2
    return ecran
|
"""
Provide implementation of the atomic swap interfaces.
"""
class AtomicSwapInterface:
    """Base interface describing the atomic swap operations.

    Concrete swap back-ends are expected to override these placeholder
    methods; as declared here they do nothing and return ``None``.
    """

    def get_public_key(self):
        """Return the public key used for atomic swaps (placeholder)."""
        return None

    def get(self, swap_id):
        """Return information about the atomic swap identified by
        ``swap_id`` (placeholder).
        """
        return None
|
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from powersimdata.design.generation.clean_capacity_scaling import (
add_new_capacities_collaborative,
add_new_capacities_independent,
add_shortfall_to_targets,
)
def test_independent_new_capacity():
    """Exercise add_new_capacities_independent across four areas, each
    covering one feature: a plain case, additional curtailment, external
    clean energy, and an explicit new-capacity solar percentage.
    """
    area_names = ["Pacific", "Atlantic", "Arctic", "Indian"]
    # Atlantic tests a 'simple' case
    # Pacific tests expected additional curtailment
    # Arctic tests external additional clean energy
    # Indian tests new capacity solar percentage
    targets = pd.DataFrame(
        {
            "ce_target_fraction": [0.25, 0.3, 0.25, 0.25],
            "allowed_resources": [
                "solar,wind,geo",
                "solar, wind, geo, hydro",
                "solar,wind,geo",
                "solar, wind, geo",
            ],
            "demand": [2e8, 3e8, 2e8, 2e8],
            # Only Indian pins the solar share of new capacity (75%).
            "solar_percentage": [None, None, None, 0.75],
            "external_ce_addl_historical_amount": [0, 0, 1.4e7, 0],
            "geo.prev_capacity": [4000, 4500, 4000, 4000],
            "geo.prev_cap_factor": [1, 1, 1, 1],
            "geo.prev_generation": [8e6, 8.5e6, 8e6, 8e6],
            "hydro.prev_capacity": [3900, 4400, 3900, 3900],
            "hydro.prev_cap_factor": [1, 1, 1, 1],
            "hydro.prev_generation": [7e6, 7.5e6, 7e6, 7e6],
            "solar.prev_capacity": [3700, 4200, 3700, 3700],
            "solar.prev_cap_factor": [0.25, 0.3, 0.215379, 0.215379],
            "solar.prev_generation": [8.1252e6, 1.106784e7, 7e6, 7e6],
            "wind.prev_capacity": [3600, 4100, 3600, 3600],
            "wind.prev_cap_factor": [0.4, 0.35, 0.347854, 0.347854],
            "wind.prev_generation": [1.264896e7, 1.260504e7, 1.1e7, 1.1e7],
        },
        index=area_names,
    )
    # Extra curtailment is applied to Pacific solar only.
    addl_curtailment = pd.DataFrame(
        {
            "geo": [0, 0, 0, 0],
            "hydro": [0, 0, 0, 0],
            "solar": [0.4, 0, 0, 0],
            "wind": [0, 0, 0, 0],
        },
        index=area_names,
    )
    # Expected next capacities are written as prev_capacity + addition so
    # the expected build-out is readable at a glance.
    expected_return = pd.DataFrame(
        {
            "solar.next_capacity": [
                (3700 + 4481.582),
                (4200 + 8928.948),
                (3700 + 2055.556),
                (3700 + 8246.260),
            ],
            "wind.next_capacity": [
                (3600 + 4360.459),
                (4100 + 8716.354),
                (3600 + 2000),
                (3600 + 2748.753),
            ],
            "prev_ce_generation": [2.877416e7, 3.967288e7, 2.6e7, 2.6e7],
            "ce_shortfall": [2.122584e7, 5.032712e7, 1e7, 2.4e7],
        },
        index=area_names,
    )
    targets = add_shortfall_to_targets(targets)
    # 8784 hours = one leap year.
    targets = add_new_capacities_independent(
        targets, scenario_length=8784, addl_curtailment=addl_curtailment
    )
    test_columns = [
        "prev_ce_generation",
        "ce_shortfall",
        "solar.next_capacity",
        "wind.next_capacity",
    ]
    assert_frame_equal(targets[test_columns], expected_return[test_columns])
@pytest.fixture
def collaborative_test_targets():
    """Three-area targets frame for the collaborative-scaling tests.

    Pacific and Atlantic carry real clean-energy targets; Arctic has a zero
    target and only "polarbears" as an allowed resource, so the tests below
    expect it to be left unchanged by collaborative scaling.
    """
    targets = pd.DataFrame(
        {
            "ce_target_fraction": [0.25, 0.4, 0],
            "allowed_resources": [
                "solar, wind, geo",
                "solar, wind, geo, hydro, nuclear",
                "polarbears",
            ],
            "demand": [2e8, 3e8, 1e8],
            "external_ce_addl_historical_amount": [0, 0, 0],
            "geo.prev_capacity": [4000, 4500, 0],
            "geo.prev_cap_factor": [1, 1, 0],
            "geo.prev_generation": [8e6, 8.5e6, 0],
            "hydro.prev_capacity": [3900, 4400, 0],
            "hydro.prev_cap_factor": [1, 1, 0],
            "hydro.prev_generation": [7e6, 7.5e6, 0],
            "nuclear.prev_capacity": [4300, 4300, 0],
            "nuclear.prev_cap_factor": [1, 1, 0],
            "nuclear.prev_generation": [6.5e6, 6.5e6, 0],
            "solar.prev_capacity": [3700, 4200, 5000],
            "solar.prev_cap_factor": [0.215379, 0.284608, 0.45],
            # Arctic generation is spelled capacity * cap_factor * hours
            # (8784 hours = one leap year) for readability.
            "solar.prev_generation": [7e6, 1.05e7, (5000 * 0.45 * 8784)],
            "wind.prev_capacity": [3600, 4100, 4000],
            "wind.prev_cap_factor": [0.347855, 0.319317, 0.5],
            "wind.prev_generation": [1.1e7, 1.15e7, (4000 * 0.5 * 8784)],
        },
        index=["Pacific", "Atlantic", "Arctic"],
    )
    return targets
def test_collaborative_two_areas_overgeneration(collaborative_test_targets):
    """Pacific's near-zero target still results in shared new builds."""
    targets = collaborative_test_targets.copy()
    targets.loc["Pacific", "ce_target_fraction"] = 1e-9
    targets.drop("Arctic", inplace=True)
    targets = add_shortfall_to_targets(targets)
    targets = add_new_capacities_collaborative(targets, scenario_length=8784)
    cols = ["solar.next_capacity", "wind.next_capacity"]
    expected = pd.DataFrame(
        {
            "solar.next_capacity": [(3700 + 4578.75), (4200 + 5197.5)],
            "wind.next_capacity": [(3600 + 4455), (4100 + 5073.75)],
        },
        index=["Pacific", "Atlantic"],
    )
    assert_frame_equal(targets[cols], expected[cols])
def test_collaborative_two_areas_addl_curtailment(collaborative_test_targets):
    """Extra solar/wind curtailment inflates the required new capacities."""
    targets = collaborative_test_targets.copy()
    targets.drop("Arctic", inplace=True)
    targets = add_shortfall_to_targets(targets)
    targets = add_new_capacities_collaborative(
        targets, scenario_length=8784, addl_curtailment={"solar": 0.07, "wind": 0.13}
    )
    cols = ["solar.next_capacity", "wind.next_capacity"]
    expected = pd.DataFrame(
        {
            "solar.next_capacity": [(3700 + 10269.18), (4200 + 11656.9)],
            "wind.next_capacity": [(3600 + 9991.63), (4100 + 11379.36)],
        },
        index=["Pacific", "Atlantic"],
    )
    assert_frame_equal(targets[cols], expected[cols])
def test_collaborative_three_areas_one_nonparticipating(collaborative_test_targets):
    """An area with no participating target keeps its capacities unchanged."""
    targets = add_shortfall_to_targets(collaborative_test_targets.copy())
    targets = add_new_capacities_collaborative(targets, scenario_length=8784)
    cols = ["solar.next_capacity", "wind.next_capacity"]
    expected = pd.DataFrame(
        {
            # Arctic stays at its previous 5000 / 4000 MW — no additions.
            "solar.next_capacity": [(3700 + 9203.75), (4200 + 10447.5), 5000],
            "wind.next_capacity": [(3600 + 8955), (4100 + 10198.75), 4000],
        },
        index=["Pacific", "Atlantic", "Arctic"],
    )
    assert_frame_equal(targets[cols], expected[cols])
def test_collaborative_two_areas_addl_external(collaborative_test_targets):
    """External historical clean energy shrinks the collaborative shortfall."""
    targets = collaborative_test_targets.copy()
    targets.drop("Arctic", inplace=True)
    targets.loc["Pacific", "external_ce_addl_historical_amount"] = 4e6
    targets = add_shortfall_to_targets(targets)
    targets = add_new_capacities_collaborative(targets, scenario_length=8784)
    cols = ["solar.next_capacity", "wind.next_capacity"]
    expected = pd.DataFrame(
        {
            "solar.next_capacity": [(3700 + 8833.75), (4200 + 10027.5)],
            "wind.next_capacity": [(3600 + 8595), (4100 + 9788.75)],
        },
        index=["Pacific", "Atlantic"],
    )
    assert_frame_equal(targets[cols], expected[cols])
|
from ml.getDemand import current_day
def retrain_model(array_input):
    """Retrain the linear demand model on historical plus new inventory rows.

    Args:
        array_input: iterable of [ProductID, PriceReg, DayoftheYear,
            ItemCount] rows to append to the inventory before refitting.

    Side effects: rewrites ml/inventory.csv with the appended rows and
    pickles the fitted model to finalized_model.sav in the working directory.
    """
    # NOTE(review): the result is unused; the call is kept in case
    # current_day() has side effects — confirm and drop if it does not.
    day = current_day()
    import numpy as np
    import pandas as pd
    import pickle
    df = pd.read_csv("ml/inventory.csv")
    df = df[['ProductID', 'PriceReg', 'DayoftheYear', 'ItemCount']]
    # array_input = [[1,23,300,100],[2,34,200,122],[3,32,400,300],[4,67,233,100],[5,98,211,400]]
    df2 = pd.DataFrame(array_input)
    df2.columns = ['ProductID', 'PriceReg', 'DayoftheYear', 'ItemCount']
    # Fix: DataFrame.append was removed in pandas 2.0; concat is equivalent.
    df = pd.concat([df, df2])
    from sklearn import linear_model
    regr = linear_model.LinearRegression()
    train_x = np.asanyarray(df[['ProductID', 'PriceReg', 'DayoftheYear']])
    train_y = np.asanyarray(df[['ItemCount']])
    regr.fit(train_x, train_y)
    # Persist the model with a context manager so the handle is closed.
    filename = 'finalized_model.sav'
    with open(filename, 'wb') as model_file:
        pickle.dump(regr, model_file)
    # index=False stops an extra unnamed index column from accumulating in
    # the CSV on every retrain.
    df.to_csv("ml/inventory.csv", index=False)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.