hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
501ab238dad07fef93d094d64c69011390603b6e | 669 | py | Python | cntr/service/receive.py | ethan-iai/tour-recmd | 321ee528b748900eff9d22b176cdcd339fb60681 | [
"MIT"
] | 2 | 2021-08-01T06:39:43.000Z | 2021-08-01T06:39:58.000Z | cntr/service/receive.py | ethan-iai/tour-recmd | 321ee528b748900eff9d22b176cdcd339fb60681 | [
"MIT"
] | 1 | 2021-06-08T11:24:30.000Z | 2021-06-09T07:47:26.000Z | cntr/service/receive.py | ethan-iai/tour-recmd | 321ee528b748900eff9d22b176cdcd339fb60681 | [
"MIT"
] | null | null | null | import xml.etree.ElementTree as ET
from cntr.service.base import TextMsg, ImageMsg, LocationMsg, VoiceMsg
class ReceiveHandler(object):
    """Dispatch incoming WeChat-style XML payloads to typed message wrappers."""

    def __init__(self):
        # Maps the <MsgType> element's text to the wrapper class that knows
        # how to parse that message type.
        self._type_class_map = {
            'text': TextMsg,
            'image': ImageMsg,
            'location': LocationMsg,
            'voice': VoiceMsg,
        }

    def parse_xml(self, web_data):
        """Parse raw XML *web_data* into a ``(msg_type, message)`` pair.

        :param web_data: raw XML string/bytes of the incoming request body
        :return: ``(msg_type, wrapped_message)`` on success, ``(None, None)``
            for empty input, an unsupported message type, or a payload
            missing the <MsgType> element.
        """
        if not web_data:
            return None, None
        root = ET.fromstring(web_data)
        type_node = root.find('MsgType')
        if type_node is None:
            # Malformed payload: no <MsgType> element at all.  Previously
            # this raised AttributeError; treat it like an unknown type.
            return None, None
        msg_type = type_node.text
        try:
            return msg_type, self._type_class_map[msg_type](root)
        except KeyError:
            return None, None
| 27.875 | 70 | 0.584454 | 560 | 0.83707 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.058296 |
501bb0b84ecbd8db236b1e1a8fc662a43d76ee9f | 1,081 | py | Python | algorithms/allergies/__init__.py | JASTYN/pythonmaster | 46638ab09d28b65ce5431cd0759fe6df272fb85d | [
"Apache-2.0",
"MIT"
] | 3 | 2017-05-02T10:28:13.000Z | 2019-02-06T09:10:11.000Z | algorithms/allergies/__init__.py | JASTYN/pythonmaster | 46638ab09d28b65ce5431cd0759fe6df272fb85d | [
"Apache-2.0",
"MIT"
] | 2 | 2017-06-21T20:39:14.000Z | 2020-02-25T10:28:57.000Z | algorithms/allergies/__init__.py | JASTYN/pythonmaster | 46638ab09d28b65ce5431cd0759fe6df272fb85d | [
"Apache-2.0",
"MIT"
] | 2 | 2016-07-29T04:35:22.000Z | 2017-01-18T17:05:36.000Z | class Allergies(object):
ALLERGY_SCORES = {
'eggs': 1,
'peanuts': 2,
'shellfish': 4,
'strawberries': 8,
'tomatoes': 16,
'chocolate': 32,
'pollen': 64,
'cats': 128
}
def __init__(self, score):
if score is None or not isinstance(score, int):
raise TypeError("Score must be an integer")
self.score = score
def is_allergic_to(self, allergen):
"""
Checks if Tom is allergic to this particular allergen. Does a bitwise AND to perform the check
:param allergen: the allergen to check for
:return: True/False if Tom is allergic
:rtype: bool
"""
return self.ALLERGY_SCORES[allergen] & self.score
def allergies(self):
"""
Sorts the list of allergies in alphabetic order and returns them
:return: a sorted list of all the allergies
:rtype: list
"""
return sorted(list(allergy for allergy in self.ALLERGY_SCORES if
self.is_allergic_to(allergy)))
| 30.885714 | 102 | 0.576318 | 1,080 | 0.999075 | 0 | 0 | 0 | 0 | 0 | 0 | 500 | 0.462535 |
501c3280eed3f39c1378cb20b8407ca47e64ad8a | 2,309 | py | Python | pages/data_upload.py | irzaip/selevaporum | 05754f2a8152185f550e1135feb94fdc85e4046c | [
"MIT"
] | null | null | null | pages/data_upload.py | irzaip/selevaporum | 05754f2a8152185f550e1135feb94fdc85e4046c | [
"MIT"
] | null | null | null | pages/data_upload.py | irzaip/selevaporum | 05754f2a8152185f550e1135feb94fdc85e4046c | [
"MIT"
] | null | null | null | import collections
from numpy.core.defchararray import lower
import streamlit as st
import numpy as np
import pandas as pd
from pages import utils
def app():
    """Data Upload page of the Streamlit app.

    Lets the user upload a CSV/XLSX file, previews it, saves the raw data to
    ``data/main_data.csv`` and writes the inferred column-type metadata to
    ``data/metadata/column_type_desc.csv`` for the other pages to consume.
    """
    st.title("Data Storyteller Application")
    st.markdown("## Data Upload")

    st.markdown("### Upload a csv file for analysis.")
    st.write("\n")

    # Single-file upload widget; accepts CSV or Excel workbooks.
    uploaded_file = st.file_uploader("Choose a file", type=['csv', 'xlsx'])
    global data
    if uploaded_file is not None:
        try:
            data = pd.read_csv(uploaded_file)
        except Exception:
            # Not parseable as CSV: rewind the buffer (read_csv may have
            # consumed part of it) and retry as an Excel workbook.
            uploaded_file.seek(0)
            data = pd.read_excel(uploaded_file)

    st.set_option('deprecation.showfileUploaderEncoding', False)

    # Load the data and save the columns with categories as a dataframe.
    if st.button("Load Data"):
        # Pressing the button before any upload used to raise a NameError
        # because `data` was never assigned -- fail gracefully instead.
        if 'data' not in globals():
            st.error("Please upload a file before loading the data.")
            return

        # Raw data preview, persisted for the other pages.
        st.dataframe(data)
        data.to_csv('data/main_data.csv', index=False)

        # Infer per-column metadata: (column_name, numerical/categorical).
        columns = utils.genMetaData(data)

        # Persist the column descriptions for the Column Change section.
        columns_df = pd.DataFrame(columns, columns=['column_name', 'type'])
        columns_df.to_csv('data/metadata/column_type_desc.csv', index=False)

        # Show the detected types to the user.
        st.markdown("**Column Name**-**Type**")
        for i in range(columns_df.shape[0]):
            st.write(f"{i+1}. **{columns_df.iloc[i]['column_name']}** - {columns_df.iloc[i]['type']}")

        st.markdown("""The above are the automated column types detected by the application in the data.
        In case you wish to change the column types, head over to the **Column Change** section. """)
501c4eef87e41e664070910134642c391a29e880 | 832 | py | Python | uso_red/migrations/0001_initial.py | joelsegoviacrespo/control_aforo_migrado | be90d1d45a20f735e7ef20449c4ab91ca05b5d85 | [
"MIT"
] | null | null | null | uso_red/migrations/0001_initial.py | joelsegoviacrespo/control_aforo_migrado | be90d1d45a20f735e7ef20449c4ab91ca05b5d85 | [
"MIT"
] | null | null | null | uso_red/migrations/0001_initial.py | joelsegoviacrespo/control_aforo_migrado | be90d1d45a20f735e7ef20449c4ab91ca05b5d85 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.13 on 2020-11-26 03:26
from django.db import migrations, models
import djongo.models.fields
class Migration(migrations.Migration):
    # Auto-generated Django migration (see header): creates the initial
    # "uso_red" table.  The ObjectIdField primary key comes from the djongo
    # (MongoDB) backend.

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='UsoRed',
            fields=[
                # MongoDB-style primary key managed by djongo.
                ('_id', djongo.models.fields.ObjectIdField(auto_created=True, primary_key=True, serialize=False)),
                ('fecha', models.DateTimeField(blank=True, null=True)),
                ('enviadosGB', models.FloatField(blank=True, default=0.0)),
                ('recibidosGB', models.FloatField(blank=True, default=0.0)),
                ('tipo_red', models.CharField(default='', max_length=255)),
            ],
            options={
                'db_table': 'uso_red',
            },
        ),
    ]
| 28.689655 | 114 | 0.564904 | 710 | 0.853365 | 0 | 0 | 0 | 0 | 0 | 0 | 124 | 0.149038 |
501d0008e868acaa4fedee6bdf54ba6f0c7c1b8d | 5,438 | py | Python | sweep_builder/scorable.py | panoramichq/data-collection-fb | 550b90a303c880ae8c3dfd2801dc4f991a969f89 | [
"MIT"
] | null | null | null | sweep_builder/scorable.py | panoramichq/data-collection-fb | 550b90a303c880ae8c3dfd2801dc4f991a969f89 | [
"MIT"
] | null | null | null | sweep_builder/scorable.py | panoramichq/data-collection-fb | 550b90a303c880ae8c3dfd2801dc4f991a969f89 | [
"MIT"
] | null | null | null | import logging
from collections import defaultdict
from typing import Iterable, Generator, Optional
from pynamodb.exceptions import DoesNotExist
from common.enums.entity import Entity
from common.enums.jobtype import detect_job_type
from config.application import PERMANENTLY_FAILING_JOB_THRESHOLD
from config.jobs import FAILS_IN_ROW_BREAKDOWN_LIMIT, TASK_BREAKDOWN_ENABLED
from common.enums.failure_bucket import FailureBucket
from common.measurement import Measure
from common.store.jobreport import JobReport
from common.id_tools import generate_id
from common.job_signature import JobSignature
from sweep_builder.data_containers.expectation_claim import ExpectationClaim
from sweep_builder.data_containers.scorable_claim import ScorableClaim
from sweep_builder.account_cache import AccountCache
logger = logging.getLogger(__name__)
def _fetch_job_report(job_id: str) -> Optional[JobReport]:
    """Look up the persisted JobReport for *job_id*; None when none exists."""
    try:
        report = JobReport.get(job_id)
    except DoesNotExist:
        # No history recorded for this job yet.
        return None

    fails = report.fails_in_row
    if fails and fails >= PERMANENTLY_FAILING_JOB_THRESHOLD:
        # Surface jobs that keep failing sweep after sweep so they can be
        # investigated; the report is still returned as-is.
        Measure.counter('permanently_failing_job').increment()
        logger.warning(
            f'[permanently-failing-job] Job with id {job_id} failed {fails}'
            f' times in a row.'
        )
    return report
def generate_child_claims(claim: ExpectationClaim) -> Generator[ExpectationClaim, None, None]:
    """Yield one ExpectationClaim per direct child of *claim*'s entity node."""
    for node in claim.entity_hierarchy.children:
        # Each child gets its own job id derived from the parent's report
        # parameters plus the child's identity.
        child_job_id = generate_id(
            ad_account_id=claim.ad_account_id,
            range_start=claim.range_start,
            report_type=claim.report_type,
            report_variant=claim.report_variant,
            entity_id=node.entity_id,
            entity_type=node.entity_type,
        )
        yield ExpectationClaim(
            node.entity_id,
            node.entity_type,
            claim.report_type,
            claim.report_variant,
            JobSignature(child_job_id),
            ad_account_id=claim.ad_account_id,
            timezone=claim.timezone,
            entity_hierarchy=node,
            range_start=claim.range_start,
        )
def prefer_job_breakdown(report: JobReport) -> bool:
    """Return True when the job described by *report* should be broken down
    into smaller per-child jobs on the next sweep.

    Two triggers: an explicit "payload too large" failure (the canonical
    reason for breakdown), or a long streak of consecutive failures.
    NOTE: tasks killed by the "Stop Oozer" exception are not yet recorded,
    so they can only reach breakdown through the failure-streak counter.
    """
    return (
        report.last_failure_bucket == FailureBucket.TooLarge
        or (report.fails_in_row is not None
            and report.fails_in_row > FAILS_IN_ROW_BREAKDOWN_LIMIT)
    )
def generate_scorable(claim: ExpectationClaim) -> Generator[ScorableClaim, None, None]:
    """Turn one expectation claim into scorable claim(s), recursively
    breaking the job down into per-child jobs when its history suggests it
    is too large to run whole."""
    report = _fetch_job_report(claim.job_id)

    # For ad-account level claims, discard history older than the account's
    # forced-refresh horizon so the job is scored as if it never ran.
    if claim.ad_account_id and claim.entity_type == Entity.AdAccount:
        horizon = AccountCache.get_refresh_if_older_than(claim.ad_account_id)
        if (horizon and report and report.last_success_dt
                and report.last_success_dt < horizon):
            report = None

    should_break_down = (
        TASK_BREAKDOWN_ENABLED
        and claim.is_divisible
        and report
        and prefer_job_breakdown(report)
    )

    if not should_break_down:
        yield ScorableClaim(
            claim.entity_id,
            claim.entity_type,
            claim.report_type,
            claim.report_variant,
            claim.job_signature,
            report,
            ad_account_id=claim.ad_account_id,
            timezone=claim.timezone,
            range_start=claim.range_start,
        )
        return

    logger.warning(f'Performing task breakdown for job_id: {claim.job_id}')
    Measure.increment(
        f'{__name__}.{generate_scorable.__name__}.task_broken_down',
        tags={'ad_account_id': claim.ad_account_id, 'entity_type': claim.entity_type},
    )(1)

    # Recurse: every child may itself be broken down further.
    for child in generate_child_claims(claim):
        yield from generate_scorable(child)
def iter_scorable(claims: Iterable[ExpectationClaim]) -> Generator[ScorableClaim, None, None]:
    """Expand every expectation claim into scorable claims, then report how
    many scorables each (account, entity type, job type) bucket produced."""
    counts = defaultdict(int)

    for expectation in claims:
        for scorable in generate_scorable(expectation):
            bucket = (
                expectation.ad_account_id,
                expectation.entity_type,
                detect_job_type(expectation.report_type, expectation.entity_type),
            )
            counts[bucket] += 1
            yield scorable

    # Emit one histogram sample per bucket once the stream is exhausted.
    for (ad_account_id, entity_type, job_type), count in counts.items():
        Measure.histogram(
            f'{__name__}.{iter_scorable.__name__}.scorable_claims_per_expectation_claim',
            tags={'ad_account_id': ad_account_id, 'entity_type': entity_type, 'job_type': job_type},
        )(count)
| 38.295775 | 114 | 0.696396 | 0 | 0 | 3,348 | 0.615668 | 0 | 0 | 0 | 0 | 987 | 0.181501 |
501dcb0c9d559197f9d0137f85f4646aa314aebc | 1,782 | py | Python | import_em.py | kaija/taiwan_stockloader | 637244c3b0bc96093cc5a7b3df093a829f9e3c2d | [
"MIT"
] | 2 | 2015-06-13T09:17:46.000Z | 2015-10-25T15:31:33.000Z | import_em.py | kaija/taiwan_stockloader | 637244c3b0bc96093cc5a7b3df093a829f9e3c2d | [
"MIT"
] | null | null | null | import_em.py | kaija/taiwan_stockloader | 637244c3b0bc96093cc5a7b3df093a829f9e3c2d | [
"MIT"
] | 3 | 2016-02-01T07:36:55.000Z | 2018-08-03T12:22:20.000Z | #!/usr/bin/python
import datetime
import httplib
import urllib
import redis
import json
from datetime import timedelta
#now = datetime.datetime.now();
#today = now.strftime('%Y-%m-%d')
#print today
rdb = redis.Redis('localhost')
def rv(value):
    """Strip surrounding whitespace and remove thousands separators,
    e.g. " 1,234 " -> "1234".

    The original built the result by concatenating the comma-split pieces
    in a loop; a single replace() is equivalent and linear.
    """
    return value.strip().replace(",", "")
def isfloat(value):
    """Return True when *value* can be parsed as a float."""
    try:
        float(value)
    except ValueError:
        return False
    return True
def convfloat(value):
    """Parse *value* as a float; return -1 when it is not a valid number."""
    try:
        parsed = float(value)
    except ValueError:
        parsed = -1
    return parsed
def convint(value):
    """Parse *value* as an int; return 0 when it is not a valid integer."""
    try:
        parsed = int(value)
    except ValueError:
        parsed = 0
    return parsed
def dump(key, value):
    # Debug helper: print the stock id followed by its JSON-encoded daily
    # record.  NOTE: Python 2 print statements -- this script targets
    # Python 2 (it also imports httplib above).
    print key
    print json.dumps(value)
def save2redis(key, value):
    """Append *value* to the JSON-encoded list stored at redis key
    "TWE"+key, creating the list on first use."""
    redis_key = "TWE" + key
    stored = rdb.get(redis_key)
    records = [] if stored is None else json.loads(stored)
    records.append(value)
    rdb.set(redis_key, json.dumps(records))
# --- Main import loop: replay daily CSV dumps into redis, one day at a ---
# --- time, from the fixed start date up to (but excluding) today.      ---
today = datetime.date.today()
one_day = timedelta(days=1);
start_day = datetime.date(2007, 7, 1);
#start_day = datetime.date(2015, 5, 14);
print "Import from " + start_day.strftime("%Y-%m-%d") + " to " + today.strftime("%Y-%m-%d")
dl_date = start_day
stocks = {}
# NOTE(review): dl_date is assigned twice and `stocks` is never used below;
# both look like leftovers from an earlier version of the script.
dl_date = start_day
print "Start merge history"
while dl_date < today:
    # One CSV file per trading day, named emerging/YYYYMMDD.csv.
    file_name = "emerging/" + dl_date.strftime("%Y%m%d") + ".csv"
    f = open(file_name, 'r')
    print "open " + file_name
    lines = f.readlines()
    for line in lines:
        # Fields are double-quoted; splitting on '","' keeps commas that
        # appear inside a quoted field (e.g. thousands separators) intact.
        r = line.split('","')
        if len(r) == 17:
            # First field starts with a quote; take the text after it as the
            # stock id -- presumably '"<sid> <name>' -- TODO confirm format.
            head = r[0].split("\"")
            sid = head[1].strip(" ")
            obj = {"volume": convint(rv(r[8])), "open": convfloat(r[4]), "high": convfloat(r[5]), "low": convfloat(r[6]), "val": convfloat(r[2]), "date": dl_date.strftime("%Y-%m-%d"), "avg": convfloat(r[7]), "buyPrice": convfloat(r[11]), "salePrice": convfloat(r[12])}
            #dump(sid, obj)
            save2redis(sid, obj)
    dl_date += one_day
| 20.25 | 259 | 0.64422 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 359 | 0.201459 |
501e238ecce4d14dde53565c91651d0fe9f9fbc1 | 5,453 | py | Python | LaserController/InterfaceGUI.py | ColdMatter/PhotonBEC | c6bcf9bdefd267c8adde0d299cf5920b010c5022 | [
"MIT"
] | null | null | null | LaserController/InterfaceGUI.py | ColdMatter/PhotonBEC | c6bcf9bdefd267c8adde0d299cf5920b010c5022 | [
"MIT"
] | null | null | null | LaserController/InterfaceGUI.py | ColdMatter/PhotonBEC | c6bcf9bdefd267c8adde0d299cf5920b010c5022 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'InterfaceGUI.ui'
#
# Created: Mon Mar 11 15:56:47 2013
# by: PyQt4 UI code generator 4.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtWidgets
try:
from PyQt5 import QtCore, QtGui
except:
from PySide import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_MainWindow(object):
    """Qt Designer-generated UI for the laser controller main window.

    Builds a "Power" group box holding enable/poll checkboxes, LCD readouts
    for the measured power and the setpoint, a slider and a text field for
    entering a new setpoint.  Do not hand-edit layout values lightly: the
    header says this file was generated by pyuic and may be regenerated.
    """

    def setupUi(self, MainWindow):
        # Top-level window.
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(240, 200)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
        # "Power" group box containing all controls.
        self.groupBox = QtWidgets.QGroupBox(self.centralwidget)
        self.groupBox.setGeometry(QtCore.QRect(0, 0, 241, 381))
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        # Slider for the power setpoint (range 0..2400).
        self.powerSlider = QtWidgets.QSlider(self.groupBox)
        self.powerSlider.setGeometry(QtCore.QRect(0, 150, 141, 29))
        self.powerSlider.setMaximum(2400)
        self.powerSlider.setOrientation(QtCore.Qt.Horizontal)
        self.powerSlider.setObjectName(_fromUtf8("powerSlider"))
        # Output-enable and power-polling checkboxes.
        self.enable_checkBox = QtWidgets.QCheckBox(self.groupBox)
        self.enable_checkBox.setGeometry(QtCore.QRect(50, 0, 101, 22))
        self.enable_checkBox.setObjectName(_fromUtf8("enable_checkBox"))
        self.power_timer_checkBox = QtWidgets.QCheckBox(self.groupBox)
        self.power_timer_checkBox.setGeometry(QtCore.QRect(155, 0, 101, 22))
        # NOTE(review): object name "timer_checkBox" does not match the
        # attribute name power_timer_checkBox -- confirm before relying on
        # findChild()/objectName lookups.
        self.power_timer_checkBox.setObjectName(_fromUtf8("timer_checkBox"))
        # Static labels.
        self.label_2 = QtWidgets.QLabel(self.groupBox)
        self.label_2.setGeometry(QtCore.QRect(0, 50, 101, 17))
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.label_3 = QtWidgets.QLabel(self.groupBox)
        self.label_3.setGeometry(QtCore.QRect(189, 50, 51, 20))
        self.label_3.setObjectName(_fromUtf8("label_3"))
        self.label_4 = QtWidgets.QLabel(self.groupBox)
        self.label_4.setGeometry(QtCore.QRect(190, 100, 31, 20))
        self.label_4.setObjectName(_fromUtf8("label_4"))
        self.label_6 = QtWidgets.QLabel(self.groupBox)
        self.label_6.setGeometry(QtCore.QRect(0, 130, 101, 17))
        self.label_6.setObjectName(_fromUtf8("label_6"))
        # LCD readouts: measured power (large) and current setpoint (small).
        self.currentLCD = QtWidgets.QLCDNumber(self.groupBox)
        self.currentLCD.setGeometry(QtCore.QRect(0, 70, 181, 51))
        self.currentLCD.setObjectName(_fromUtf8("currentLCD"))
        self.setLCD = QtWidgets.QLCDNumber(self.groupBox)
        self.setLCD.setGeometry(QtCore.QRect(190, 70, 51, 23))
        self.setLCD.setNumDigits(4)
        self.setLCD.setSegmentStyle(QtWidgets.QLCDNumber.Flat)
        self.setLCD.setObjectName(_fromUtf8("setLCD"))
        # Manual setpoint entry (text box + "Set" button) and power query.
        self.setText = QtWidgets.QLineEdit(self.groupBox)
        self.setText.setGeometry(QtCore.QRect(160, 150, 51, 27))
        self.setText.setObjectName(_fromUtf8("setText"))
        self.setTextButton = QtWidgets.QPushButton(self.groupBox)
        self.setTextButton.setGeometry(QtCore.QRect(210, 150, 31, 27))
        self.setTextButton.setObjectName(_fromUtf8("setTextButton"))
        self.getPowerPushButton = QtWidgets.QPushButton(self.groupBox)
        self.getPowerPushButton.setGeometry(QtCore.QRect(100, 40, 61, 27))
        self.getPowerPushButton.setObjectName(_fromUtf8("getPowerPushButton"))
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu and status bars (empty by default).
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 240, 23))
        self.menubar.setObjectName(_fromUtf8("menubar"))
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName(_fromUtf8("statusbar"))
        MainWindow.setStatusBar(self.statusbar)
        # Dock the window in the top-right corner of the screen.
        screen = QtWidgets.QDesktopWidget().screenGeometry()
        mysize = MainWindow.geometry()
        MainWindow.move(screen.width() - mysize.width() - 15, 0)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        # All user-visible strings, kept translatable via QApplication.translate.
        MainWindow.setWindowTitle(QtWidgets.QApplication.translate("MainWindow", "LaserController", None))
        self.groupBox.setTitle(QtWidgets.QApplication.translate("MainWindow", "Power", None))
        self.enable_checkBox.setText(QtWidgets.QApplication.translate("MainWindow", "Enable Output", None))
        self.label_2.setText(QtWidgets.QApplication.translate("MainWindow", "Current Value", None))
        self.label_3.setText(QtWidgets.QApplication.translate("MainWindow", "Set Value", None))
        self.label_4.setText(QtWidgets.QApplication.translate("MainWindow", "mW", None))
        self.label_6.setText(QtWidgets.QApplication.translate("MainWindow", "Controls", None))
        self.setTextButton.setText(QtWidgets.QApplication.translate("MainWindow", "Set", None))
        self.getPowerPushButton.setText(QtWidgets.QApplication.translate("MainWindow", "GetPower", None))
        self.power_timer_checkBox.setText(QtWidgets.QApplication.translate("MainWindow", "Poll Power", None))
if __name__ == "__main__":
    # Standalone launch: build the Qt application, show the laser controller
    # window and enter the event loop until the window is closed.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| 47.417391 | 109 | 0.708601 | 4,768 | 0.874381 | 0 | 0 | 0 | 0 | 0 | 0 | 663 | 0.121584 |
501f4dae72eb2773eebea6e00006189d15c72360 | 302 | py | Python | src/sportsdata/nba/teams/washington_wizards.py | OrangeCardinal/sportsdata | e6e182e89c8f8a12ffe18b218a37b8bdb8971e03 | [
"Apache-2.0"
] | null | null | null | src/sportsdata/nba/teams/washington_wizards.py | OrangeCardinal/sportsdata | e6e182e89c8f8a12ffe18b218a37b8bdb8971e03 | [
"Apache-2.0"
] | null | null | null | src/sportsdata/nba/teams/washington_wizards.py | OrangeCardinal/sportsdata | e6e182e89c8f8a12ffe18b218a37b8bdb8971e03 | [
"Apache-2.0"
] | null | null | null | from sports.nba.nba_team import NBA_Team
class WashingtonWizards(NBA_Team):
    """
    Static franchise information for the NBA's Washington Wizards.
    """

    # League-assigned franchise identifiers (fixed constants).
    full_name = "Washington Wizards"
    name = "Wizards"
    team_id = 1610612764

    def __init__(self):
        """Initialize through the generic NBA_Team setup."""
        super().__init__()
| 17.764706 | 47 | 0.612583 | 258 | 0.854305 | 0 | 0 | 0 | 0 | 0 | 0 | 104 | 0.344371 |
501ffd3a5b4084f880ca38c9bb54d4225d927e52 | 1,305 | py | Python | modules/text/sentiment_analysis/senta_cnn/net.py | Steffy-zxf/HubModule | 40b0563f86634714033ab7712a08a58eba81bad1 | [
"Apache-2.0"
] | null | null | null | modules/text/sentiment_analysis/senta_cnn/net.py | Steffy-zxf/HubModule | 40b0563f86634714033ab7712a08a58eba81bad1 | [
"Apache-2.0"
] | null | null | null | modules/text/sentiment_analysis/senta_cnn/net.py | Steffy-zxf/HubModule | 40b0563f86634714033ab7712a08a58eba81bad1 | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
import paddle.fluid as fluid
def cnn_net(data,
            dict_dim,
            emb_dim=128,
            hid_dim=128,
            hid_dim2=96,
            class_dim=2,
            win_size=3):
    """Build the Senta CNN sentiment-classification network.

    Pipeline: embedding -> sequence conv + max pool -> fc -> softmax.

    Returns:
        (prediction, hidden): softmax class probabilities and the last
        fully-connected feature layer.
    """
    # Token ids -> dense embedding vectors.
    embedded = fluid.layers.embedding(
        input=data,
        size=[dict_dim, emb_dim],
        param_attr=fluid.ParamAttr(name="@HUB_senta_cnn@embedding_0.w_0"))

    # Windowed sequence convolution followed by max pooling over time.
    conv_pool = fluid.nets.sequence_conv_pool(
        input=embedded,
        num_filters=hid_dim,
        filter_size=win_size,
        act="tanh",
        pool_type="max",
        param_attr=fluid.ParamAttr(name="@HUB_senta_cnn@sequence_conv_0.w_0"),
        bias_attr=fluid.ParamAttr(name="@HUB_senta_cnn@sequence_conv_0.b_0"))

    # Hidden fully-connected projection.
    hidden = fluid.layers.fc(
        input=[conv_pool],
        size=hid_dim2,
        param_attr=fluid.ParamAttr(name="@HUB_senta_cnn@fc_0.w_0"),
        bias_attr=fluid.ParamAttr(name="@HUB_senta_cnn@fc_0.b_0"))

    # Softmax classification head.
    prediction = fluid.layers.fc(
        input=[hidden],
        size=class_dim,
        act="softmax",
        param_attr=fluid.ParamAttr(name="@HUB_senta_cnn@fc_1.w_0"),
        bias_attr=fluid.ParamAttr(name="@HUB_senta_cnn@fc_1.b_0"))

    return prediction, hidden
| 28.369565 | 78 | 0.609195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 341 | 0.261303 |
50205455dc5f8bdf7fc11d558fec2bdbb9a250dd | 944 | py | Python | test/test_scripts.py | JulianKarlBauer/orientation_averaging_mean_field | 75acb5ed58aa6a69cec7508d3d45865bbab3ed3c | [
"MIT"
] | null | null | null | test/test_scripts.py | JulianKarlBauer/orientation_averaging_mean_field | 75acb5ed58aa6a69cec7508d3d45865bbab3ed3c | [
"MIT"
] | null | null | null | test/test_scripts.py | JulianKarlBauer/orientation_averaging_mean_field | 75acb5ed58aa6a69cec7508d3d45865bbab3ed3c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import runpy
import os
import pytest
import glob
THIS_FILES_DIR_PATH = os.path.realpath(os.path.dirname(__file__))
def get_paths_of_scripts():
    """Collect the example scripts under docs/source/notebooks, sorted,
    skipping any whose path contains an exclusion marker."""
    excluded_markers = ["do_not_execute"]

    notebooks_dir = os.path.join(
        os.path.dirname(THIS_FILES_DIR_PATH), "docs", "source", "notebooks"
    )
    candidates = sorted(glob.glob(os.path.join(notebooks_dir, "*.py")))

    return [
        path
        for path in candidates
        if all(marker not in path for marker in excluded_markers)
    ]
class Test_scripts:
    """Smoke-test every example script by executing it top to bottom."""

    @pytest.mark.parametrize(
        "path_script",
        get_paths_of_scripts(),
    )
    def test_execute_scripts(self, path_script):
        """Run one script as __main__; the test fails if it raises."""
        # Fixed typo in the progress message ("Execut" -> "Executing").
        print(f"Executing script:\n{path_script}")
        runpy.run_path(path_script, init_globals={}, run_name="__main__")
| 25.513514 | 87 | 0.680085 | 282 | 0.298729 | 0 | 0 | 258 | 0.273305 | 0 | 0 | 146 | 0.154661 |
50225dc303362b68cabbf1199f96dcd33b1c08db | 31,316 | py | Python | python/brainvisa/maker/components_definition.py | brainvisa/brainvisa-cmake | 2b4c4c6aae45e036a54d655b064f4d1a2b7b2061 | [
"CECILL-B"
] | null | null | null | python/brainvisa/maker/components_definition.py | brainvisa/brainvisa-cmake | 2b4c4c6aae45e036a54d655b064f4d1a2b7b2061 | [
"CECILL-B"
] | 77 | 2018-10-30T11:28:16.000Z | 2022-02-28T14:21:40.000Z | python/brainvisa/maker/components_definition.py | brainvisa/brainvisa-cmake | 2b4c4c6aae45e036a54d655b064f4d1a2b7b2061 | [
"CECILL-B"
] | 1 | 2019-07-17T14:08:22.000Z | 2019-07-17T14:08:22.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import sys
# groups:
# anatomist: projects needed for anatomist (aims, soma-io and dependencies)
# opensource
# brainvisa: public brainvisa distro
# bvdev: same as brainvisa but excludes restricted access projects
# (such as brainrat-private)
# standard: most useful projects. Includes internal, non-open projects,
# but not restricted ones (such as brainrat-private)
# cea: CEA (Neurospin/MirCen/SHFJ) distro including internal projects
# cati_platform: standard + CATI projects
# all: all projects except those really not useful
components_definition = [
('development', {
'components': [
['brainvisa-cmake', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/brainvisa-cmake.git branch:master','development/brainvisa-cmake/integration'),
'bug_fix': ('git https://github.com/brainvisa/brainvisa-cmake.git branch:master','development/brainvisa-cmake/master'),
'5.0': ('git https://github.com/brainvisa/brainvisa-cmake.git branch:5.0','development/brainvisa-cmake/5.0'),
},
}],
['casa-distro', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/casa-distro.git branch:master','development/casa-distro/integration'),
'bug_fix': ('git https://github.com/brainvisa/casa-distro.git branch:master','development/casa-distro/master'),
'5.0': ('git https://github.com/brainvisa/casa-distro.git branch:brainvisa-5.0','development/casa-distro/5.0'),
},
'build_model': 'pure_python',
}],
],
}),
('communication', {
'components': [
['web', {
'groups': ['all'],
'branches': {
'trunk': ('git https://bioproj.extra.cea.fr/git/brainvisa-commu/web.git branch:integration','communication/web/trunk'),
'bug_fix': ('git https://bioproj.extra.cea.fr/git/brainvisa-commu/web.git branch:master','communication/web/master'),
'5.0': ('git https://bioproj.extra.cea.fr/git/brainvisa-commu/web.git branch:master','communication/web/5.0'),
},
}],
],
}),
('brainvisa-share', {
'components': [
['brainvisa-share', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/brainvisa-share.git branch:master','brainvisa-share/integration'),
'bug_fix': ('git https://github.com/brainvisa/brainvisa-share.git branch:master','brainvisa-share/master'),
'5.0': ('git https://github.com/brainvisa/brainvisa-share.git branch:5.0','brainvisa-share/5.0'),
},
}],
],
}),
('soma', {
'description': 'Set of lower-level libraries for neuroimaging processing infrastructure',
'components': [
['soma-base', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/populse/soma-base.git branch:master','soma/soma-base/integration'),
'bug_fix': ('git https://github.com/populse/soma-base.git branch:master','soma/soma-base/master'),
'5.0': ('git https://github.com/populse/soma-base.git branch:5.0','soma/soma-base/5.0'),
},
}],
['soma-io', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/soma-io.git branch:master','soma/soma-io/integration'),
'bug_fix': ('git https://github.com/brainvisa/soma-io.git branch:master','soma/soma-io/master'),
'5.0': ('git https://github.com/brainvisa/soma-io.git branch:5.0','soma/soma-io/5.0'),
},
}],
['soma-workflow', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/populse/soma-workflow.git branch:master','soma/soma-workflow/integration'),
'bug_fix': ('git https://github.com/populse/soma-workflow.git default:master','soma/soma-workflow/master'),
'5.0': ('git https://github.com/populse/soma-workflow.git branch:brainvisa-5.0','soma/soma-workflow/5.0'),
},
}],
],
}),
('populse', {
'components': [
['capsul', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/populse/capsul.git branch:master','capsul/integration'),
'bug_fix': ('git https://github.com/populse/capsul.git default:master','capsul/master'),
'5.0': ('git https://github.com/populse/capsul.git branch:brainvisa-5.0','capsul/5.0'),
},
'build_model': 'pure_python',
}],
['populse_db', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/populse/populse_db.git default:master','populse/populse_db/integration'),
'bug_fix': ('git https://github.com/populse/populse_db.git default:master','populse/populse_db/master'),
'5.0': ('git https://github.com/populse/populse_db.git branch:brainvisa-5.0','populse/populse_db/5.0'),
},
'build_model': 'pure_python',
}],
],
}),
('aims', {
'description': '3D/4D neuroimaging data manipulation and processing library and commands. Includes C++ libraries, command lines, and a Python API.',
'components': [
['aims-free', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/aims-free.git branch:master','aims/aims-free/integration'),
'bug_fix': ('git https://github.com/brainvisa/aims-free.git branch:master','aims/aims-free/master'),
'5.0': ('git https://github.com/brainvisa/aims-free.git branch:5.0','aims/aims-free/5.0'),
},
}],
['aims-gpl', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/aims-gpl.git branch:master','aims/aims-gpl/integration'),
'bug_fix': ('git https://github.com/brainvisa/aims-gpl.git branch:master','aims/aims-gpl/master'),
'5.0': ('git https://github.com/brainvisa/aims-gpl.git branch:5.0','aims/aims-gpl/5.0'),
},
}],
['aims-til', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'5.0': ('git https://github.com/brainvisa/aims-til.git branch:5.0','aims/aims-til/5.0'),
},
}],
],
}),
('anatomist', {
'description': '3D/4D neuroimaging data viewer. Modular and versatile, Anatomist can display any kind of neuroimaging data (3D/4D images, meshes and textures, fiber tracts, and structured sets of objects such as cortical sulci), in an arbitrary number of views. Allows C++ and Python programming, both for plugins add-ons, as well as complete custom graphical applications design.',
'components': [
['anatomist-free', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/anatomist-free.git branch:master','anatomist/anatomist-free/integration'),
'bug_fix': ('git https://github.com/brainvisa/anatomist-free.git branch:master','anatomist/anatomist-free/master'),
'5.0': ('git https://github.com/brainvisa/anatomist-free.git branch:5.0','anatomist/anatomist-free/5.0'),
},
}],
['anatomist-gpl', {
'groups': ['all', 'anatomist', 'opensource', 'brainvisa',
'bvdev', 'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/anatomist-gpl.git branch:master','anatomist/anatomist-gpl/integration'),
'bug_fix': ('git https://github.com/brainvisa/anatomist-gpl.git branch:master','anatomist/anatomist-gpl/master'),
'5.0': ('git https://github.com/brainvisa/anatomist-gpl.git branch:5.0','anatomist/anatomist-gpl/5.0'),
},
}],
],
}),
('axon', {
'description': 'Axon organizes processing, pipelining, and data management for neuroimaging. It works both as a graphical user interface or batch and programming interfaces, and allows transparent processing distribution on a computing resource.',
'components': [
['axon', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/axon.git branch:master','axon/integration'),
'bug_fix': ('git https://github.com/brainvisa/axon.git branch:master','axon/master'),
'5.0': ('git https://github.com/brainvisa/axon.git branch:5.0','axon/5.0'),
},
}],
],
}),
('brainvisa-spm', {
'description': 'Python module and Axon toolbox for SPM.',
'components': [
['brainvisa-spm', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/brainvisa-spm.git branch:integration','brainvisa-spm/integration'),
'bug_fix': ('git https://github.com/brainvisa/brainvisa-spm.git branch:master','brainvisa-spm/master'),
'5.0': ('git https://github.com/brainvisa/brainvisa-spm.git branch:5.0','brainvisa-spm/5.0'),
},
}],
],
}),
('datamind', {
'description': 'Statistics, data mining, machine learning [OBSOLETE].',
'components': [
['datamind', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'5.0': ('svn https://bioproj.extra.cea.fr/neurosvn/brainvisa/datamind/branches/5.0','datamind/5.0'),
},
}],
],
}),
('highres-cortex', {
'description': 'Process 3D images of the cerebral cortex at a sub-millimetre scale',
'components': [
['highres-cortex', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/neurospin/highres-cortex.git branch:master','highres-cortex/integration'),
'bug_fix': ('git https://github.com/neurospin/highres-cortex.git default:master','highres-cortex/master'),
'5.0': ('git https://github.com/neurospin/highres-cortex.git branch:5.0','highres-cortex/5.0'),
},
}],
],
}),
('morphologist', {
'description': 'Anatomical MRI (T1) analysis toolbox, featuring cortex and sulci segmentation, and sulci analysis tools, by the <a href="http://lnao.fr">LNAO team</a>.',
'components': [
['morphologist-nonfree', {
'groups': ['all', 'brainvisa', 'bvdev', 'standard', 'cea',
'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/morphologist-nonfree.git branch:integration','morphologist/morphologist-nonfree/integration'),
'bug_fix': ('git https://github.com/brainvisa/morphologist-nonfree.git branch:master','morphologist/morphologist-nonfree/master'),
'5.0': ('git https://github.com/brainvisa/morphologist-nonfree.git branch:5.0','morphologist/morphologist-nonfree/5.0'),
},
}],
['morphologist-gpl', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/morphologist-gpl.git branch:integration','morphologist/morphologist-gpl/integration'),
'bug_fix': ('git https://github.com/brainvisa/morphologist-gpl.git branch:master','morphologist/morphologist-gpl/master'),
'5.0': ('git https://github.com/brainvisa/morphologist-gpl.git branch:5.0','morphologist/morphologist-gpl/5.0'),
},
}],
['morphologist-baby', {
'groups': ['all', 'standard', 'cea'],
'branches': {
'trunk': ('git https://bioproj.extra.cea.fr/git/brainvisa-t1mri/morphologist-baby.git branch:integration','morphologist/morphologist-baby/integration'),
'bug_fix': ('git https://bioproj.extra.cea.fr/git/brainvisa-t1mri/morphologist-baby.git branch:master','morphologist/morphologist-baby/master'),
'5.0': ('git https://bioproj.extra.cea.fr/git/brainvisa-t1mri/morphologist-baby.git branch:5.0','morphologist/morphologist-baby/5.0'),
},
}],
['tms', {
'groups': ['all'],
'branches': {
},
}],
['sulci-data', {
'groups': [],
'branches': {
'trunk': ('svn https://bioproj.extra.cea.fr/neurosvn/brainvisa/morphologist/sulci-data/trunk','morphologist/sulci-data/trunk'),
'bug_fix': ('svn https://bioproj.extra.cea.fr/neurosvn/brainvisa/morphologist/sulci-data/trunk','morphologist/sulci-data/bug_fix'),
},
}],
['sulci-nonfree', {
'groups': ['all', 'brainvisa', 'bvdev', 'standard', 'cea',
'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/sulci-nonfree.git branch:integration','morphologist/sulci-nonfree/integration'),
'bug_fix': ('git https://github.com/brainvisa/sulci-nonfree.git branch:master','morphologist/sulci-nonfree/master'),
'5.0': ('git https://github.com/brainvisa/sulci-nonfree.git branch:5.0','morphologist/sulci-nonfree/5.0'),
},
}],
['morphologist-ui', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/morphologist.git branch:master', 'morphologist/morphologist-ui/integration'),
'bug_fix': ('git https://github.com/brainvisa/morphologist.git default:master', 'morphologist/morphologist-ui/master'),
'5.0': ('git https://github.com/brainvisa/morphologist.git branch:5.0', 'morphologist/morphologist-ui/5.0'),
},
}],
['morpho-deepsulci', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/morpho-deepsulci.git branch:master', 'morphologist/morpho-deepsulci/integration'),
'bug_fix': ('git https://github.com/brainvisa/morpho-deepsulci.git default:master', 'morphologist/morpho-deepsulci/master'),
'5.0': ('git https://github.com/brainvisa/morpho-deepsulci.git branch:5.0', 'morphologist/morpho-deepsulci/5.0'),
},
}],
],
}),
('brainrat', {
'description': 'Ex vivo 3D reconstruction and analysis toolbox, from the <a href="http://www-dsv.cea.fr/dsv/instituts/institut-d-imagerie-biomedicale-i2bm/services/mircen-mircen/unite-cnrs-ura2210-lmn/fiches-thematiques/traitement-et-analyse-d-images-biomedicales-multimodales-du-cerveau-normal-ou-de-modeles-precliniques-de-maladies-cerebrales">BioPICSEL CEA team</a>. Homepage: <a href="http://brainvisa.info/doc/brainrat-gpl/brainrat_man/en/html/index.html">http://brainvisa.info/doc/brainrat-gpl/brainrat_man/en/html/index.html</a>',
'components': [
['brainrat-gpl', {
'groups': ['all', 'brainvisa', 'cea'],
'branches': {
'trunk': ('git https://bioproj.extra.cea.fr/git/brainrat-gpl branch:master', 'brainrat/brainrat-gpl/integration'),
'bug_fix': ('git https://bioproj.extra.cea.fr/git/brainrat-gpl branch:master', 'brainrat/brainrat-gpl/master'),
'5.0': ('git https://bioproj.extra.cea.fr/git/brainrat-gpl branch:5.0', 'brainrat/brainrat-gpl/5.0'),
},
}],
['brainrat-private', {
'groups': ['all', 'brainvisa', 'cea'],
'branches': {
'trunk': ('git https://bioproj.extra.cea.fr/git/brainrat-private branch:master', 'brainrat/brainrat-private/integration'),
'bug_fix': ('git https://bioproj.extra.cea.fr/git/brainrat-private branch:master', 'brainrat/brainrat-private/master'),
'5.0': ('git https://bioproj.extra.cea.fr/git/brainrat-private branch:5.0', 'brainrat/brainrat-private/5.0'),
},
}],
['bioprocessing', {
'groups': ['all', 'cea'],
'branches': {
'trunk': ('git https://bioproj.extra.cea.fr/git/bioprocessing branch:master', 'brainrat/bioprocessing/integration'),
'bug_fix': ('git https://bioproj.extra.cea.fr/git/bioprocessing branch:master', 'brainrat/bioprocessing/master'),
'5.0': ('git https://bioproj.extra.cea.fr/git/bioprocessing branch:5.0', 'brainrat/bioprocessing/5.0'),
},
}],
['preclinical-imaging-iam', {
'groups': ['all'],
'branches': {
'trunk': ('git https://bioproj.extra.cea.fr/git/preclinical-imaging-iam branch:master', 'brainrat/preclinical-imaging-iam/integration'),
'bug_fix': ('git https://bioproj.extra.cea.fr/git/preclinical-imaging-iam branch:master', 'brainrat/preclinical-imaging-iam/master'),
},
}],
['primatologist-gpl', {
'groups': ['all', 'brainvisa', 'cea'],
'branches': {
'trunk': ('git https://bioproj.extra.cea.fr/git/primatologist-gpl branch:master', 'brainrat/primatologist-gpl/integration'),
'bug_fix': ('git https://bioproj.extra.cea.fr/git/primatologist-gpl branch:master', 'brainrat/primatologist-gpl/master'),
'5.0': ('git https://bioproj.extra.cea.fr/git/primatologist-gpl branch:5.0', 'brainrat/primatologist-gpl/5.0'),
},
}],
['3dns-private', {
'groups': ['3dns'],
'branches': {
'trunk': ('git https://bioproj.extra.cea.fr/git/3dns-private branch:master', 'brainrat/3dns-private/integration'),
'bug_fix': ('git https://bioproj.extra.cea.fr/git/3dns-private branch:master', 'brainrat/3dns-private/master'),
'5.0': ('git https://bioproj.extra.cea.fr/git/3dns-private branch:5.0', 'brainrat/3dns-private/5.0'),
},
}],
],
}),
('constellation', {
'components': [
['constellation-gpl', {
'groups': ['all', 'cea'],
'branches': {
'trunk': ('git https://github.com/brainvisa/constellation-gpl.git branch:integration','constellation/constellation-gpl/integration'),
'bug_fix': ('git https://github.com/brainvisa/constellation-gpl.git branch:master','constellation/constellation-gpl/master'),
'5.0': ('git https://github.com/brainvisa/constellation-gpl.git branch:5.0','constellation/constellation-gpl/5.0'),
},
}],
['constellation-nonfree', {
'groups': ['all', 'cea'],
'branches': {
'trunk': ('git https://github.com/brainvisa/constellation-nonfree.git branch:integration','constellation/constellation-nonfree/integration'),
'bug_fix': ('git https://github.com/brainvisa/constellation-nonfree.git branch:master','constellation/constellation-nonfree/master'),
'5.0': ('git https://github.com/brainvisa/constellation-nonfree.git branch:5.0','constellation/constellation-nonfree/5.0'),
},
}],
],
}),
('cortical_surface', {
'description': 'Cortex-based surfacic parameterization and analysis toolbox from the <a href="http://www.lsis.org">LSIS team</a>. Homepage: <a href="http://olivier.coulon.perso.esil.univmed.fr/brainvisa.html">http://olivier.coulon.perso.esil.univmed.fr/brainvisa.html</a>.<br/>Also contains the FreeSurfer toolbox for BrainVisa, by the LNAO team.',
'components': [
['cortical_surface-nonfree', {
'groups': ['all', 'brainvisa', 'bvdev', 'standard', 'cea',
'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/cortical_surface-nonfree.git branch:integration','cortical_surface/cortical_surface-nonfree/integration'),
'bug_fix': ('git https://github.com/brainvisa/cortical_surface-nonfree.git branch:master','cortical_surface/cortical_surface-nonfree/master'),
'5.0': ('git https://github.com/brainvisa/cortical_surface-nonfree.git branch:5.0','cortical_surface/cortical_surface-nonfree/5.0'),
},
}],
['cortical_surface-gpl', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/cortical_surface-gpl.git branch:integration','cortical_surface/cortical_surface-gpl/integration'),
'bug_fix': ('git https://github.com/brainvisa/cortical_surface-gpl.git branch:master','cortical_surface/cortical_surface-gpl/master'),
'5.0': ('git https://github.com/brainvisa/cortical_surface-gpl.git branch:5.0','cortical_surface/cortical_surface-gpl/5.0'),
},
}],
['brainvisa_freesurfer', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'trunk': ('git https://github.com/brainvisa/brainvisa_freesurfer.git branch:integration','cortical_surface/brainvisa_freesurfer/integration'),
'bug_fix': ('git https://github.com/brainvisa/brainvisa_freesurfer.git branch:master','cortical_surface/brainvisa_freesurfer/master'),
'5.0': ('git https://github.com/brainvisa/brainvisa_freesurfer.git branch:5.0','cortical_surface/brainvisa_freesurfer/5.0'),
},
}],
],
}),
('nuclear_imaging', {
'components': [
['nuclear_imaging-gpl', {
'groups': ['all', 'cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/nuclear_imaging-gpl.git branch:master','nuclear_imaging/nuclear_imaging-gpl/master'),
'5.0': ('git https://github.com/cati-neuroimaging/nuclear_imaging-gpl.git branch:5.0','nuclear_imaging/nuclear_imaging-gpl/5.0'),
},
}],
['nuclear_imaging-nonfree', {
'groups': ['all', 'cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/nuclear_imaging-nonfree.git branch:master','nuclear_imaging/nuclear_imaging-nonfree/master'),
'5.0': ('git https://github.com/cati-neuroimaging/nuclear_imaging-nonfree.git branch:5.0','nuclear_imaging/nuclear_imaging-nonfree/5.0'),
},
}],
],
}),
('snapbase', {
'components': [
['snapbase', {
'groups': ['all', 'opensource', 'brainvisa', 'bvdev',
'standard', 'cea', 'cati_platform'],
'branches': {
'5.0': ('svn https://bioproj.extra.cea.fr/neurosvn/brainvisa/snapbase/branches/5.0','snapbase/5.0'),
},
}],
],
}),
('catidb', {
'components': [
['catidb-client', {
'groups': ['cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/catidb-client.git default:main', 'catidb-client'),
},
}],
],
}),
('sacha', {
'components': [
['sacha-nonfree', {
'groups': ['all', 'catidb3_all', 'cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/sacha-nonfree.git branch:master', 'sacha-nonfree/master'),
},
}],
['sacha-gpl', {
'groups': ['all', 'catidb3_all', 'cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/sacha-gpl.git branch:master', 'sacha-gpl/master'),
},
}],
],
}),
('whasa', {
'components': [
['whasa-nonfree', {
'groups': ['all', 'catidb3_all', 'cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/whasa-nonfree.git branch:master', 'whasa-nonfree/master'),
},
}],
['whasa-gpl', { # Experimental branch to propose a new organization
'groups': ['all', 'catidb3_all', 'cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/whasa-gpl.git branch:master', 'whasa-gpl/master'),
},
}],
],
}),
('longitudinal_pipelines', {
'components': [
['longitudinal_pipelines', {
'groups': ['all', 'cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/longitudinal_pipelines.git branch:master',
'longitudinal_pipelines/master'),
'5.0': ('git https://github.com/cati-neuroimaging/longitudinal_pipelines.git branch:5.0',
'longitudinal_pipelines/5.0'),
},
}],
],
}),
('disco', {
'components': [
['disco', {
'groups': ['all', 'cea'],
'branches': {
'trunk': ('git https://bioproj.extra.cea.fr/git/brainvisa-disco branch:master', 'disco/integration'),
'bug_fix': ('git https://bioproj.extra.cea.fr/git/brainvisa-disco branch:master', 'disco/master'),
'5.0': ('git https://bioproj.extra.cea.fr/git/brainvisa-disco branch:5.0', 'disco/5.0'),
},
}],
],
}),
('qualicati', {
'components': [
['qualicati', {
'groups': ['cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/qualicati.git default:main', 'qualicati'),
},
'build_model': 'pure_python',
}],
],
}),
('fmri', {
'description': 'Functional MRI processing toolboxes.',
'components': [
['rsfmri', {
'groups': ['all', 'cati_platform'],
'branches': {
'bug_fix': ('git https://github.com/cati-neuroimaging/rsfmri.git branch:master','rsfmri/master'),
},
'build_model': 'pure_python',
}],
],
}),
]
# Optional override files: each of these, if present, is executed in this
# module's namespace and may patch ``components_definition`` (user-level
# config first, then a build-tree-local one when BV_MAKER_BUILD is set).
customize_components_definition = [os.path.expanduser('~/.brainvisa/components_definition.py')]
if 'BV_MAKER_BUILD' in os.environ:
    customize_components_definition.append(os.path.join(os.environ['BV_MAKER_BUILD'], 'components_definition.py'))
for ccd in customize_components_definition:
    if os.path.exists(ccd):
        with open(ccd) as f:
            # NOTE(review): exec of a user-writable file runs arbitrary code;
            # acceptable here only because these are trusted local config files.
            exec(compile(f.read(), ccd, 'exec'))
# Branch-name aliases: 'bug_fix'/'master' are interchangeable, and so are
# 'trunk'/'integration'.  Whenever exactly one name of a pair is present in
# a component's branch table, copy its value under the other name too.
for _project_name, _project_def in components_definition:
    for _component_name, _component_def in _project_def['components']:
        branches = _component_def['branches']
        for _alias_a, _alias_b in (('bug_fix', 'master'),
                                   ('trunk', 'integration')):
            if _alias_a in branches and _alias_b not in branches:
                branches[_alias_b] = branches[_alias_a]
            elif _alias_b in branches and _alias_a not in branches:
                branches[_alias_a] = branches[_alias_b]
| 56.938182 | 545 | 0.536243 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20,300 | 0.648231 |
5022938aba6c91cfd4336b99f65d9f2b0ba733cd | 977 | py | Python | examples/hashtrees.py | ascribe/transactions | 08f344ce1879152d2a0ba51dda76f11e73c83867 | [
"Apache-2.0"
] | 124 | 2015-05-11T09:51:09.000Z | 2021-11-17T15:38:17.000Z | examples/hashtrees.py | chasealias/transactions | 08f344ce1879152d2a0ba51dda76f11e73c83867 | [
"Apache-2.0"
] | 58 | 2016-02-22T10:08:11.000Z | 2018-10-16T17:34:13.000Z | examples/hashtrees.py | chasealias/transactions | 08f344ce1879152d2a0ba51dda76f11e73c83867 | [
"Apache-2.0"
] | 24 | 2015-05-18T19:16:44.000Z | 2021-05-31T09:51:32.000Z | # -*- coding: utf-8 -*-
"""
Inspired by:
* https://gist.github.com/shirriff/c9fb5d98e6da79d9a772#file-merkle-py
* https://github.com/richardkiss/pycoin
"""
from __future__ import absolute_import, division, unicode_literals
from builtins import range
import binascii
import hashlib
def merkleroot(hashes):
    """Compute the merkle root of a list of transaction hashes.

    Args:
        hashes: reversed binary form of transactions hashes, e.g.:
            ``binascii.unhexlify(h)[::-1] for h in block['tx']]``

    Returns:
        merkle root in hexadecimal form (as ``bytes``)
    """
    if len(hashes) == 1:
        # Base case: the root is the byte-reversed hex form of the only hash.
        return binascii.hexlify(bytearray(reversed(hashes[0])))
    if len(hashes) % 2 == 1:
        # Odd level: duplicate the last hash.  Work on a copy so the caller's
        # list is not mutated (the previous version appended in place).
        hashes = hashes + hashes[-1:]
    parent_hashes = []
    for i in range(0, len(hashes) - 1, 2):
        # Bitcoin-style merkle step: double SHA-256 of the concatenated pair.
        first_round_hash = hashlib.sha256(hashes[i] + hashes[i + 1]).digest()
        second_round_hash = hashlib.sha256(first_round_hash).digest()
        parent_hashes.append(second_round_hash)
    return merkleroot(parent_hashes)
| 28.735294 | 75 | 0.660184 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 371 | 0.379734 |
50236b02c8e8d9823839d03fe29cb5157a9c7e08 | 1,273 | py | Python | test/test_del_contact_from_group.py | julipavlovich/python_training | 0ca2320b89b1bb40926d7c8b1ecb0278998fa4be | [
"Apache-2.0"
] | null | null | null | test/test_del_contact_from_group.py | julipavlovich/python_training | 0ca2320b89b1bb40926d7c8b1ecb0278998fa4be | [
"Apache-2.0"
] | null | null | null | test/test_del_contact_from_group.py | julipavlovich/python_training | 0ca2320b89b1bb40926d7c8b1ecb0278998fa4be | [
"Apache-2.0"
] | null | null | null | from model.contact import Contact
from model.group import Group
import random
def test_add_contact_in_group(app, db):
    """Check that removing a contact from a group decrements its contact count.

    Despite the "add" in its name, the assertion covers removal: the test
    ensures a group with at least one contact exists (creating data through
    the UI when needed), deletes a random contact from a random group, and
    verifies the group's contact count dropped by exactly one.

    ``app`` and ``db`` are fixtures (UI driver and database accessor);
    their exact semantics are defined elsewhere in the project — confirm
    against conftest before relying on details.
    """
    if len(db.get_contact_list()) == 0:
        app.contact.create(Contact(firstname="ADDNewFirstName1", lastname="ADDNewLastName1"))
    if len(db.get_group_list()) == 0:
        app.group.create(Group(name="test"))
    # pick a group at random
    old_groups = app.group.get_group_list()
    random_group = random.choice(old_groups)
    random_group_id = int(random_group.id)
    # pick a contact in that group; if the group is empty, first add a
    # random contact to it through the UI
    if len(db.get_contacts_in_group(random_group_id)) == 0:
        ui_contacts_list = app.contact.get_contact_list()
        contact = random.choice(ui_contacts_list)
        c_id = int(contact.id)
        app.contact.add_contact_in_group(c_id, random_group_id)
    contacts_in_group = db.get_contacts_in_group(random_group_id)
    random_contact = random.choice(contacts_in_group)
    # presumably DB rows where index 0 is the contact id — verify schema
    random_contact_id = int(random_contact[0])
    old_contacts_in_group = db.get_contacts_in_group(random_group_id)
    # delete the contact from the group and verify the count decreased by one
    app.contact.del_contact_in_group(random_contact_id, random_group_id)
    new_contacts_in_group = db.get_contacts_in_group(random_group_id)
    assert len(old_contacts_in_group) - 1 == len(new_contacts_in_group)
| 43.896552 | 93 | 0.750196 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 175 | 0.131283 |
50241f1b6c840c3a0e9187ec807790ee1359dfdf | 1,945 | py | Python | test.py | termistotel/CherryPyStuff | 84659f8cd3c2b6f7d17c79c3a0bafdac4ad85a68 | [
"MIT"
] | null | null | null | test.py | termistotel/CherryPyStuff | 84659f8cd3c2b6f7d17c79c3a0bafdac4ad85a68 | [
"MIT"
] | null | null | null | test.py | termistotel/CherryPyStuff | 84659f8cd3c2b6f7d17c79c3a0bafdac4ad85a68 | [
"MIT"
] | null | null | null | import dill as pickle
import cherrypy
# Base directory used to build the paths of the log files below.
homeLocation = '/home/alion/Projekti/cherrypyTest'
# Root application config: log to files (screen logging off) and enable
# Memcached-backed sessions (file-based session storage kept as a
# commented-out alternative).
rootConfig = {'/': {'log.screen': False,
                    'log.access_file': homeLocation+'/logs/access_file.log',
                    'log.error_file': homeLocation+'/logs/error_file.log',
                    'tools.sessions.on': True,
                    'tools.sessions.storage_class': cherrypy.lib.sessions.MemcachedSession
                    # 'tools.sessions.storage_class': cherrypy.lib.sessions.FileSession,
                    # 'tools.sessions.storage_path': "/some/directory"
                    }
              }
tst = lambda x: x
class Root(object):
    """CherryPy application root demonstrating cookies and server-side sessions."""

    @cherrypy.expose
    def index(self):
        """Landing page."""
        return "<h1>Hello! How are you?"

    @cherrypy.expose
    def set(self):
        """Send the client a demo cookie valid site-wide for one hour."""
        cookie = cherrypy.response.cookie
        cookie['cookieName'] = 'cookieValue'
        cookie['cookieName']['path'] = '/'
        cookie['cookieName']['max-age'] = 3600
        cookie['cookieName']['version'] = 1
        return "<html><body>Hello, I just sent you a cookie</body></html>"

    @cherrypy.expose
    def read(self):
        """List every cookie name/value the client sent with this request."""
        # Removed commented-out debug code; behavior is unchanged.
        cookie = cherrypy.request.cookie
        res = """<html><body>Hi, you sent me %s cookies.<br />
            Here is a list of cookie names/values:<br />""" % len(cookie)
        for name in cookie.keys():
            res += "name: %s, value: %s<br>" % (name, cookie[name].value)
        return res + "</body></html>"

    @cherrypy.expose
    def count(self):
        """Count this client's visits in its session (message is in Croatian)."""
        if 'count' not in cherrypy.session:
            cherrypy.session['count'] = 0
        cherrypy.session['count'] += 1
        # Store a module-level callable in the session to exercise the
        # session backend's serialization of functions.
        cherrypy.session['testica'] = tst
        return "<h1> Naspamao si stranicu %s puta" % (cherrypy.session['count'])
if __name__ == '__main__':
    # Launch the CherryPy server with Root mounted at '/' using rootConfig.
    cherrypy.quickstart(Root(), '/', rootConfig)
| 32.966102 | 84 | 0.582519 | 1,268 | 0.651928 | 0 | 0 | 1,225 | 0.62982 | 0 | 0 | 833 | 0.428278 |
502575823cd86b4b2ecb13d681cb93aec7e91dfa | 202 | py | Python | main/admin.py | ericrobskyhuntley/vialab.mit.edu | 1318d03b8eeb106c1662052e1caa53290e206ae7 | [
"MIT"
] | null | null | null | main/admin.py | ericrobskyhuntley/vialab.mit.edu | 1318d03b8eeb106c1662052e1caa53290e206ae7 | [
"MIT"
] | null | null | null | main/admin.py | ericrobskyhuntley/vialab.mit.edu | 1318d03b8eeb106c1662052e1caa53290e206ae7 | [
"MIT"
] | null | null | null | from django.contrib import admin
from simple_history.admin import SimpleHistoryAdmin
from .models import MainMetadata
# Register MainMetadata with the Django admin, using SimpleHistoryAdmin so
# the model's django-simple-history change log is browsable in the admin UI.
admin.site.register(MainMetadata, SimpleHistoryAdmin)
5025e7ee5d5562ae40919351cc1f7e7d9a8bf87c | 17,839 | py | Python | FEArena/feaapi/api/skills/helper.py | Superbird11/FEArena | 76b32dd9d4fddc62c191225e332560e0e9ed2c9e | [
"MIT"
] | 1 | 2020-12-25T03:33:44.000Z | 2020-12-25T03:33:44.000Z | FEArena/feaapi/api/skills/helper.py | Superbird11/FEArena | 76b32dd9d4fddc62c191225e332560e0e9ed2c9e | [
"MIT"
] | null | null | null | FEArena/feaapi/api/skills/helper.py | Superbird11/FEArena | 76b32dd9d4fddc62c191225e332560e0e9ed2c9e | [
"MIT"
] | null | null | null | from typing import Iterable as Iterable, List, Dict, Callable, Iterator, Set, Optional
from ...models.core.Unit import Unit
from ...models.core.Class import Class
from ...models.core.Weapon import Weapon
from ...models.core.Item import Item
from ...models.core.Skill import Skill
from ...models.play.ActiveArena import ActiveArena
from ...models.play.ActiveUnit import ActiveUnit
from ...models.play.ActiveWeapon import ActiveWeapon
from ...models.play.ActiveItem import ActiveItem
from ...models.build.BuiltUnit import BuiltUnit
from ..calc.combat_data import AttackData, AfterAttackData
from ..calc.combat import CombatData
from .passive import passive
from .dequip import dequip
from .equip import equip
from .use import use
from .before_attack import before_attack
from .after_attack import after_attack
from .before_attacked import before_attacked
from .after_attacked import after_attacked
from .before_combat import before_combat
from .after_combat import after_combat
from .turn_start import turn_start
from .turn_end import turn_end
from .unit_turn_end import unit_turn_end
from .on_build import on_build
def _interleave_by_priority(skill_getter1: Callable[[Skill], Callable],
                            skills1: Iterable[Skill], args1: Iterable,
                            skill_getter2: Callable[[Skill], Callable],
                            skills2: Iterable[Skill], args2: Iterable) -> List[Dict]:
    """
    Executes two sets of skills in descending order of skill priority,
    calling each skill's implementation with the arguments belonging to
    its set. When two skills tie on priority, skills from the first set
    run before skills from the second, and each set keeps its own
    original relative order.

    :param skill_getter1: maps a skill from the first set to its implementation
    :param skills1: first set of skills to execute
    :param args1: arguments for calls to the first set's implementations
    :param skill_getter2: maps a skill from the second set to its implementation
    :param skills2: second set of skills to execute
    :param args2: arguments for calls to the second set's implementations
    :return: a list of non-empty skill outputs conforming to the appropriate
        subschema (see api.arena.schemas)
    """
    # Tag each skill with its getter and argument list, then stable-sort the
    # combined list by descending priority. Sort stability reproduces the
    # two-iterator merge semantics: on a priority tie, all of skills1's
    # entries at that priority execute before skills2's.
    tagged = [(skill, skill_getter1, args1) for skill in skills1]
    tagged += [(skill, skill_getter2, args2) for skill in skills2]
    tagged.sort(key=lambda entry: entry[0].priority, reverse=True)
    output = []
    for skill, getter, args in tagged:
        result = getter(skill)(*args)
        if result:
            output.append(result)
    return output
def accumulate(
        personal: Unit = None,
        unit_class: Class = None,
        weapons: Iterable[Weapon] = None,
        active_weapons: Iterable[ActiveWeapon] = None,
        items: Iterable[Item] = None,
        active_items: Iterable[ActiveItem] = None,
        extra: Iterable[Skill] = None
) -> Iterator[Skill]:
    """
    Yield, one at a time, every skill attached to the given sources.

    Sources are visited in order: the unit's personal skills, the class's
    skills, any extra skills, weapon effects (templates then active
    instances), and item effects (templates then active instances). Any
    source left as None (or empty) contributes nothing.

    :param personal: a Unit whose personal skills are yielded
    :param unit_class: a Class whose class skills are yielded
    :param weapons: Weapon objects whose weapon_effects are yielded
    :param active_weapons: ActiveWeapon objects whose template weapon_effects are yielded
    :param items: Item objects whose item_effects are yielded
    :param active_items: ActiveItem objects whose template item_effects are yielded
    :param extra: extra Skill objects (probably from a BuiltUnit) yielded as-is
    :return: a generator yielding skills one at a time from all given sources
    """
    # personal, class, and extra skills
    if personal:
        for skill in personal.personal_skills.all():
            yield skill
    if unit_class:
        for skill in unit_class.class_skills.all():
            yield skill
    if extra:
        for skill in extra:
            yield skill
    # weapon effects: raw templates, then active instances' templates
    for weapon in (weapons or ()):
        for skill in weapon.weapon_effects.all():
            yield skill
    for active_weapon in (active_weapons or ()):
        for skill in active_weapon.template.weapon_effects.all():
            yield skill
    # item effects: raw templates, then active instances' templates
    for item in (items or ()):
        for skill in item.item_effects.all():
            yield skill
    for active_item in (active_items or ()):
        for skill in active_item.template.item_effects.all():
            yield skill
def _exists(obj: object) -> bool:
    """Return True if `obj` is truthy; used to drop empty skill outputs.

    Note: the annotation was previously the builtin function ``any``,
    which is not a type; ``object`` expresses "any value" correctly.
    """
    return bool(obj)
def passive_all(skills: Iterable[Skill], unit: ActiveUnit) -> None:
    """
    Run the passive_effect implementation of every given skill on `unit`.

    Passive effects mutate the unit in place and produce no output.

    :param skills: Skills whose passive_effects are to be executed
    :param unit: the ActiveUnit the passive effects apply to
    """
    for skill in skills:
        passive[skill.passive_effect](unit)
def dequip_all(skills: Iterable[Skill], unit: ActiveUnit) -> List[Dict]:
    """
    Run the on_dequip_effect of every given skill against `unit`.

    :param skills: Skills whose on_dequip_effects are to be executed
    :param unit: the ActiveUnit the dequip effects apply to
    :return: the non-empty dequip() outputs, in order, each conforming to the
        appropriate subschema in api.arena.schemas
    """
    results = []
    for skill in skills:
        outcome = dequip[skill.on_dequip_effect](unit)
        if outcome:
            results.append(outcome)
    return results
def equip_all(skills: Iterable[Skill], unit: ActiveUnit) -> List[Dict]:
    """
    Run the on_equip_effect of every given skill against `unit`.

    :param skills: Skills whose on_equip_effects are to be executed
    :param unit: the ActiveUnit the equip effects apply to
    :return: the non-empty equip() outputs, in order, each conforming to the
        appropriate subschema in api.arena.schemas
    """
    results = []
    for skill in skills:
        outcome = equip[skill.on_equip_effect](unit)
        if outcome:
            results.append(outcome)
    return results
def use_all(skills: Iterable[Skill], arena: ActiveArena, unit: ActiveUnit,
            target: Optional[ActiveUnit], extra_data: Optional[str]) -> List[Dict]:
    """
    Run the on_use_effect of every given skill.

    :param skills: Skills whose on_use_effects are to be executed
    :param arena: the ActiveArena the battle is currently being fought in
    :param unit: the ActiveUnit using the skills
    :param target: optionally, the target of the skill use
    :param extra_data: extra data some skills require when used
    :return: the non-empty use() outputs, in order, each conforming to the
        appropriate subschema in api.arena.schemas
    """
    results = []
    for skill in skills:
        outcome = use[skill.on_use_effect](arena, unit, target, extra_data)
        if outcome:
            results.append(outcome)
    return results
def before_attack_all(skills: Iterable[Skill], data: AttackData) -> List[Dict]:
    """
    Run the before_attack_effect of every given skill.

    :param skills: Skills whose before_attack_effects are to be executed
    :param data: the AttackData passed to each skill implementation
    :return: the non-empty outputs, in order, each conforming to the
        appropriate subschema in api.arena.schemas
    """
    results = []
    for skill in skills:
        outcome = before_attack[skill.before_attack_effect](data)
        if outcome:
            results.append(outcome)
    return results
def before_attacked_all(skills: Iterable[Skill], data: AttackData) -> List[Dict]:
    """
    Run the before_attacked_effect of every given skill.

    :param skills: Skills whose before_attacked_effects are to be executed
    :param data: the AttackData passed to each skill implementation
    :return: the non-empty outputs, in order, each conforming to the
        appropriate subschema in api.arena.schemas
    """
    results = []
    for skill in skills:
        outcome = before_attacked[skill.before_attacked_effect](data)
        if outcome:
            results.append(outcome)
    return results
def before_attack_with_priority(attacker: Iterable[Skill], attacked: Iterable[Skill], data: AttackData) -> List[Dict]:
    """
    Run the attacker's before_attack_effects and the defender's
    before_attacked_effects, interleaved in descending order of skill
    priority (the attacker's skills run first on ties).

    :param attacker: skills whose before_attack_effects are to be executed
    :param attacked: skills whose before_attacked_effects are to be executed
    :param data: argument passed to every skill call
    :return: non-empty outputs conforming to the appropriate subschema
        in api.arena.schemas
    """
    def attacker_impl(skill):
        return before_attack[skill.before_attack_effect]

    def attacked_impl(skill):
        return before_attacked[skill.before_attacked_effect]

    return _interleave_by_priority(attacker_impl, attacker, [data],
                                   attacked_impl, attacked, [data])
def after_attack_all(skills: Iterable[Skill], arena: ActiveArena, data: AfterAttackData) -> List[Dict]:
    """
    Run the after_attack_effect of every given skill.

    :param skills: Skills whose after_attack_effects are to be executed
    :param arena: the ActiveArena being fought in, for adding temporary skills if necessary
    :param data: the AfterAttackData passed to each skill implementation
    :return: the non-empty outputs, in order, each conforming to the
        appropriate subschema in api.arena.schemas
    """
    results = []
    for skill in skills:
        outcome = after_attack[skill.after_attack_effect](arena, data)
        if outcome:
            results.append(outcome)
    return results
def after_attacked_all(skills: Iterable[Skill], data: AfterAttackData) -> List[Dict]:
    """
    Run the after_attacked_effect of every given skill.

    :param skills: Skills whose after_attacked_effects are to be executed
    :param data: the AfterAttackData passed to each skill implementation
    :return: the non-empty outputs, in order, each conforming to the
        appropriate subschema in api.arena.schemas
    """
    results = []
    for skill in skills:
        outcome = after_attacked[skill.after_attacked_effect](data)
        if outcome:
            results.append(outcome)
    return results
def before_combat_all(skills: Iterable[Skill], unit: ActiveUnit, arena: ActiveArena, data: CombatData) -> List[Dict]:
    """
    Run the before_combat_effect of every given skill.

    :param skills: Skills whose before_combat_effects are to be executed
    :param unit: the ActiveUnit these skills belong to
    :param arena: the ActiveArena the battle is taking place in
    :param data: the CombatData passed to each skill implementation
    :return: the non-empty outputs, in order, each conforming to the
        appropriate subschema in api.arena.schemas
    """
    results = []
    for skill in skills:
        outcome = before_combat[skill.before_combat_effect](unit, arena, data)
        if outcome:
            results.append(outcome)
    return results
def before_combat_with_priority(skills1: Iterable[Skill], unit1: ActiveUnit,
                                skills2: Iterable[Skill], unit2: ActiveUnit,
                                arena: ActiveArena, data: CombatData) -> List[Dict]:
    """
    Run both combatants' before_combat_effects, interleaved in descending
    order of skill priority (unit1's skills run first on ties).

    :param skills1: skills belonging to unit1 whose before_combat_effects run
    :param unit1: the unit skills1 belong to
    :param skills2: skills belonging to unit2 whose before_combat_effects run
    :param unit2: the unit skills2 belong to
    :param arena: the ActiveArena the battle is taking place in
    :param data: the CombatData passed to every skill call
    :return: non-empty outputs conforming to the appropriate subschema
        in api.arena.schemas
    """
    def impl(skill):
        return before_combat[skill.before_combat_effect]

    return _interleave_by_priority(impl, skills1, [unit1, arena, data],
                                   impl, skills2, [unit2, arena, data])
def after_combat_all(skills: Iterable[Skill], unit: ActiveUnit, arena: ActiveArena, data: CombatData) -> List[Dict]:
    """
    Run the after_combat_effect of every given skill.

    :param skills: Skills whose after_combat_effects are to be executed
    :param unit: the ActiveUnit these skills belong to
    :param arena: the ActiveArena the battle is taking place in
    :param data: the CombatData passed to each skill implementation
    :return: the non-empty outputs, in order, each conforming to the
        appropriate subschema in api.arena.schemas
    """
    results = []
    for skill in skills:
        outcome = after_combat[skill.after_combat_effect](unit, arena, data)
        if outcome:
            results.append(outcome)
    return results
def after_combat_with_priority(skills1: Iterable[Skill], unit1: ActiveUnit,
                               skills2: Iterable[Skill], unit2: ActiveUnit,
                               arena: ActiveArena, data: CombatData) -> List[Dict]:
    """
    Run both combatants' after_combat_effects, interleaved in descending
    order of skill priority (unit1's skills run first on ties).

    :param skills1: skills belonging to unit1 whose after_combat_effects run
    :param unit1: the unit skills1 belong to
    :param skills2: skills belonging to unit2 whose after_combat_effects run
    :param unit2: the unit skills2 belong to
    :param arena: the ActiveArena the battle is taking place in
    :param data: the CombatData passed to every skill call
    :return: non-empty outputs conforming to the appropriate subschema
        in api.arena.schemas
    """
    def impl(skill):
        return after_combat[skill.after_combat_effect]

    return _interleave_by_priority(impl, skills1, [unit1, arena, data],
                                   impl, skills2, [unit2, arena, data])
def turn_start_all(skills: Iterable[Skill], unit: ActiveUnit, arena: ActiveArena) -> List[Dict]:
    """
    Run the turn_start_effect of every given skill at the start of a turn.

    :param skills: Skills whose turn_start_effects are to be executed
    :param unit: the ActiveUnit the skills belong to
    :param arena: the ActiveArena the battle is taking place in
    :return: the non-empty outputs to append to the existing list of actions,
        each conforming to api.arena.schemas
    """
    results = []
    for skill in skills:
        outcome = turn_start[skill.turn_start_effect](arena, unit)
        if outcome:
            results.append(outcome)
    return results
def turn_end_all(skills: Iterable[Skill], unit: ActiveUnit, arena: ActiveArena) -> List[Dict]:
    """
    Run the turn_end_effect of every given skill at the end of a turn.

    :param skills: Skills whose turn_end_effects are to be executed
    :param unit: the ActiveUnit the skills belong to
    :param arena: the ActiveArena the battle is taking place in
    :return: the non-empty outputs to append to the existing list of actions,
        each conforming to api.arena.schemas
    """
    results = []
    for skill in skills:
        outcome = turn_end[skill.turn_end_effect](arena, unit)
        if outcome:
            results.append(outcome)
    return results
def unit_turn_end_all(skills: Iterable[Skill], unit: ActiveUnit, arena: ActiveArena, actions: List[Dict]) -> List[Dict]:
    """
    Run the unit_turn_end_effect of every given skill when a unit's turn ends.

    :param skills: Skills whose unit_turn_end_effects are to be executed
    :param unit: the ActiveUnit the skills belong to
    :param arena: the ActiveArena the battle is taking place in
    :param actions: the actions (conforming to api.arena.schemas) taken so far this turn
    :return: the non-empty outputs to append to the existing list of actions,
        each conforming to api.arena.schemas
    """
    results = []
    for skill in skills:
        outcome = unit_turn_end[skill.unit_turn_end_effect](arena, unit, actions)
        if outcome:
            results.append(outcome)
    return results
def on_build_all(skills: Iterable[Skill], unit: BuiltUnit, av_skills: Set[Skill]) -> List[Dict]:
    """
    Run the build effect of every given skill while a unit is being built.

    Build effects should not modify `unit.extra_skills` directly; instead
    they add any granted skills to `av_skills`.

    :param skills: skills whose build_effects are to be executed
    :param unit: the BuiltUnit the skills belong to
    :param av_skills: a Set of skills available to the unit, which build
        effects may add to
    :return: the non-empty outputs of the build effects, in order
    """
    # NOTE: the docstring previously claimed nothing was returned, but the
    # implementation has always returned the collected outputs; the return is
    # kept (and now documented) for backward compatibility with callers.
    results = []
    for skill in skills:
        outcome = on_build[skill.build_effect](unit, av_skills)
        if outcome:
            results.append(outcome)
    return results
# Public API of this module (names exported via `import *`); keep in sync
# with the module-level functions defined above.
__all__ = ['accumulate', 'passive_all', 'dequip_all', 'equip_all', 'use_all', 'before_attack_all', 'after_attack_all',
           'before_attacked_all', 'after_attacked_all', 'before_combat_all', 'after_combat_all',
           'turn_start_all', 'turn_end_all', 'unit_turn_end_all', 'on_build_all',
           'before_attack_with_priority', 'before_combat_with_priority', 'after_combat_with_priority']
| 51.114613 | 120 | 0.71254 | 0 | 0 | 1,889 | 0.105892 | 0 | 0 | 0 | 0 | 9,868 | 0.55317 |
5025f16dc15906f1c7c7af67d516b81c43cb2edb | 967 | bzl | Python | source/bazel/deps/libevent/get.bzl | luxe/CodeLang-compiler | 78837d90bdd09c4b5aabbf0586a5d8f8f0c1e76a | [
"MIT"
] | 1 | 2019-01-06T08:45:46.000Z | 2019-01-06T08:45:46.000Z | source/bazel/deps/libevent/get.bzl | luxe/CodeLang-compiler | 78837d90bdd09c4b5aabbf0586a5d8f8f0c1e76a | [
"MIT"
] | 264 | 2015-11-30T08:34:00.000Z | 2018-06-26T02:28:41.000Z | source/bazel/deps/libevent/get.bzl | UniLang/compiler | c338ee92994600af801033a37dfb2f1a0c9ca897 | [
"MIT"
] | null | null | null | # Do not edit this file directly.
# It was auto-generated by: code/programs/reflexivity/reflexive_refresh
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")
def libevent():
    # NOTE: this file is auto-generated (see header); changes here will be
    # overwritten by code/programs/reflexivity/reflexive_refresh.
    http_archive(
        name = "libevent",
        build_file = "//bazel/deps/libevent:build.BUILD",
        # Pinned to a specific commit of the Unilang/libevent fork; the sha256
        # guards the integrity of the downloaded archive.
        sha256 = "9b436b404793be621c6e01cea573e1a06b5db26dad25a11c6a8c6f8526ed264c",
        strip_prefix = "libevent-eee26deed38fc7a6b6780b54628b007a2810efcd",
        urls = [
            "https://github.com/Unilang/libevent/archive/eee26deed38fc7a6b6780b54628b007a2810efcd.tar.gz",
        ],
        patches = [
            "//bazel/deps/libevent/patches:p1.patch",
        ],
        patch_args = [
            "-p1",
        ],
        # After patching, insert <stdint.h> wherever <stdlib.h> is included so
        # fixed-width integer types are available in every translation unit.
        patch_cmds = [
            "find . -type f -name '*.c' -exec sed -i 's/#include <stdlib.h>/#include <stdlib.h>\n#include <stdint.h>\n/g' {} \\;",
        ],
    )
| 37.192308 | 130 | 0.623578 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 638 | 0.659772 |
502807db93679f10580d12e8bb304e4225f4b690 | 15,190 | py | Python | pygsti/modelmembers/states/cptpstate.py | colibri-coruscans/pyGSTi | da54f4abf668a28476030528f81afa46a1fbba33 | [
"Apache-2.0"
] | null | null | null | pygsti/modelmembers/states/cptpstate.py | colibri-coruscans/pyGSTi | da54f4abf668a28476030528f81afa46a1fbba33 | [
"Apache-2.0"
] | null | null | null | pygsti/modelmembers/states/cptpstate.py | colibri-coruscans/pyGSTi | da54f4abf668a28476030528f81afa46a1fbba33 | [
"Apache-2.0"
] | null | null | null | """
The CPTPState class and supporting functionality.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
from pygsti.modelmembers.states.densestate import DenseState as _DenseState
from pygsti.modelmembers.states.state import State as _State
from pygsti.evotypes import Evotype as _Evotype
from pygsti.baseobjs import statespace as _statespace
from pygsti.baseobjs.basis import Basis as _Basis
IMAG_TOL = 1e-7 # tolerance for imaginary part being considered zero
class CPTPState(_DenseState):
    """
    TODO: update docstring
    A state vector constrained to correspond to a positive density matrix.

    This state vector is parameterized through the Cholesky decomposition of
    its standard-basis representation as a density matrix (not a Liouville
    vector). The resulting state vector thus represents a positive density
    matrix, and additional constraints on the parameters also guarantee that the
    trace == 1. This state vector is meant for use with CPTP processes, hence
    the name.

    Parameters
    ----------
    vec : array_like or State
        a 1D numpy array representing the state operation. The
        shape of this array sets the dimension of the state.

    basis : {"std", "gm", "pp", "qt"} or Basis
        The basis `vec` is in. Needed because this parameterization
        requires we construct the density matrix corresponding to
        the Liouville vector `vec`.

    truncate : bool, optional
        Whether or not a non-positive, trace=1 `vec` should
        be truncated to force a successful construction.

    evotype : Evotype or str, optional
        The evolution type. The special value `"default"` is equivalent
        to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.

    state_space : StateSpace, optional
        The state space for this operation. If `None` a default state space
        with the appropriate number of qubits is used.
    """

    def __init__(self, vec, basis, truncate=False, evotype="default", state_space=None):
        vector = _State._to_vector(vec)
        basis = _Basis.cast(basis, len(vector))
        self.basis = basis
        self.basis_mxs = basis.elements  # shape (len(vec), dmDim, dmDim)
        self.basis_mxs = _np.rollaxis(self.basis_mxs, 0, 3)  # shape (dmDim, dmDim, len(vec))
        assert(self.basis_mxs.shape[-1] == len(vector))
        # set self.params and self.dmDim
        self._set_params_from_vector(vector, truncate)
        # parameter labels (the parameters encode the Cholesky factor Lmx)
        labels = []
        for i, ilbl in enumerate(basis.labels[1:]):
            for j, jlbl in enumerate(basis.labels[1:]):
                if i == j: labels.append("%s diagonal element of density matrix Cholesky decomp" % ilbl)
                elif j < i: labels.append("Re[(%s,%s) element of density matrix Cholesky decomp]" % (ilbl, jlbl))
                else: labels.append("Im[(%s,%s) element of density matrix Cholesky decomp]" % (ilbl, jlbl))
        # scratch space for the Cholesky factor, reused by _construct_vector
        self.Lmx = _np.zeros((self.dmDim, self.dmDim), 'complex')
        state_space = _statespace.default_space_for_dim(len(vector)) if (state_space is None) \
            else _statespace.StateSpace.cast(state_space)
        evotype = _Evotype.cast(evotype)
        _DenseState.__init__(self, vector, evotype, state_space)
        self._paramlbls = _np.array(labels, dtype=object)

    def _set_params_from_vector(self, vector, truncate):
        """Set self.params (and self.dmDim) from the Liouville vector `vector`."""
        density_mx = _np.dot(self.basis_mxs, vector)
        density_mx = density_mx.squeeze()
        dmDim = density_mx.shape[0]
        assert(dmDim == density_mx.shape[1]), "Density matrix must be square!"
        trc = _np.trace(density_mx)
        assert(truncate or _np.isclose(trc, 1.0)), \
            "`vec` must correspond to a trace-1 density matrix (truncate == False)!"
        if not _np.isclose(trc, 1.0):  # truncate to trace == 1
            density_mx -= _np.identity(dmDim, 'd') / dmDim * (trc - 1.0)
        # push any slightly negative evals of density_mx positive
        # so that the Cholesky decomp will work.
        evals, U = _np.linalg.eig(density_mx)
        Ui = _np.linalg.inv(U)
        assert(truncate or all([ev >= -1e-12 for ev in evals])), \
            "`vec` must correspond to a positive density matrix (truncate == False)!"
        pos_evals = evals.clip(1e-16, 1e100)
        density_mx = _np.dot(U, _np.dot(_np.diag(pos_evals), Ui))
        try:
            Lmx = _np.linalg.cholesky(density_mx)
        except _np.linalg.LinAlgError:  # Lmx not postitive definite?
            pos_evals = evals.clip(1e-12, 1e100)  # try again with 1e-12
            density_mx = _np.dot(U, _np.dot(_np.diag(pos_evals), Ui))
            Lmx = _np.linalg.cholesky(density_mx)
        # check TP condition: that diagonal els of Lmx squared add to 1.0
        Lmx_norm = _np.trace(_np.dot(Lmx.T.conjugate(), Lmx))  # sum of magnitude^2 of all els
        assert(_np.isclose(Lmx_norm, 1.0)), \
            "Cholesky decomp didn't preserve trace=1!"
        self.dmDim = dmDim
        self.params = _np.empty(dmDim**2, 'd')
        # Pack the (complex lower-triangular) Lmx into the real params array:
        # diagonal entries are real; off-diagonal real/imag parts occupy the
        # (i,j) and mirrored (j,i) slots respectively.
        for i in range(dmDim):
            assert(_np.linalg.norm(_np.imag(Lmx[i, i])) < IMAG_TOL)
            self.params[i * dmDim + i] = Lmx[i, i].real  # / paramNorm == 1 as asserted above
            for j in range(i):
                self.params[i * dmDim + j] = Lmx[i, j].real
                self.params[j * dmDim + i] = Lmx[i, j].imag

    def _construct_vector(self):
        """Rebuild the dense Liouville vector (self._ptr) from self.params."""
        dmDim = self.dmDim
        # params is an array of length dmDim^2 that
        # encodes a lower-triangular matrix "Lmx" via:
        # Lmx[i,i] = params[i*dmDim + i] / param-norm  # i = 0...dmDim-2
        # Lmx[i,j] = (params[i*dmDim + j] + 1j*params[j*dmDim+i]) / param-norm  (i > j)
        param2Sum = _np.vdot(self.params, self.params)  # or "dot" would work, since params are real
        paramNorm = _np.sqrt(param2Sum)  # also the norm of *all* Lmx els
        for i in range(dmDim):
            self.Lmx[i, i] = self.params[i * dmDim + i] / paramNorm
            for j in range(i):
                self.Lmx[i, j] = (self.params[i * dmDim + j] + 1j * self.params[j * dmDim + i]) / paramNorm
        Lmx_norm = _np.trace(_np.dot(self.Lmx.T.conjugate(), self.Lmx))  # sum of magnitude^2 of all els
        assert(_np.isclose(Lmx_norm, 1.0)), "Violated trace=1 condition!"
        # The (complex, Hermitian) density matrix is built by
        # assuming Lmx is its Cholesky decomp, which makes
        # the density matrix pos-def.
        density_mx = _np.dot(self.Lmx, self.Lmx.T.conjugate())
        assert(_np.isclose(_np.trace(density_mx), 1.0)), "density matrix must be trace == 1"
        # write density matrix in given basis: = sum_i alpha_i B_i
        # ASSUME that basis is orthogonal, i.e. Tr(Bi^dag*Bj) = delta_ij
        basis_mxs = _np.rollaxis(self.basis_mxs, 2)  # shape (len(vec), dmDim, dmDim)
        vec = _np.array([_np.trace(_np.dot(M.T.conjugate(), density_mx)) for M in basis_mxs])
        # for now, assume Liouville vector should always be real (TODO: add 'real' flag later?)
        assert(_np.linalg.norm(_np.imag(vec)) < IMAG_TOL)
        vec = _np.real(vec)
        self._ptr.flags.writeable = True
        self._ptr[:] = vec[:]  # so shape is (dim,1) - the convention for spam vectors
        self._ptr.flags.writeable = False

    def set_dense(self, vec):
        """
        Set the dense-vector value of this state vector.

        Attempts to modify this state vector's parameters so that the raw
        state vector becomes `vec`. Will raise ValueError if this operation
        is not possible.

        Parameters
        ----------
        vec : array_like or State
            A numpy array representing a state vector, or a State object.

        Returns
        -------
        None
        """
        try:
            self._set_params_from_vector(vec, truncate=False)
            self.dirty = True
        except AssertionError as e:
            raise ValueError("Error initializing the parameters of this "
                             "CPTPState object: " + str(e))

    @property
    def num_params(self):
        """
        Get the number of independent parameters which specify this state vector.

        Returns
        -------
        int
            the number of independent parameters.
        """
        assert(self.dmDim**2 == self.dim)  # should at least be true without composite bases...
        return self.dmDim**2

    def to_vector(self):
        """
        Get the state vector parameters as an array of values.

        Returns
        -------
        numpy array
            The parameters as a 1D array with length num_params().
        """
        return self.params

    def from_vector(self, v, close=False, dirty_value=True):
        """
        Initialize the state vector using a 1D array of parameters.

        Parameters
        ----------
        v : numpy array
            The 1D vector of state vector parameters. Length
            must == num_params()

        close : bool, optional
            Whether `v` is close to this state vector's current
            set of parameters. Under some circumstances, when this
            is true this call can be completed more quickly.

        dirty_value : bool, optional
            The value to set this object's "dirty flag" to before exiting this
            call. This is passed as an argument so it can be updated *recursively*.
            Leave this set to `True` unless you know what you're doing.

        Returns
        -------
        None
        """
        assert(len(v) == self.num_params)
        self.params[:] = v[:]
        self._construct_vector()
        self.dirty = dirty_value

    def deriv_wrt_params(self, wrt_filter=None):
        """
        The element-wise derivative this state vector.

        Construct a matrix whose columns are the derivatives of the state vector
        with respect to a single param.  Thus, each column is of length
        dimension and there is one column per state vector parameter.

        Parameters
        ----------
        wrt_filter : list or numpy.ndarray
            List of parameter indices to take derivative with respect to.
            (None means to use all the this operation's parameters.)

        Returns
        -------
        numpy array
            Array of derivatives, shape == (dimension, num_params)
        """
        dmDim = self.dmDim
        nP = len(self.params)
        assert(nP == dmDim**2)  # number of parameters
        # v_i = trace( B_i^dag * Lmx * Lmx^dag )
        # d(v_i) = trace( B_i^dag * (dLmx * Lmx^dag + Lmx * (dLmx)^dag) )  # trace is linear, so commutes w/deriv
        # where dLmx/d[ab] is 1 in the (a,b) slot for the real (a >= b) params
        # and 1j in the (a,b) slot for the imaginary (b > a) params -- the F1/F2
        # masks below select those two triangles.
        L, Lbar = self.Lmx, self.Lmx.conjugate()
        F1 = _np.tril(_np.ones((dmDim, dmDim), 'd'))
        F2 = _np.triu(_np.ones((dmDim, dmDim), 'd'), 1) * 1j
        conj_basis_mxs = self.basis_mxs.conjugate()
        # Derivative of vector wrt params; shape == [vecLen,dmDim,dmDim] *not dealing with TP condition yet*
        # (first get derivative assuming last diagonal el of Lmx *is* a parameter, then use chain rule)
        dVdp = _np.einsum('aml,mb,ab->lab', conj_basis_mxs, Lbar, F1)  # only a >= b nonzero (F1)
        dVdp += _np.einsum('mal,mb,ab->lab', conj_basis_mxs, L, F1)  # ditto
        dVdp += _np.einsum('bml,ma,ab->lab', conj_basis_mxs, Lbar, F2)  # only b > a nonzero (F2)
        dVdp += _np.einsum('mbl,ma,ab->lab', conj_basis_mxs, L, F2.conjugate())  # ditto
        dVdp.shape = [dVdp.shape[0], nP]  # jacobian with respect to "p" params,
        # which don't include normalization for TP-constraint
        # Now get jacobian of actual params wrt the params used above. Denote the actual
        # params "P" in variable names, so p_ij = P_ij / sqrt(sum(P_xy**2))
        param2Sum = _np.vdot(self.params, self.params)
        paramNorm = _np.sqrt(param2Sum)  # norm of *all* Lmx els
        dpdP = _np.identity(nP, 'd')
        # all p_ij params == P_ij / paramNorm = P_ij / sqrt(sum(P_xy**2))
        # and so have derivs wrt *all* Pxy elements.
        for ij in range(nP):
            for kl in range(nP):
                if ij == kl:
                    # dp_ij / dP_ij = 1.0 / (sum(P_xy**2))^(1/2) - 0.5 * P_ij / (sum(P_xy**2))^(3/2) * 2*P_ij
                    #               = 1.0 / (sum(P_xy**2))^(1/2) - P_ij^2 / (sum(P_xy**2))^(3/2)
                    dpdP[ij, ij] = 1.0 / paramNorm - self.params[ij]**2 / paramNorm**3
                else:
                    # dp_ij / dP_kl = -0.5 * P_ij / (sum(P_xy**2))^(3/2) * 2*P_kl
                    #               = - P_ij * P_kl / (sum(P_xy**2))^(3/2)
                    dpdP[ij, kl] = - self.params[ij] * self.params[kl] / paramNorm**3
        # Apply the chain rule to get dVdP:
        dVdP = _np.dot(dVdp, dpdP)  # shape (vecLen, nP) - the jacobian!
        dVdp = dpdP = None  # free memory!
        assert(_np.linalg.norm(_np.imag(dVdP)) < IMAG_TOL)
        derivMx = _np.real(dVdP)
        if wrt_filter is None:
            return derivMx
        else:
            return _np.take(derivMx, wrt_filter, axis=1)

    def has_nonzero_hessian(self):
        """
        Whether this state vector has a non-zero Hessian with respect to its parameters.

        Returns
        -------
        bool
        """
        return True

    def hessian_wrt_params(self, wrt_filter1=None, wrt_filter2=None):
        """
        Construct the Hessian of this state vector with respect to its parameters.

        This function returns a tensor whose first axis corresponds to the
        flattened operation matrix and whose 2nd and 3rd axes correspond to the
        parameters that are differentiated with respect to.

        Parameters
        ----------
        wrt_filter1 : list or numpy.ndarray
            List of parameter indices to take 1st derivatives with respect to.
            (None means to use all the this operation's parameters.)

        wrt_filter2 : list or numpy.ndarray
            List of parameter indices to take 2nd derivatives with respect to.
            (None means to use all the this operation's parameters.)

        Returns
        -------
        numpy array
            Hessian with shape (dimension, num_params1, num_params2)
        """
        raise NotImplementedError("TODO: add hessian computation for CPTPState")
| 42.430168 | 113 | 0.600263 | 14,064 | 0.925872 | 0 | 0 | 361 | 0.023766 | 0 | 0 | 8,709 | 0.573338 |
5028c313079fad9b4cf51c75e556c2cffcedcdc1 | 298 | py | Python | examples/mach_opt_examples/LegacyCode/IM/machine_design/im_settingshandler.py | Severson-Group/MachEval | dbb7999188133f8744636da53cab475ae538ce80 | [
"BSD-3-Clause"
] | 6 | 2021-11-02T20:12:32.000Z | 2021-11-13T10:50:35.000Z | examples/mach_opt_examples/LegacyCode/IM/machine_design/im_settingshandler.py | Severson-Group/MachEval | dbb7999188133f8744636da53cab475ae538ce80 | [
"BSD-3-Clause"
] | 18 | 2021-11-29T20:14:55.000Z | 2022-03-02T07:17:37.000Z | examples/mach_opt_examples/LegacyCode/IM/machine_design/im_settingshandler.py | Severson-Group/MachEval | dbb7999188133f8744636da53cab475ae538ce80 | [
"BSD-3-Clause"
] | 1 | 2022-01-29T00:52:38.000Z | 2022-01-29T00:52:38.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 30 11:58:20 2021
@author: Martin Johnson
"""
import sys
sys.path.append("...")
import macheval as me
class IMSettingsHandler(me.SettingsHandler):
    """Settings handler for the induction-machine (IM) design problem.

    Placeholder: settings retrieval is not yet implemented.
    """
    def getSettings(x):
        """Return analysis settings for the IM design (not yet implemented).

        Raises
        ------
        NotImplementedError
            Always, until settings functionality is implemented.
        """
        # Previously this *returned* the NotImplementedError class as a value,
        # which callers would receive silently; raising surfaces the missing
        # implementation immediately.  TODO: implement settings functionality.
        raise NotImplementedError
| 19.866667 | 73 | 0.701342 | 142 | 0.47651 | 0 | 0 | 0 | 0 | 0 | 0 | 134 | 0.449664 |
5029b3ee53519f158d3ce03f346eb8d5c279dd5f | 1,596 | py | Python | scripts/cerealfiller_entity_and_test_pipeline.py | sedgewickmm18/functions | 69d04a67b122601c4f207ded8e872d31b2ddafc8 | [
"Apache-2.0"
] | null | null | null | scripts/cerealfiller_entity_and_test_pipeline.py | sedgewickmm18/functions | 69d04a67b122601c4f207ded8e872d31b2ddafc8 | [
"Apache-2.0"
] | null | null | null | scripts/cerealfiller_entity_and_test_pipeline.py | sedgewickmm18/functions | 69d04a67b122601c4f207ded8e872d31b2ddafc8 | [
"Apache-2.0"
] | null | null | null | import datetime as dt
import json
import os
import pandas as pd
from sqlalchemy import Column, Integer, String, Float, DateTime, Boolean, func
from iotfunctions.preprocessor import BaseTransformer
from iotfunctions.bif import IoTExpression
from iotfunctions.metadata import EntityType, make_sample_entity
from iotfunctions.db import Database
from iotfunctions.estimator import SimpleAnomaly
# Demo script: create a sample "cereal filler" entity type and exercise a
# small analytics pipeline (an expression function plus a simple anomaly
# model).  Requires a live IoT Analytics service and database connection.
# NOTE(review): bare expressions such as `entity.name` and
# `df.head(1).transpose()` only display output in a notebook/REPL; when run
# as a plain script they have no effect.
#replace with a credentials dictionary or provide a credentials file
with open('credentials.json', encoding='utf-8') as F:
    credentials = json.loads(F.read())
#create a sample entity to work with
db_schema = None #set if you are not using the default
db = Database(credentials=credentials)
# Random float columns generated for the sample entity's time-series table
numeric_columns = ['fill_time','temp','humidity','wait_time','size_sd']
table_name = 'as_sample_cereal'
entity = make_sample_entity(db=db, schema = db_schema,
                            float_cols = numeric_columns,
                            name = table_name,
                            register = True)
entity.name
#examine the sample entity
df = db.read_table(entity.name,schema=db_schema)
df.head(1).transpose()
#configure an expression function
# fill_mass is derived from temperature and humidity via this expression
expression = '510 + 15*df["temp"] + 5*df["humidity"]'
mass_fn = IoTExpression(expression=expression, output_name='fill_mass')
df = entity.exec_pipeline(mass_fn)
df.head(1).transpose()
#build an anomaly model
# NOTE(review): `features` and `targets` below are defined but never passed
# to SimpleAnomaly; the hard-coded feature list there uses 'fill_time' where
# this list has 'wait_time' -- confirm which set is intended.
features = ['temp', 'humidity', 'wait_time']
targets = ['fill_mass']
anomaly_fn = SimpleAnomaly(features=['temp','humidity','fill_time'],targets=['fill_mass'],threshold=0.01)
df = entity.exec_pipeline(mass_fn,anomaly_fn)
df.head(1).transpose()
| 36.272727 | 105 | 0.740602 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 440 | 0.275689 |
5029f3a83b6307a58318c91798649927beb20509 | 1,022 | py | Python | minst/tests/helpers.py | oriolromani/minst-dataset | 5847ac421522a393df77ca2a43acdc326f7d64e8 | [
"0BSD"
] | 44 | 2016-03-26T13:41:09.000Z | 2022-01-19T08:52:47.000Z | minst/tests/helpers.py | oriolromani/minst-dataset | 5847ac421522a393df77ca2a43acdc326f7d64e8 | [
"0BSD"
] | 37 | 2016-05-25T06:32:44.000Z | 2018-08-21T09:15:40.000Z | minst/tests/helpers.py | oriolromani/minst-dataset | 5847ac421522a393df77ca2a43acdc326f7d64e8 | [
"0BSD"
] | 8 | 2016-05-25T13:45:44.000Z | 2021-06-18T12:14:01.000Z | import pytest
import os
import numpy as np
import minst.taxonomy as tax
def __test(value, expected):
    """Generic equality check used by generated/parameterized tests."""
    assert value == expected
def __test_df_has_data(df):
    """Assert that the collected dataframe is not empty."""
    assert not df.empty
def __test_pd_output(pd_output, dataset):
    """Verify a collected dataframe: required columns, files on disk, and
    that every mapped instrument belongs to the canonical class list."""
    # Every record must expose these columns.
    for expected_col in ('audio_file', 'dataset', 'instrument'):
        assert expected_col in pd_output.columns

    # Each row must point at an existing file and carry the right dataset tag.
    for _, record in pd_output.iterrows():
        assert os.path.exists(record['audio_file'])
        assert record['dataset'] == dataset

    classmap = tax.InstrumentClassMap()

    # Map each distinct instrument name; anything the classmap rejects
    # (falsy mapping) is dropped before the membership check.
    unique_instruments = pd_output["instrument"].unique()
    mapped = [classmap[name] for name in unique_instruments if classmap[name]]
    inst_found = np.array([(name in classmap.classnames) for name in mapped])
    assert all(inst_found), "Dataset {} is missing: {}".format(
        dataset, inst_found[inst_found == 0])
| 28.388889 | 73 | 0.687867 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 246 | 0.240705 |
502b24ddf0d51348bd70135dbd9dd0e4f5a0717b | 2,161 | py | Python | pwncat/modules/linux/enumerate/system/selinux.py | Mitul16/pwncat | b8d7876a9779c2c7796a9a29110d3f1cda721dff | [
"MIT"
] | 1,454 | 2020-05-07T02:20:52.000Z | 2022-03-31T21:32:22.000Z | pwncat/modules/linux/enumerate/system/selinux.py | akr3ch/pwncat | d67865bdaac60dd0761d0698062e7b443a62c6db | [
"MIT"
] | 187 | 2020-05-08T06:26:01.000Z | 2022-03-07T21:15:29.000Z | pwncat/modules/linux/enumerate/system/selinux.py | akr3ch/pwncat | d67865bdaac60dd0761d0698062e7b443a62c6db | [
"MIT"
] | 184 | 2020-05-07T02:31:58.000Z | 2022-03-31T09:11:59.000Z | #!/usr/bin/env python3
from typing import Dict
import rich.markup
from pwncat.db import Fact
from pwncat.platform.linux import Linux
from pwncat.modules.enumerate import Schedule, EnumerateModule
class SELinuxState(Fact):
    """Fact describing the SELinux configuration found on the target host."""

    def __init__(self, source, state, status):
        super().__init__(source=source, types=["system.selinux"])

        # Overall state plus the full key/value dump parsed from `sestatus`.
        self.state: str = state
        self.status: Dict[str, str] = status

    def title(self, session):
        """One-line colored summary of the SELinux state."""
        if self.state == "enabled":
            colored = "[red]enabled[/red]"
        elif self.state == "disabled":
            colored = "[green]disabled[/green]"
        else:
            colored = f"[yellow]{rich.markup.escape(self.state)}[/yellow]"
        return "SELinux is " + colored

    @property
    def mode(self) -> str:
        """Current enforcement mode (lower-cased), or 'unknown' if absent."""
        return self.status.get("Current mode", "unknown").lower()

    @property
    def enabled(self) -> bool:
        """True when the reported state is 'enabled' (case-insensitive)."""
        return self.state.lower() == "enabled"

    def description(self, session):
        """Aligned multi-line dump of every sestatus key/value pair."""
        width = max(len(x) for x in self.status) + 1
        lines = [f"{key+':':{width}} {value}" for key, value in self.status.items()]
        return "\n".join(lines)
class Module(EnumerateModule):
    """
    Retrieve the current SELinux state
    """
    # Fact types produced, run-once schedule, Linux targets only.
    PROVIDES = ["system.selinux"]
    SCHEDULE = Schedule.ONCE
    PLATFORM = [Linux]
    def enumerate(self, session):
        """Run `sestatus` on the target and yield a single SELinuxState fact."""
        # `sestatus` may be absent (no SELinux) or blocked; either way there
        # is nothing to report.
        try:
            output = session.platform.run("sestatus", capture_output=True, text=True)
        except (FileNotFoundError, PermissionError):
            return
        if output:
            output = output.stdout.strip()
            status = {}
            # Parse "Key: value" lines, collapsing tabs and repeated spaces.
            # Note: colons *inside* a value end up replaced by spaces, since
            # the remainder is re-joined with " ".
            for line in output.split("\n"):
                line = line.strip().replace("\t", " ")
                values = " ".join([x for x in line.split(" ") if x != ""]).split(":")
                key = values[0].rstrip(":").strip()
                value = " ".join(values[1:])
                status[key] = value.strip()
            if "SELinux status" in status:
                state = status["SELinux status"]
            else:
                state = "unknown"
            yield SELinuxState(self.name, state, status)
| 28.434211 | 85 | 0.559 | 1,957 | 0.905599 | 818 | 0.378528 | 189 | 0.08746 | 0 | 0 | 376 | 0.173994 |
502b6bdebbd7460f59282e2c237e23c39d25bf84 | 2,975 | py | Python | rally_openstack/task/scenarios/gnocchi/archive_policy_rule.py | jogeo/rally-openstack | 83437e7c5925d5d647cd28f1821b6d51687b0123 | [
"Apache-2.0"
] | null | null | null | rally_openstack/task/scenarios/gnocchi/archive_policy_rule.py | jogeo/rally-openstack | 83437e7c5925d5d647cd28f1821b6d51687b0123 | [
"Apache-2.0"
] | null | null | null | rally_openstack/task/scenarios/gnocchi/archive_policy_rule.py | jogeo/rally-openstack | 83437e7c5925d5d647cd28f1821b6d51687b0123 | [
"Apache-2.0"
] | 1 | 2021-08-10T03:11:51.000Z | 2021-08-10T03:11:51.000Z | # Copyright 2017 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.gnocchi import utils as gnocchiutils
"""Scenarios for Gnocchi archive policy rule."""
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="GnocchiArchivePolicyRule.list_archive_policy_rule")
class ListArchivePolicyRule(gnocchiutils.GnocchiBase):
    """Rally scenario: list Gnocchi archive policy rules as a regular user."""
    def run(self):
        """List archive policy rules."""
        self.gnocchi.list_archive_policy_rule()
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
    context={"admin_cleanup@openstack": ["gnocchi.archive_policy_rule"]},
    name="GnocchiArchivePolicyRule.create_archive_policy_rule")
class CreateArchivePolicyRule(gnocchiutils.GnocchiBase):
    """Rally scenario: create an archive policy rule (cleaned up by admin context)."""
    def run(self, metric_pattern="cpu_*", archive_policy_name="low"):
        """Create archive policy rule.
        :param metric_pattern: Pattern for matching metrics
        :param archive_policy_name: Archive policy name
        """
        # Rally-generated name keeps concurrent iterations from colliding.
        name = self.generate_random_name()
        self.admin_gnocchi.create_archive_policy_rule(
            name,
            metric_pattern=metric_pattern,
            archive_policy_name=archive_policy_name)
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
    context={"admin_cleanup@openstack": ["gnocchi.archive_policy_rule"]},
    name="GnocchiArchivePolicyRule.create_delete_archive_policy_rule")
class CreateDeleteArchivePolicyRule(gnocchiutils.GnocchiBase):
    """Rally scenario: create an archive policy rule, then delete it again."""
    def run(self, metric_pattern="cpu_*", archive_policy_name="low"):
        """Create archive policy rule and then delete it.
        :param metric_pattern: Pattern for matching metrics
        :param archive_policy_name: Archive policy name
        """
        # Rally-generated name keeps concurrent iterations from colliding.
        name = self.generate_random_name()
        self.admin_gnocchi.create_archive_policy_rule(
            name,
            metric_pattern=metric_pattern,
            archive_policy_name=archive_policy_name)
        self.admin_gnocchi.delete_archive_policy_rule(name)
| 40.753425 | 78 | 0.743193 | 1,262 | 0.424202 | 0 | 0 | 2,094 | 0.703866 | 0 | 0 | 1,476 | 0.496134 |
502f6109a3a684baefcd13925e63aa3cf73c0b10 | 515 | py | Python | count words problem/count_words.py | silasjimmy/Machine-Learning-course | f80e98919c982f73f66f0d8a4af8ebffaf7c8e43 | [
"MIT"
] | null | null | null | count words problem/count_words.py | silasjimmy/Machine-Learning-course | f80e98919c982f73f66f0d8a4af8ebffaf7c8e43 | [
"MIT"
] | null | null | null | count words problem/count_words.py | silasjimmy/Machine-Learning-course | f80e98919c982f73f66f0d8a4af8ebffaf7c8e43 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 21 08:20:55 2021
@author: silasjimmy
"""
def count_words(s, n):
    """Return the n most frequent words in s, ties broken alphabetically.

    Args:
        s: input string of whitespace-separated words (case-sensitive).
        n: number of (word, count) pairs to return.

    Returns:
        List of (word, count) tuples, most frequent first.
    """
    from collections import Counter

    # split() (no separator) handles runs of spaces/tabs/newlines without
    # producing empty tokens, unlike the original split(' ').  Counter
    # tallies in one O(len(s)) pass instead of one s.count() scan per
    # distinct word.
    counted_words = list(Counter(s.split()).items())
    counted_words.sort(key=lambda pair: (-pair[1], pair[0]))
    return counted_words[:n]
def test_run():
    """Print the top-3 word counts for two sample sentences."""
    print(count_words('cat bat mat cat bat cat', 3))
    print(count_words('betty bought a bit of butter but the butter was bitter', 3))
# Run the demo when executed as a script.
if __name__ == '__main__':
    test_run()
| 23.409091 | 83 | 0.615534 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 203 | 0.394175 |
502f754cdffdb05797ba5ba3fc5cba7ad3499d41 | 250 | py | Python | segitiga/hitung-luas-segitiga.py | Yurimahendra/latihan-big-data | 5ea495bc1187c4d99a83654f8377d73e72eb63d2 | [
"MIT"
] | null | null | null | segitiga/hitung-luas-segitiga.py | Yurimahendra/latihan-big-data | 5ea495bc1187c4d99a83654f8377d73e72eb63d2 | [
"MIT"
] | null | null | null | segitiga/hitung-luas-segitiga.py | Yurimahendra/latihan-big-data | 5ea495bc1187c4d99a83654f8377d73e72eb63d2 | [
"MIT"
] | null | null | null | # jumlah segitiga
# Number of triangles (jumlah segitiga).
n = 123
# Base length of one triangle.
alas = 30
# Height of one triangle.
tinggi = 18
# Area of a single triangle: base * height / 2.
luas = alas * tinggi * 1/2
# Total area over all n triangles.
luastotal = n * luas
print('luas total : ', luastotal,'satuan luas')
5031531e9f322bfee275b72a63ab6ef16dc37f80 | 4,210 | py | Python | pygooglehomenotifier/googlehomenotifier.py | k-sh/pygooglehomenotifier | b1e405d1813be049506119f9abb3145d18620217 | [
"MIT"
] | null | null | null | pygooglehomenotifier/googlehomenotifier.py | k-sh/pygooglehomenotifier | b1e405d1813be049506119f9abb3145d18620217 | [
"MIT"
] | null | null | null | pygooglehomenotifier/googlehomenotifier.py | k-sh/pygooglehomenotifier | b1e405d1813be049506119f9abb3145d18620217 | [
"MIT"
] | null | null | null | import queue
import textwrap
import threading
import time
import urllib.parse
import pychromecast
def create_notify_url(text: str, lang: str, ttsspeed: float):
    """Build a Google Translate TTS URL that speaks *text* in *lang*
    at the given speech rate."""
    query_fields = dict(
        ie="UTF-8",
        q=text,
        tl=lang,
        total=1,
        idx=0,
        textlen=len(text),
        client="tw-ob",
        prev="input",
        ttsspeed=ttsspeed,
    )
    # quote (not the default quote_plus) so spaces become %20 rather than '+'.
    query = urllib.parse.urlencode(query_fields, quote_via=urllib.parse.quote)
    return f"https://translate.google.com/translate_tts?{query}"
def split_text(text: str, lang: str):
    """Chop *text* into chunks short enough for one TTS request.

    NOTE: *lang* is accepted for interface symmetry but is not used here.
    """
    chunk_limit = 200
    return textwrap.wrap(text, width=chunk_limit)
class GoogleHome(pychromecast.Chromecast):
    """Chromecast wrapper that queues TTS/mp3 URLs and plays them in order.

    A single background thread drains ``mp3_url_queue``; it is created
    lazily by notify()/play() and clears itself once the queue is empty.
    """
    def __init__(self, host, port = None, device = None):
        # Playback worker (None while idle) and the pending-URL queue.
        self.thread = None
        self.mp3_url_queue = queue.Queue()
        super().__init__(host, port, device)
    def _play_mp3(self, timeout: int):
        """Worker: play the next queued URL, then recurse until the queue drains.

        NOTE(review): this recurses once per queued track, so a very long
        queue could hit the recursion limit — consider a loop instead.
        """
        if self.mp3_url_queue.empty():
            self.thread = None
            return
        url = self.mp3_url_queue.get()
        self.media_controller.play_media(url, "audio/mp3")
        # wait start playing
        time.sleep(1)
        self._block_while_playing_queue(timeout)
        # play next mp3
        self._play_mp3(timeout)
    def _block_while_playing_queue(self, timeout: int):
        """Poll every 0.5s until playback leaves PLAYING or *timeout* elapses.

        A timeout <= 0 means wait without a time limit.
        """
        self.media_controller.block_until_active()
        t1 = time.time()
        while True:
            status = self.media_controller.status
            player_state = status.player_state
            if player_state != "PLAYING":
                break
            if timeout > 0:
                t2 = time.time()
                if t2 - t1 >= timeout:
                    break
            time.sleep(0.5)
    def notify(self, text: str, lang: str = "en", ttsspeed: float = 1.0, timeout: int = 0):
        """Queue Google-Translate TTS audio for *text* and start the worker if idle."""
        for line in split_text(text, lang):
            url = create_notify_url(line, lang, ttsspeed)
            self.mp3_url_queue.put(url)
        if self.thread == None:
            self.thread = threading.Thread(target = self._play_mp3, args = ([timeout]))
            self.thread.start()
    def play(self, url: str, timeout: int = 0):
        """Queue an arbitrary mp3 *url* and start the worker if idle."""
        if url != None:
            self.mp3_url_queue.put(url)
        if self.thread == None:
            self.thread = threading.Thread(target = self._play_mp3, args = ([timeout]))
            self.thread.start()
    def pause(self):
        # Delegate straight to the cast media controller.
        self.media_controller.pause()
    def resume(self):
        # Delegate straight to the cast media controller.
        self.media_controller.play()
    def block_while_playing(self, timeout: int = 0):
        """Block the caller until the queue empties (or *timeout* seconds pass),
        then join the worker thread.

        NOTE(review): the wait loop has no sleep, so it busy-spins a CPU core
        while waiting.
        """
        t1 = time.time()
        while not self.mp3_url_queue.empty():
            if timeout > 0:
                t2 = time.time()
                elapsed_t = t2 - t1
                if elapsed_t >= timeout:
                    break
            else:
                pass
        if self.thread != None:
            self.thread.join()
    def is_playing(self):
        """Return True while URLs are queued or the worker thread is alive."""
        if not self.mp3_url_queue.empty():
            return True
        if self.thread != None:
            return self.thread.is_alive()
        return False
def get_googlehomes(
        friendly_name = None,
        ipaddr = None,
        uuid = None,
        tries = None,
        retry_wait = None,
        timeout = None
    ):
    """Discover Google Home devices, optionally filtered by name, ip or uuid.

    Returns a list of connected GoogleHome instances (possibly empty).
    """
    if ipaddr is not None:
        # Direct connection: no mDNS discovery needed.
        googlehome = GoogleHome(ipaddr)
        # A filter mismatch on a directly-addressed device yields no result.
        if friendly_name is not None and googlehome.name != friendly_name:
            return []
        if uuid is not None and str(googlehome.uuid) != uuid:
            return []
        return [googlehome]
    ccs, browser = pychromecast.get_chromecasts(tries, retry_wait, timeout)
    googlehomes = []
    for cc in ccs:
        # Bug fix: a non-matching cast used to abort the whole scan by
        # returning an empty list; it is now skipped so later matches
        # are still collected.
        if friendly_name is not None and cc.name != friendly_name:
            continue
        if uuid is not None and str(cc.uuid) != uuid:
            continue
        cc.wait()
        googlehome = GoogleHome(
            host = cc.socket_client.host,
            port = cc.socket_client.port,
            device = cc.device,
        )
        googlehomes.append(googlehome)
    return googlehomes
| 29.440559 | 91 | 0.557482 | 2,422 | 0.575297 | 0 | 0 | 0 | 0 | 0 | 0 | 263 | 0.06247 |
5033393de69a8305614d2ab29a3b1c3d9b9a9cf3 | 353 | py | Python | django_tgbot/types/userprofilephotos.py | purwowd/django-tgbot | 6712ad2e9986c0961ad402a1d2e37be39e2f5fb4 | [
"MIT"
] | 52 | 2020-04-05T11:06:21.000Z | 2022-03-21T05:29:15.000Z | django_tgbot/types/userprofilephotos.py | armanexplorer/django-tgbot | e89f34b6a25beb9473c9e162ec8c161c14cd4cd6 | [
"MIT"
] | 11 | 2020-09-02T00:24:13.000Z | 2022-03-22T06:09:36.000Z | django_tgbot/types/userprofilephotos.py | armanexplorer/django-tgbot | e89f34b6a25beb9473c9e162ec8c161c14cd4cd6 | [
"MIT"
] | 14 | 2020-09-01T23:31:54.000Z | 2022-01-30T07:03:52.000Z | from . import BasicType
class UserProfilePhotos(BasicType):
    """Telegram ``UserProfilePhotos`` type (total_count plus photo pages)."""

    fields = {'total_count': int}

    def __init__(self, obj=None):
        super().__init__(obj)
# Imported after the class definition — presumably to avoid a circular
# import between the two type modules; confirm before moving to the top.
from . import photosize
UserProfilePhotos.fields.update({
    'photos': {
        'class': photosize.PhotoSize,
        'array_of_array': True
    }
})
50346253e5a2a4c18cf434df1441cd57e84a1e8f | 8,171 | py | Python | pyexocross/exomol/exomoldef.py | ucl-exoplanets/pyexocross | 703341cd0fddafcbb04e935c89ddc9d02dda9f59 | [
"BSD-3-Clause"
] | null | null | null | pyexocross/exomol/exomoldef.py | ucl-exoplanets/pyexocross | 703341cd0fddafcbb04e935c89ddc9d02dda9f59 | [
"BSD-3-Clause"
] | null | null | null | pyexocross/exomol/exomoldef.py | ucl-exoplanets/pyexocross | 703341cd0fddafcbb04e935c89ddc9d02dda9f59 | [
"BSD-3-Clause"
] | 1 | 2021-01-15T12:54:04.000Z | 2021-01-15T12:54:04.000Z | from taurex.log import Logger
class LinesReader:
    """Sequential cursor over a list of text lines with typed reads.

    Each read_* call returns the value at the cursor and advances it by
    *skip* lines (default 1).
    """
    def __init__(self, lines):
        self._lines = lines
        self._count = 0
    def skip(self, num=1):
        """Advance the cursor by *num* lines without reading."""
        self._count += num
    def read_int(self, skip=1):
        """Read the current line as an int, then advance by *skip*."""
        val = int(self._lines[self._count])
        self.skip(skip)
        return val
    def read_float(self, skip=1):
        """Read the current line as a float, then advance by *skip*."""
        val = float(self._lines[self._count])
        self.skip(skip)
        return val
    def read_float_array(self, skip=1):
        """Read the current line as whitespace-separated floats.

        Bug fix: *skip* was previously accepted but ignored; it is now
        forwarded so the cursor advances consistently with the other readers.
        """
        line = self.read_string(skip=skip)
        split = line.split()
        return [float(s) for s in split]
    def read_string(self, skip=1):
        """Return the current line verbatim, then advance by *skip*."""
        val = self._lines[self._count]
        self.skip(skip)
        return val
    def read_bool(self, skip=1):
        """Read the current line as an int and return True iff it equals 1."""
        val = int(self._lines[self._count])
        self.skip(skip)
        return val == 1
    def reset(self):
        """Rewind the cursor to the first line.

        Bug fix: the original definition was missing ``self`` and raised a
        TypeError on every call.
        """
        self._count = 0
class BroadenerData:
    """Pressure-broadener metadata parsed from an ExoMol .def file."""
    def __init__(self, molecule, filename, Jmax, default_gamma, default_n):
        self._molecule = molecule.strip()
        self._filename = filename
        self._default_gamma = default_gamma
        self._default_n = default_n
        self._Jmax = Jmax
        # Quanta-code labels in registration order, and the per-code quanta.
        self._avail_codes = []
        self._quanta={}
    def add_code(self, quanta_code, quanta):
        """Register a quanta code and its quantum-number labels.

        The stored label list is prefixed with 'J"'.  Bug fix: the original
        insert() mutated the caller's list in place; a copy is stored now.
        """
        self._avail_codes.append(quanta_code)
        self._quanta[quanta_code] = ['J"'] + list(quanta)
    @property
    def molecule(self):
        """Broadening species name (whitespace-stripped)."""
        return self._molecule
    @property
    def availableCodes(self):
        """Quanta codes registered via add_code, in order."""
        return self._avail_codes
    def generate_input(self, maximum_model='JJ', broadener_path='.'):
        """Build an exocross BroadenerInput; 'JJ' type when an 'a1' code exists."""
        from .exocrosswriter import BroadenerInput
        import os
        bb = BroadenerInput(self._molecule, self._default_gamma,
                            self._default_n, filename=os.path.join(broadener_path, self._filename),
                            broadener_type='JJ' if 'a1' in self._avail_codes else 'J')
        return bb
    def generate_exomolbroadener(self, filename=None):
        """Build an ExomolBroadener configured with these defaults and quanta."""
        from .exomolbroads import ExomolBroadener
        return ExomolBroadener(self._default_gamma, self._default_n,label_defs=self._quanta,filename=filename)
class ExomolDef(Logger):
    """Parser for an ExoMol ``.def`` definition file.

    The file is read once at construction time, ``#`` comments are stripped,
    and the parsed metadata (molecule, line list, quanta layout, broadeners)
    is exposed through properties and factory helpers.
    """
    def __init__(self, exomol_def_file):
        super().__init__(self.__class__.__name__)
        self.info(f'Opening {exomol_def_file}')
        with open(exomol_def_file, 'r') as f:
            unclean_exocross_lines = f.read().splitlines()
        # Strip inline '#' comments and surrounding whitespace on every line.
        self.exocross_lines = [s.split('#')[0].strip()
                               for s in unclean_exocross_lines]
        self.parse_definition()
    def parse_definition(self):
        """Walk the cleaned lines in .def order and populate the private fields."""
        lr = LinesReader(self.exocross_lines)
        if lr.read_string() != 'EXOMOL.def':
            raise IOError('Incorrect EXOMOL def header')
        lr.skip(1)
        self._molecule_slug = lr.read_string()
        self._linelist_name = lr.read_string()
        self._version_number = lr.read_string()
        self._inchikey = lr.read_string()
        self._natoms = lr.read_int()
        self.info(f'Molecule is {self._molecule_slug}')
        self.info(f'Linelist: {self._linelist_name} '
                  f'Version: {self._version_number}')
        # Scan forward until a line parses as at least two floats; the first
        # value on that line is taken as the molecular mass.
        while(True):
            try:
                arr = lr.read_float_array()
                self._mass = arr[0]
                test = arr[1]
                break
            except (IndexError, ValueError, ):
                continue
        self._symmetry_group = lr.read_string()
        num_irr = lr.read_int()
        # Three lines per irreducible representation are skipped wholesale.
        lr.skip(num_irr * 3)
        self._max_temp = lr.read_float()
        self._num_broadeners = lr.read_int()
        self._dipole_avail = lr.read_bool()
        self._no_cross = lr.read_int()
        self._no_ktab = lr.read_int()
        self._life_avail = lr.read_bool()
        self._landeg_avail = lr.read_bool()
        self._num_states = lr.read_int()
        num_cases = lr.read_int()
        # {case label: [(quantum label, format token, description), ...]}
        self._quanta_cases = {}
        for case in range(num_cases):
            case_label = lr.read_string()
            no_quanta = lr.read_int()
            quanta_definition = []
            for q in range(no_quanta):
                label = lr.read_string()
                form =lr.read_string()
                # Keep only the second word of the format line (e.g. 'i7').
                form = form.split()[1].strip()
                descrp = lr.read_string()
                quanta_definition.append((label, form, descrp))
            self._quanta_cases[case_label] = quanta_definition
        self._total_transitions = lr.read_int()
        self._num_trans_files = lr.read_int()
        self._max_wavenumber = lr.read_float()
        self._highest_complete = lr.read_float()
        self._max_temp_q = lr.read_float()
        # skip=2: read the value, then skip one additional line.
        self._t_step = lr.read_float(skip=2)
        self._default_gamma = lr.read_float()
        self._default_n = lr.read_float()
        self._broadener_defs = {}
        if self._num_broadeners > 0:
            for b in range(self._num_broadeners):
                broadener_label = lr.read_string()
                self.info(f'Reading broadener {broadener_label}')
                broadener_filename = lr.read_string()
                jmax = lr.read_int()
                default_gamma = lr.read_float()
                default_n = lr.read_float()
                new_broad = BroadenerData(broadener_label, broadener_filename,
                                          jmax, default_gamma, default_n)
                self._broadener_defs[broadener_label] = new_broad
                n_broad_quanta_set = lr.read_int()
                for x in range(n_broad_quanta_set):
                    code_label = lr.read_string(skip=2)
                    no_quanta = lr.read_int()
                    quanta =[lr.read_string() for x in range(no_quanta)]
                    new_broad.add_code(code_label, quanta)
    def _pandas_state_fwf(self):
        """Build (headers, widths, dtype) for fixed-width parsing of the .states file."""
        import re
        import numpy as np
        # Leading columns: state index, energy, total degeneracy, J.
        widths = [12,12,6,7]
        headers = ['i', 'E', 'g_tot', 'J']
        if self._life_avail:
            widths.append(12)
            headers.append('lftime')
        if self._landeg_avail:
            widths.append(12)
            headers.append('lande-g')
        # Let pandas auto determine above types
        # We will be specific about the quanta
        dtype = {}
        form_conv = {'d' : np.int64,
                     'f' : np.float64,
                     's' : str,
                     'i' : np.int64 }
        for case in self._quanta_cases.values():
            for label, form, desc in case:
                headers.append(label)
                # Column width is the number embedded in the format token
                # (e.g. 'i7' -> 7); its last character selects the dtype.
                wid = re.findall(r'\d+',form)[0]
                widths.append(int(wid))
                typ = form[-1].strip()
                dtype[label] = form_conv[typ]
        # Widen all interior columns by one to absorb the separating space.
        widths[1:-1] = [w+1 for w in widths[1:-1]]
        return headers, widths , dtype
    def read_state(self, state_filename):
        """Return an ExomolStates reader configured for this definition."""
        from .exomolstate import ExomolStates
        return ExomolStates(state_filename, self._pandas_state_fwf())
    @property
    def maximumTemperature(self):
        # Maximum temperature read from the .def file.
        return self._max_temp
    @property
    def maximumPartitionTemperature(self):
        # Maximum temperature associated with the partition function.
        return self._max_temp_q
    @property
    def maximumWavenumber(self):
        return self._max_wavenumber
    @property
    def filePrefix(self):
        # '<molecule slug>__<linelist>' prefix shared by the data files.
        return f'{self._molecule_slug}__{self._linelist_name}'
    @property
    def availableBroadeners(self):
        return list(self._broadener_defs.keys())
    def create_broadeners(self, broadener):
        """Return the exocross input for *broadener*; KeyError if unknown."""
        if broadener not in self.availableBroadeners:
            raise KeyError(f'Broadener with name {broadener} not available')
        else:
            return self._broadener_defs[broadener].generate_input()
    def create_exocross_input(self, path='.'):
        """Build a pre-filled ExocrossInput for this molecule and line list."""
        from .exocrosswriter import ExocrossInput
        ex = ExocrossInput(self._molecule_slug, linelist=self._linelist_name,
                           path=path, file_prefix=self.filePrefix)
        ex.set_molar_mass(self._mass)
        ex.set_range([0.01, self.maximumWavenumber])
        return ex
| 30.602996 | 110 | 0.572268 | 8,134 | 0.995472 | 0 | 0 | 561 | 0.068657 | 0 | 0 | 466 | 0.057031 |
50347b2a42cf82e77fe67d54a2ea1039a74341ec | 2,374 | py | Python | web_parsers/parsers/base.py | invanalabs/web-parser | dca9c6354317ec7187f46fd270092372b39f63f8 | [
"Apache-2.0"
] | 1 | 2019-10-06T23:11:32.000Z | 2019-10-06T23:11:32.000Z | web_parsers/parsers/base.py | crawlerflow/extraction-engine | dca9c6354317ec7187f46fd270092372b39f63f8 | [
"Apache-2.0"
] | 2 | 2020-03-11T09:33:03.000Z | 2020-03-18T21:12:28.000Z | web_parsers/parsers/base.py | crawlerflow/extraction-engine | dca9c6354317ec7187f46fd270092372b39f63f8 | [
"Apache-2.0"
] | null | null | null | import logging
logger = logging.getLogger(__name__)
class ParserBase:
    """Common machinery for parsers that turn raw text into a tree and
    feed it to a manifest of extractors."""

    # Kwarg name under which the parsed tree is handed to extractor classes;
    # concrete subclasses must override this.
    selector_key = None

    def __init__(self, string_data, url=None, extractor_manifest=None):
        self.string_data = string_data
        self.url = url
        self.extractor_manifest = extractor_manifest

    def parse_data(self, string_data):
        """Convert the raw string into an html/xml tree (subclass hook)."""
        raise NotImplementedError()

    def get_selector_key(self):
        """Return the subclass-provided selector key, failing loudly if unset."""
        if self.selector_key is None:
            raise Exception("selector_key should be assigned to Parser classes")
        return self.selector_key

    def run_extractor(self, xml_tree=None, extractor=None):
        """Run one extractor against the tree; failures map to {id: None}."""
        extractor_id = extractor.extractor_id
        logger.info("Running extractor:'{}' on url:{}".format(extractor_id, self.url))
        try:
            call_kwargs = {
                'url': self.url,
                self.get_selector_key(): xml_tree,
                'extractor': extractor,
                'extractor_id': extractor_id,
            }
            return extractor.extractor_cls(**call_kwargs).run()
        except Exception as error:
            logger.error(
                "Failed to extract data from the extractor '{extractor_id}:{extractor_type}' on url "
                "'{url}' with error: '{error}'".format(
                    extractor_id=extractor_id,
                    extractor_type=extractor.extractor_type,
                    url=self.url,
                    error=error)
            )
            return {extractor_id: None}

    @staticmethod
    def flatten_extracted_data(all_extracted_data):
        """Merge the per-extractor result dicts into one flat dict."""
        flat = {}
        for per_extractor in all_extracted_data.values():
            flat.update(per_extractor)
        return flat

    def run_extractors(self, flatten_extractors=False):
        """Parse once, run every extractor in the manifest, optionally flatten."""
        tree = self.parse_data(self.string_data)
        combined = {}
        for extractor in self.extractor_manifest.extractors:
            combined.update(self.run_extractor(extractor=extractor, xml_tree=tree))
        if flatten_extractors is True:
            return self.flatten_extracted_data(combined)
        return combined
| 34.911765 | 102 | 0.613311 | 2,318 | 0.976411 | 0 | 0 | 232 | 0.097725 | 0 | 0 | 335 | 0.141112 |
503945544f0f8af4e632fde69205e3dc522fc1a7 | 562 | py | Python | kolejka/judge/tasks/list_files.py | zielinskit/kolejka-judge | 571df05b12c5a4748d7a2ca4c217b0042acf6b48 | [
"MIT"
] | null | null | null | kolejka/judge/tasks/list_files.py | zielinskit/kolejka-judge | 571df05b12c5a4748d7a2ca4c217b0042acf6b48 | [
"MIT"
] | null | null | null | kolejka/judge/tasks/list_files.py | zielinskit/kolejka-judge | 571df05b12c5a4748d7a2ca4c217b0042acf6b48 | [
"MIT"
] | null | null | null | import glob
import itertools
from functools import partial
from typing import Tuple, Optional
from kolejka.judge.tasks.base import TaskBase
class ListFiles(TaskBase):
    """Task that expands glob patterns and stores the matched paths
    under *variable_name* in the execution environment."""

    def __init__(self, *args, variable_name):
        self.files = list(args)
        self.variable_name = variable_name

    def execute(self, environment) -> Tuple[Optional[str], Optional[object]]:
        # Expand every pattern recursively and concatenate the matches in
        # the order the patterns were given.
        matched = []
        for pattern in self.files:
            matched.extend(glob.glob(pattern, recursive=True))
        environment.set_variable(self.variable_name, matched)
        return None, None
| 29.578947 | 104 | 0.729537 | 418 | 0.743772 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
50395cf2321c71ca4f29379c96be1b850b7822ca | 5,775 | py | Python | backend/server/server.py | dballesteros7/starthack2016 | 98866b865dcb3c1b307d8119f7d2defb234f30ae | [
"MIT"
] | null | null | null | backend/server/server.py | dballesteros7/starthack2016 | 98866b865dcb3c1b307d8119f7d2defb234f30ae | [
"MIT"
] | 6 | 2021-03-18T20:16:38.000Z | 2022-01-13T00:37:14.000Z | backend/server/server.py | dballesteros7/starthack2016 | 98866b865dcb3c1b307d8119f7d2defb234f30ae | [
"MIT"
] | null | null | null | import os
import json
from flask_sqlalchemy import SQLAlchemy
from flask import Flask, request, jsonify
from flask.views import MethodView
from flask.ext.cors import CORS
from database import ElasticStorage, RedisClient
from article import Article as ESArticle
app = Flask(__name__)
# Allow cross-origin requests from the frontend.
CORS(app)
#sql_config = json.loads(os.getenv('VCAP_SERVICES'))
#app.config['SQLALCHEMY_DATABASE_URI'] = sql_config['sqldb'][0]['credentials']['uri']
# NOTE(review): database credentials are hard-coded below; move them to an
# environment variable or secrets store.
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://alius_admin:starthack16@alius.czpsbuodkwp9.eu-west-1.rds.amazonaws.com:5432/alius'
db = SQLAlchemy(app)
# Maps a domain substring (matched against article URLs in /search) to a
# human-readable source name.
source_map = {
    'cnn.com': 'CNN', 'nytimes.com': 'New York Times',
    'huffingtonpost.com': 'The Huffington Post',
    'huffingtonpost.ca': 'The Huffington Post', 'theguardian.com': 'The Guardian',
    'foxnews.com': 'Fox News', 'forbes.com': 'Forbes',
    'timesofindia.indiatimes.com': 'The Times of India', 'bbc.co.uk': 'BBC',
    'usatoday.com': 'USA Today', 'bloomberg.com': 'Bloomberg',
    'wsj.com': 'The Wall Street Journal', 'reuters.com': 'Reuters',
    'nbcnews.com': 'NBC News', 'money.cnn.com': 'CNN Money',
    'indianexpress.com': 'The Indian Express', 'cbsnews.com': 'CBS News',
    'abcnews.go.com': 'ABC News', 'latimes.com': 'LA Times',
    'time.com': 'Time', 'nypost.com': 'NY Post', 'cnbc.com': 'CNBC',
    'thehindu.com': 'The Hindu', 'chron.com': 'CHRON',
    'theatlantic.com': 'The Atlantic', 'breitbart.com': 'Breitbart',
    'sfgate.com': 'SF Gate', 'usnews.com': 'US News',
    'hindustantimes.com': 'Hindustan Times', 'hollywoodreporter.com': 'The Hollywood Reporter',
    'fortune.com': 'Fortune', 'chicagotribune.com': 'Chicago Tribune',
    'news.com.au': 'news.com.au'
}
class Users(db.Model):
    """Aggregated emotion-tone profile for a single user."""
    user_id = db.Column(db.String(1024), primary_key=True)
    anger = db.Column(db.Float)
    disgust = db.Column(db.Float)
    fear = db.Column(db.Float)
    joy = db.Column(db.Float)
    sadness = db.Column(db.Float)
    total_articles = db.Column(db.Integer)
    def __init__(self, user_id, article_id):
        """Seed a new profile from the first article the user read."""
        self.user_id = user_id
        # NOTE(review): `es` looks unused, but get_instance() may initialize
        # the shared Elasticsearch connection that ESArticle.get relies on —
        # confirm before removing.
        es = ElasticStorage.get_instance(dev=False)
        doc = ESArticle.get(article_id)
        self.anger = doc.tone.anger
        self.disgust = doc.tone.disgust
        self.fear = doc.tone.fear
        # Bug fix: joy and sadness were copy-pasted from the anger line and
        # recorded the wrong tone scores.
        self.joy = doc.tone.joy
        self.sadness = doc.tone.sadness
        self.total_articles = 1
    def to_dict(self):
        """Serialize the profile for the JSON API."""
        return {'user_id': self.user_id, 'anger': self.anger, 'disgust': self.disgust, 'fear': self.fear,
                'joy': self.joy, 'sadness': self.sadness, 'total_articles': self.total_articles}
class Articles(db.Model):
    """Click counter persisted per article id."""
    article_id = db.Column(db.String(1024), primary_key=True)
    clicks = db.Column(db.Integer)
    def __init__(self, article_id):
        self.article_id = article_id
        # The first sighting counts as one click.
        self.clicks = 1
    def to_dict(self):
        """Serialize the counter for the JSON API."""
        return {'article_id': self.article_id, 'clicks': self.clicks}
class UserAPI(MethodView):
    """REST endpoints for per-user aggregated emotion-tone statistics."""
    def get(self):
        """Return every user's aggregated tone profile as a JSON list."""
        all_users = Users.query.all()
        return json.dumps([x.to_dict() for x in all_users])
    def post(self):
        """Record that a user read an article, accumulating its tone scores."""
        # Parse the request body once instead of decoding it per field.
        payload = json.loads(request.data.decode('utf-8'))
        user_id = payload['user_id']
        article_id = payload['article_id']
        user = Users.query.filter_by(user_id=user_id).first()
        if user:
            # NOTE(review): `es` looks unused, but get_instance() may set up
            # the shared Elasticsearch connection ESArticle.get relies on —
            # confirm before removing.
            es = ElasticStorage.get_instance(dev=False)
            doc = ESArticle.get(article_id)
            user.anger += doc.tone.anger
            user.disgust += doc.tone.disgust
            user.fear += doc.tone.fear
            # Bug fix: joy and sadness previously accumulated the anger score.
            user.joy += doc.tone.joy
            user.sadness += doc.tone.sadness
            user.total_articles += 1
        else:
            u = Users(user_id, article_id)
            db.session.add(u)
        db.session.commit()
        return ('', 204)
# Expose UserAPI at /users/ (GET: list profiles, POST: record a read).
app.add_url_rule('/users/', view_func=UserAPI.as_view('users'))
class ArticlesAPI(MethodView):
    """REST endpoints for article click counters."""
    def get(self):
        """Return every article's click count as a JSON list."""
        all_articles = Articles.query.all()
        return json.dumps([x.to_dict() for x in all_articles])
    def post(self):
        """Record one click for the article id in the request body."""
        article_id = json.loads(request.data.decode('utf-8'))['article_id']
        article = Articles.query.filter_by(article_id=article_id).first()
        if article:
            article.clicks += 1
            db.session.add(article)
        else:
            # First click: Articles.__init__ starts the counter at 1.
            a = Articles(article_id)
            db.session.add(a)
        db.session.commit()
        return ('', 204)
# Expose ArticlesAPI at /articles/ (GET: list counters, POST: record a click).
app.add_url_rule('/articles/', view_func=ArticlesAPI.as_view('articles'))
@app.route('/search', methods=['POST'])
def search():
    """Search articles, tracking query popularity and labelling sources."""
    data = json.loads(request.data.decode('utf-8'))
    query = data['q']
    prefs = data['prefs']
    es = ElasticStorage.get_instance(dev=False)
    r = RedisClient.get_instance(dev=False)
    # Count query popularity (case-insensitive) in a redis hash.
    if r.hexists('popular', query.lower()):
        r.hincrby('popular', query.lower())
    else:
        r.hset('popular', query.lower(), 1)
    articles = es.query_articles(query, prefs)
    articles = list(articles)
    # De-duplicate by title: the dict keeps only the last article per title.
    articles = list({article['title']:article for article in articles}.values())
    # Attach a human-readable source name by substring match on the URL;
    # there is no break, so the *last* matching source_map entry wins.
    for article in articles:
        for key, value in source_map.items():
            if key in article['url']:
                article['source'] = value
    return jsonify(
        articles=articles
    )
@app.route('/popular')
def popular():
    """Return the ten most popular search queries with their counts."""
    r = RedisClient.get_instance(dev=False)
    pop = r.hgetall('popular')
    # Redis returns bytes; sort by numeric count, descending, keep the top 10.
    sorted_searches = sorted(pop.items(), key=lambda x:int(x[1]), reverse=True)[0:10]
    final_dict = {}
    for sorted_search in sorted_searches:
        final_dict[sorted_search[0].decode('utf-8')] = int(sorted_search[1].decode('utf-8'))
    return jsonify(final_dict)
if __name__ == "__main__":
    # Cloud Foundry supplies VCAP_APP_PORT; default to 5000 for local runs.
    port = os.getenv('VCAP_APP_PORT', '5000')
    # NOTE(review): debug=True must not be enabled in production.
    app.run(host='0.0.0.0', port=int(port), debug=True)
| 34.580838 | 145 | 0.638615 | 2,633 | 0.455931 | 0 | 0 | 1,142 | 0.197749 | 0 | 0 | 1,490 | 0.258009 |
503a4dd6e2152629bf0d68bcf2540e064a59c3ca | 9,738 | py | Python | py/qaviton/scripts/create.py | qaviton/qaviton | 112f1620af36e09031909bd36b7e388df577b75b | [
"Apache-2.0"
] | 9 | 2018-09-06T10:27:55.000Z | 2020-01-02T16:50:13.000Z | py/qaviton/scripts/create.py | qaviton/qaviton | 112f1620af36e09031909bd36b7e388df577b75b | [
"Apache-2.0"
] | 6 | 2019-06-05T09:44:21.000Z | 2022-03-11T23:26:41.000Z | py/qaviton/scripts/create.py | qaviton/qaviton | 112f1620af36e09031909bd36b7e388df577b75b | [
"Apache-2.0"
] | 9 | 2018-09-21T14:47:40.000Z | 2021-12-21T01:37:20.000Z | import os
from qaviton.utils import filer
from qaviton.utils import path
from qaviton.utils.operating_system import s
from qaviton.version import __version__
cwd = os.getcwd()
examples = path.of(__file__)('examples')
def initial_msg(f):
    """Decorator: print the qaviton version banner before running *f*.

    Fixes over the original: the wrapped function's return value is now
    propagated (the old wrapper discarded it), and functools.wraps
    preserves f's metadata for introspection.
    """
    from functools import wraps

    @wraps(f)
    def dec(*args, **kwargs):
        print("""
        QAVITON VERSION {}
        creating qaviton framework and test examples
        """.format(__version__))
        return f(*args, **kwargs)
    return dec
def install_is_done(tests_dir):
    """Print the post-installation banner with next-step instructions.

    NOTE(review): ``tests_dir`` is currently unused by this function.
    """
    print("""
    @@@@@@@@@@@@@@@@@@@@@
    @ installation done @
    @@@@@@@@@@@@@@@@@@@@@
    # use pip install, uninstall & freeze
    # to manage your dependencies:
    (venv) path/to/project> pip freeze > requirements-test.txt
    # to install your requirements on a new machine(consider using git):
    (venv) path/to/project> pip install -r requirements-test.txt
    * your testing framework is done!
    * start testing like a boss ⚛
    * ______________
    * / __________ \ ______
    * / / \ \ / ____ \
    * / / \ / \ \ / / \ \ __ __ _ ___________ _______ _ _
    * | | O \ / O | | / |______| \ \ \ / / |_| |____ ____| / _____ \ | \ | |
    * | | | | | ________ | \ \ / / |-| | | | | | | | \ | |
    * \ \ \________/ / \ | | | | \ \ / / | | | | | | | | | | \ | |
    * \ \____________/ /\ \_ | | | | \ \/ / | | | | | |_____| | | |\ \| |
    * \________________/ \__| |_| |_| \__/ |_| |_| \_______/ |_| \___|
    """)
def add_readme(tests_dir):
    """Write a starter README.rst into *tests_dir* under the current
    working directory; users are expected to customize it afterwards.
    """
    with open(cwd + s + tests_dir + s + 'README.rst', 'w+') as f:
        f.write("this should be changed to a custom README file for your project\n"
                "you have a nice starting point from here.\n"
                "\n"
                "requirements\n"
                "------------\n"
                "python 3.7 and above\n"
                "pytest latest\n"
                "\n"
                "\n"
                "testing examples\n"
                "----------------\n"
                "checkout under execute_tests/end_to_end_tests to see testing examples.\n"
                "\n"
                "\n"
                "model-based examples\n"
                "--------------------\n"
                "check out the pages directory for page model examples\n"
                "and services/app for a model-based-app service for testing.\n"
                "\n"
                "\n"
                "conftest & pytest.ini\n"
                "---------------------\n"
                "look at the conftest.py to see how to add model-based fixtures\n"
                "you can set parallel testing & reporting with pytest.ini file\n"
                "\n"
                "\n"
                "setup your hub\n"
                "--------------\n"
                "check out in the data dir to set your secret user key and remote hub\n"
                "and customize your supported platforms under the data/supported_platfoms.py file.\n"
                "\n"
                "\n"
                "local hub\n"
                "---------\n"
                "install docker:\n"
                "https://docs.docker.com/install/\n"
                "\n"
                "install selenoid:\n"
                "go to option 2 to install with docker\n"
                "https://github.com/aerokube/selenoid/blob/master/docs/quick-start-guide.adoc\n"
                "\n"
                "go to your secret file and change your hub url to local host:\n"
                "/project/tests/data/secret.py\n"
                "hub='http://localhost:4444/wd/hub'\n"
                "\n"
                "\n"
                "run tests examples\n"
                "------------------\n"
                "cd to your project(in my case it's called myapp and my tests are under tests) and simply\n"
                "(env) C:\\Users\\user\\PycharmProjects\\myapp>python -m pytest tests\n"
                "\n"
                "or run with pycharm:\n"
                "https://www.jetbrains.com/pycharm/download/#section=windows\n"
                "https://www.jetbrains.com/help/pycharm/pytest.html")
def add_gitignore(tests_dir):
    """Write a standard Python .gitignore into *tests_dir*.

    NOTE(review): existence is checked at the project root (cwd) while the
    file is written inside *tests_dir* -- presumably intentional (skip when
    the repo already has a top-level .gitignore), but worth confirming.
    """
    if not filer.os.path.exists(cwd + s + '.gitignore'):
        with open(cwd + s + tests_dir + s + '.gitignore', 'w+') as f:
            f.write("# Byte-compiled / optimized / DLL files\n"
                    "__pycache__/\n"
                    "*.py[cod]\n"
                    "*$py.class\n"
                    "\n"
                    "# C extensions\n"
                    "*.so\n"
                    "\n"
                    "# Distribution / packaging\n"
                    ".Python\n"
                    "build/\n"
                    "develop-eggs/\n"
                    "dist/\n"
                    "downloads/\n"
                    "eggs/\n"
                    ".eggs/\n"
                    "lib/\n"
                    "lib64/\n"
                    "parts/\n"
                    "sdist/\n"
                    "var/\n"
                    "wheels/\n"
                    "*.egg-info/\n"
                    ".installed.cfg\n"
                    "*.egg\n"
                    "MANIFEST\n"
                    "\n"
                    "# PyInstaller\n"
                    "# Usually these files are written by a python script from a template\n"
                    "# before PyInstaller builds the exe, so as to inject date/other infos into it.\n"
                    "*.manifest\n"
                    "*.spec\n"
                    "\n"
                    "# Installer logs\n"
                    "pip-log.txt\n"
                    "pip-delete-this-directory.txt\n"
                    "\n"
                    "# Unit test / coverage reports\n"
                    "htmlcov/\n"
                    ".tox/\n"
                    ".coverage\n"
                    ".coverage.*\n"
                    ".cache\n"
                    "nosetests.xml\n"
                    "coverage.xml\n"
                    "*.cover\n"
                    ".hypothesis/\n"
                    ".pytest_cache/\n"
                    "\n"
                    "# Translations\n"
                    "*.mo\n"
                    "*.pot\n"
                    "\n"
                    "# Django stuff:\n"
                    "*.log\n"
                    "\n"
                    "# Scrapy stuff:\n"
                    ".scrapy\n"
                    "\n"
                    "# Sphinx documentation\n"
                    "docs/_build/\n"
                    "\n"
                    "# PyBuilder\n"
                    "target/\n"
                    "\n"
                    "# Jupyter Notebook\n"
                    ".ipynb_checkpoints\n"
                    "\n"
                    "# pyenv\n"
                    ".python-version\n"
                    "\n"
                    "# celery beat schedule file\n"
                    "celerybeat-schedule\n"
                    "\n"
                    "# SageMath parsed files\n"
                    "*.sage.py\n"
                    "\n"
                    "# Environments\n"
                    ".env\n"
                    ".venv\n"
                    "env/\n"
                    "venv/\n"
                    "ENV/\n"
                    "env.bak/\n"
                    "venv.bak/\n"
                    "\n"
                    "# Spyder project settings\n"
                    ".spyderproject\n"
                    ".spyproject\n"
                    "\n"
                    "# Rope project settings\n"
                    ".ropeproject\n"
                    "\n"
                    "# mkdocs documentation\n"
                    "/site\n"
                    "\n"
                    "# mypy\n"
                    ".mypy_cache/\n"
                    "\n"
                    "# private\n"
                    "*secret*")
def add_pytest_ini(tests_dir):
    """Write a pytest.ini skeleton (commented-out addopts hints) into
    *tests_dir* under the current working directory.

    Fix: the original literal "\\path" was written as "\path" -- an invalid
    escape sequence (DeprecationWarning today, a SyntaxError in future
    Python). The doubled backslash below produces the exact same bytes.
    """
    with open(cwd + s + tests_dir + s + 'pytest.ini', 'w+') as f:
        f.write("[pytest]\n"
                ";addopts = -n 3\n"
                ";addopts = --html=report.html\n"
                ";addopts = --junitxml=\\path\\to\\reports\n"
                ";addopts = --collect-only\n"
                ";addopts = --cov=your_app")
def add_requirements(tests_dir):
    """Snapshot the current environment into requirements-test.txt at the
    project root (cwd).

    Note: *tests_dir* is accepted for call-site symmetry but unused; the
    file always lands in the current working directory.
    """
    # Touch the file first so it exists even if `pip freeze` fails below.
    if not filer.os.path.exists(cwd + s + 'requirements-test.txt'):
        open(cwd + s + 'requirements-test.txt', 'w+').close()
    # Overwrites any existing content with the full frozen dependency list.
    os.system('pip freeze > requirements-test.txt')
# TODO: add more content for different frameworks
@initial_msg
def framework(frameworks, tests_dir, params):
    """Generate the qaviton test-framework skeleton under *tests_dir*.

    :param frameworks: requested framework names (currently unused here)
    :param tests_dir: target directory name, relative to cwd
    :param params: raw CLI params; '--example' selects the demo project
    """
    if '--example' in params:
        # Copy the full working web-test example project.
        filer.copy_directory(examples + s + 'simple_web', cwd + s + tests_dir)
        add_readme(tests_dir)
    else:
        # Copy the bare skeleton for a fresh project.
        filer.copy_directory(examples + s + 'new_project', cwd + s + tests_dir)
    if tests_dir != 'tests':
        # Rewrite imports so packages resolve when tests live in a custom dir.
        filer.find_replace(cwd + s + tests_dir, 'from tests.', 'from ' + tests_dir + '.', "*.py")
    add_pytest_ini(tests_dir)
    add_gitignore(tests_dir)
    add_requirements(tests_dir)
    install_is_done(tests_dir)
| 38.952 | 155 | 0.377798 | 0 | 0 | 0 | 0 | 539 | 0.055339 | 0 | 0 | 5,312 | 0.54538 |
503ad6eb037cd02695fc6926af71b16de0a8bd4c | 1,178 | py | Python | fpga_isp/debayer/base.py | antmicro/fpga-isp-core | 1b5e1323d9c2938758d230929c8ebcdd039fb603 | [
"Apache-2.0"
] | 4 | 2021-10-30T06:29:15.000Z | 2022-02-24T04:09:58.000Z | fpga_isp/debayer/base.py | antmicro/fpga-isp-core | 1b5e1323d9c2938758d230929c8ebcdd039fb603 | [
"Apache-2.0"
] | null | null | null | fpga_isp/debayer/base.py | antmicro/fpga-isp-core | 1b5e1323d9c2938758d230929c8ebcdd039fb603 | [
"Apache-2.0"
] | 1 | 2022-02-17T19:17:12.000Z | 2022-02-17T19:17:12.000Z | from migen import *
import math
from litex.soc.interconnect.csr import *
from litex.soc.interconnect import stream
class DemosaicBase(Module):
    """Base Migen module for demosaicing (debayering) pipelines.

    Holds local mirrors of the outgoing RGB stream's handshake/data signals
    and, when *active*, wires them combinatorially to *streamout*.
    Note: *cache*, *im_w* and *im_h* are accepted but not referenced in this
    base class -- presumably used by subclasses; confirm before removing.
    """
    def first_pix(self):
        # Returns FSM statements that assert rgb_first for exactly one cycle
        # on the very first pixel, then keep it deasserted.
        return [If(self.first_pixel,
            NextValue(self.rgb_first,1),
            NextValue(self.first_pixel,0),
            ).Else(
            NextValue(self.rgb_first,0),
            )]
    def __init__(self, cache, im_w, im_h, min_lines_req, streamout, active):
        # Local stream-protocol signals (ready/valid/first/last/data).
        self.rgb_ready = Signal(reset=0)
        self.rgb_valid = Signal(reset=0)
        self.rgb_first = Signal(reset=0)
        self.rgb_last = Signal(reset=0)
        self.rgb_data = Signal(len(streamout.data))
        # Number of cached lines a subclass needs before it can produce output.
        self.min_lines_required = min_lines_req
        self.working = Signal(reset=0)
        # Set until the first pixel has been emitted (see first_pix()).
        self.first_pixel = Signal(reset=1)
        self.comb += [
            If(active,
                #connect rgb stream with local signals
                self.rgb_ready.eq(streamout.ready),
                streamout.valid.eq(self.rgb_valid),
                streamout.last.eq(self.rgb_last),
                streamout.first.eq(self.rgb_first),
                streamout.data.eq(self.rgb_data),
            )
        ]
| 31.837838 | 76 | 0.589134 | 1,060 | 0.89983 | 0 | 0 | 0 | 0 | 0 | 0 | 38 | 0.032258 |
503af92f2305932f6fb09b877f2e4fef0e54bb66 | 10,341 | py | Python | tests/pipeline/test_buyback_auth.py | colin1alexander/zipline | ba42e6d8b972dcce9271526562ceff0cddd3fa30 | [
"Apache-2.0"
] | null | null | null | tests/pipeline/test_buyback_auth.py | colin1alexander/zipline | ba42e6d8b972dcce9271526562ceff0cddd3fa30 | [
"Apache-2.0"
] | null | null | null | tests/pipeline/test_buyback_auth.py | colin1alexander/zipline | ba42e6d8b972dcce9271526562ceff0cddd3fa30 | [
"Apache-2.0"
] | null | null | null | """
Tests for the reference loader for Buyback Authorizations.
"""
from functools import partial
from unittest import TestCase
import blaze as bz
from blaze.compute.core import swap_resources_into_scope
from contextlib2 import ExitStack
import pandas as pd
from six import iteritems
from zipline.pipeline.common import(
BUYBACK_ANNOUNCEMENT_FIELD_NAME,
CASH_FIELD_NAME,
DAYS_SINCE_PREV,
PREVIOUS_BUYBACK_ANNOUNCEMENT,
PREVIOUS_BUYBACK_CASH,
PREVIOUS_BUYBACK_SHARE_COUNT,
SHARE_COUNT_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME)
from zipline.pipeline.data import (CashBuybackAuthorizations,
ShareBuybackAuthorizations)
from zipline.pipeline.factors.events import (
BusinessDaysSinceCashBuybackAuth,
BusinessDaysSinceShareBuybackAuth
)
from zipline.pipeline.loaders.buyback_auth import \
CashBuybackAuthorizationsLoader, ShareBuybackAuthorizationsLoader
from zipline.pipeline.loaders.blaze import (
BlazeCashBuybackAuthorizationsLoader,
BlazeShareBuybackAuthorizationsLoader,
)
from zipline.utils.test_utils import (
tmp_asset_finder,
)
from .base import EventLoaderCommonMixin, DATE_FIELD_NAME
# Per-sid buyback authorization fixtures. Entry i is concatenated column-wise
# with the i-th event-dates case in create_buyback_auth_tst_frame; the K/A
# comments describe the knowledge-date vs announcement-date ordering covered.
buyback_authorizations = [
    # K1--K2--A1--A2.
    pd.DataFrame({
        SHARE_COUNT_FIELD_NAME: [1, 15],
        CASH_FIELD_NAME: [10, 20]
    }),
    # K1--K2--A2--A1.
    pd.DataFrame({
        SHARE_COUNT_FIELD_NAME: [7, 13],
        CASH_FIELD_NAME: [10, 22]
    }),
    # K1--A1--K2--A2.
    pd.DataFrame({
        SHARE_COUNT_FIELD_NAME: [3, 1],
        CASH_FIELD_NAME: [4, 7]
    }),
    # K1 == K2.
    pd.DataFrame({
        SHARE_COUNT_FIELD_NAME: [6, 23],
        CASH_FIELD_NAME: [1, 2]
    }),
    # Empty case: sid with no buyback authorizations at all.
    pd.DataFrame(
        columns=[SHARE_COUNT_FIELD_NAME,
                 CASH_FIELD_NAME],
        dtype='datetime64[ns]'
    ),
]
def create_buyback_auth_tst_frame(cases, field_to_drop):
    """Build a map of sid -> buyback-authorization test frame.

    Each event-dates case has its date column renamed to the buyback
    announcement column, is joined column-wise with the matching fixture in
    ``buyback_authorizations``, and then *field_to_drop* (the cash or share
    count column) is removed so each dataset carries only its own value.

    Fix: ``.drop(field_to_drop, 1)`` passed the axis positionally, which is
    deprecated and removed in pandas 2.0; use the ``axis=1`` keyword.
    """
    renamed_cases = (
        case.rename(
            columns={DATE_FIELD_NAME: BUYBACK_ANNOUNCEMENT_FIELD_NAME}
        )
        for case in cases
    )
    return {
        sid: pd.concat([df, buyback_authorizations[sid]], axis=1).drop(
            field_to_drop, axis=1)
        for sid, df in enumerate(renamed_cases)
    }
class CashBuybackAuthLoaderTestCase(TestCase, EventLoaderCommonMixin):
    """
    Test for cash buyback authorizations dataset.
    """
    # Pipeline terms under test: latest cash amount, latest announcement
    # date, and business days since the previous announcement.
    pipeline_columns = {
        PREVIOUS_BUYBACK_CASH:
            CashBuybackAuthorizations.cash_amount.latest,
        PREVIOUS_BUYBACK_ANNOUNCEMENT:
            CashBuybackAuthorizations.announcement_date.latest,
        DAYS_SINCE_PREV:
            BusinessDaysSinceCashBuybackAuth(),
    }
    @classmethod
    def setUpClass(cls):
        # Class-wide temp asset finder; closed in tearDownClass.
        cls._cleanup_stack = stack = ExitStack()
        cls.finder = stack.enter_context(
            tmp_asset_finder(equities=cls.equity_info),
        )
        cls.cols = {}
        # Drop the share-count column: this dataset only carries cash.
        cls.dataset = create_buyback_auth_tst_frame(cls.event_dates_cases,
                                                    SHARE_COUNT_FIELD_NAME)
        cls.loader_type = CashBuybackAuthorizationsLoader
    @classmethod
    def tearDownClass(cls):
        cls._cleanup_stack.close()
    def setup(self, dates):
        """Precompute the expected per-sid output frames for *dates*."""
        zip_with_floats_dates = partial(self.zip_with_floats, dates)
        num_days_between_dates = partial(self.num_days_between, dates)
        # Expected previous cash per sid: NaN until the first announcement,
        # then the most recently announced amount.
        _expected_previous_cash = pd.DataFrame({
            0: zip_with_floats_dates(
                ['NaN'] * num_days_between_dates(None, '2014-01-14') +
                [10] * num_days_between_dates('2014-01-15', '2014-01-19') +
                [20] * num_days_between_dates('2014-01-20', None)
            ),
            1: zip_with_floats_dates(
                ['NaN'] * num_days_between_dates(None, '2014-01-14') +
                [22] * num_days_between_dates('2014-01-15', '2014-01-19') +
                [10] * num_days_between_dates('2014-01-20', None)
            ),
            2: zip_with_floats_dates(
                ['NaN'] * num_days_between_dates(None, '2014-01-09') +
                [4] * num_days_between_dates('2014-01-10', '2014-01-19') +
                [7] * num_days_between_dates('2014-01-20', None)
            ),
            3: zip_with_floats_dates(
                ['NaN'] * num_days_between_dates(None, '2014-01-09') +
                [1] * num_days_between_dates('2014-01-10', '2014-01-14') +
                [2] * num_days_between_dates('2014-01-15', None)
            ),
            4: zip_with_floats_dates(['NaN'] * len(dates)),
        }, index=dates)
        self.cols[PREVIOUS_BUYBACK_ANNOUNCEMENT] = \
            self.get_expected_previous_event_dates(dates)
        self.cols[PREVIOUS_BUYBACK_CASH] = _expected_previous_cash
        self.cols[DAYS_SINCE_PREV] = self._compute_busday_offsets(
            self.cols[PREVIOUS_BUYBACK_ANNOUNCEMENT]
        )
class ShareBuybackAuthLoaderTestCase(TestCase, EventLoaderCommonMixin):
    """
    Test for share buyback authorizations dataset.
    """
    # Pipeline terms under test: latest share count, latest announcement
    # date, and business days since the previous announcement.
    pipeline_columns = {
        PREVIOUS_BUYBACK_SHARE_COUNT:
            ShareBuybackAuthorizations.share_count.latest,
        PREVIOUS_BUYBACK_ANNOUNCEMENT:
            ShareBuybackAuthorizations.announcement_date.latest,
        DAYS_SINCE_PREV:
            BusinessDaysSinceShareBuybackAuth(),
    }
    @classmethod
    def setUpClass(cls):
        # Class-wide temp asset finder; closed in tearDownClass.
        cls._cleanup_stack = stack = ExitStack()
        cls.finder = stack.enter_context(
            tmp_asset_finder(equities=cls.equity_info),
        )
        cls.cols = {}
        # Drop the cash column: this dataset only carries share counts.
        cls.dataset = create_buyback_auth_tst_frame(cls.event_dates_cases,
                                                    CASH_FIELD_NAME)
        cls.loader_type = ShareBuybackAuthorizationsLoader
    @classmethod
    def tearDownClass(cls):
        cls._cleanup_stack.close()
    def setup(self, dates):
        """Precompute the expected per-sid output frames for *dates*."""
        zip_with_floats_dates = partial(self.zip_with_floats, dates)
        num_days_between_dates = partial(self.num_days_between, dates)
        # Expected previous share count per sid: NaN until the first
        # announcement, then the most recently announced count.
        _expected_previous_buyback_share_count = pd.DataFrame({
            0: zip_with_floats_dates(
                ['NaN'] * num_days_between_dates(None, '2014-01-14') +
                [1] * num_days_between_dates('2014-01-15', '2014-01-19') +
                [15] * num_days_between_dates('2014-01-20', None)
            ),
            1: zip_with_floats_dates(
                ['NaN'] * num_days_between_dates(None, '2014-01-14') +
                [13] * num_days_between_dates('2014-01-15', '2014-01-19') +
                [7] * num_days_between_dates('2014-01-20', None)
            ),
            2: zip_with_floats_dates(
                ['NaN'] * num_days_between_dates(None, '2014-01-09') +
                [3] * num_days_between_dates('2014-01-10', '2014-01-19') +
                [1] * num_days_between_dates('2014-01-20', None)
            ),
            3: zip_with_floats_dates(
                ['NaN'] * num_days_between_dates(None, '2014-01-09') +
                [6] * num_days_between_dates('2014-01-10', '2014-01-14') +
                [23] * num_days_between_dates('2014-01-15', None)
            ),
            4: zip_with_floats_dates(['NaN'] * len(dates)),
        }, index=dates)
        self.cols[
            PREVIOUS_BUYBACK_SHARE_COUNT
        ] = _expected_previous_buyback_share_count
        self.cols[PREVIOUS_BUYBACK_ANNOUNCEMENT] = \
            self.get_expected_previous_event_dates(dates)
        self.cols[DAYS_SINCE_PREV] = self._compute_busday_offsets(
            self.cols[PREVIOUS_BUYBACK_ANNOUNCEMENT]
        )
class BlazeCashBuybackAuthLoaderTestCase(CashBuybackAuthLoaderTestCase):
    """ Test case for loading via blaze.
    """
    @classmethod
    def setUpClass(cls):
        super(BlazeCashBuybackAuthLoaderTestCase, cls).setUpClass()
        # Swap in the blaze-backed loader; everything else reuses the parent.
        cls.loader_type = BlazeCashBuybackAuthorizationsLoader
    def loader_args(self, dates):
        """Flatten the per-sid frame mapping into one long table and wrap it
        as a blaze expression (the blaze loader's expected input)."""
        _, mapping = super(
            BlazeCashBuybackAuthLoaderTestCase,
            self,
        ).loader_args(dates)
        return (bz.Data(pd.concat(
            pd.DataFrame({
                BUYBACK_ANNOUNCEMENT_FIELD_NAME:
                    frame[BUYBACK_ANNOUNCEMENT_FIELD_NAME],
                CASH_FIELD_NAME:
                    frame[CASH_FIELD_NAME],
                TS_FIELD_NAME:
                    frame[TS_FIELD_NAME],
                SID_FIELD_NAME: sid,
            })
            for sid, frame in iteritems(mapping)
        ).reset_index(drop=True)),)
class BlazeShareBuybackAuthLoaderTestCase(ShareBuybackAuthLoaderTestCase):
    """ Test case for loading via blaze.
    """
    @classmethod
    def setUpClass(cls):
        super(BlazeShareBuybackAuthLoaderTestCase, cls).setUpClass()
        # Swap in the blaze-backed loader; everything else reuses the parent.
        cls.loader_type = BlazeShareBuybackAuthorizationsLoader
    def loader_args(self, dates):
        """Flatten the per-sid frame mapping into one long table and wrap it
        as a blaze expression (the blaze loader's expected input)."""
        _, mapping = super(
            BlazeShareBuybackAuthLoaderTestCase,
            self,
        ).loader_args(dates)
        return (bz.Data(pd.concat(
            pd.DataFrame({
                BUYBACK_ANNOUNCEMENT_FIELD_NAME:
                    frame[BUYBACK_ANNOUNCEMENT_FIELD_NAME],
                SHARE_COUNT_FIELD_NAME:
                    frame[SHARE_COUNT_FIELD_NAME],
                TS_FIELD_NAME:
                    frame[TS_FIELD_NAME],
                SID_FIELD_NAME: sid,
            })
            for sid, frame in iteritems(mapping)
        ).reset_index(drop=True)),)
class BlazeShareBuybackAuthLoaderNotInteractiveTestCase(
        BlazeShareBuybackAuthLoaderTestCase):
    """Variant that feeds the loader a non-interactive blaze symbol plus an
    explicit resource dictionary instead of a bound expression."""
    def loader_args(self, dates):
        parent = super(BlazeShareBuybackAuthLoaderNotInteractiveTestCase, self)
        bound_expr = parent.loader_args(dates)[0]
        return swap_resources_into_scope(bound_expr, {})
class BlazeCashBuybackAuthLoaderNotInteractiveTestCase(
        BlazeCashBuybackAuthLoaderTestCase):
    """Variant that feeds the loader a non-interactive blaze symbol plus an
    explicit resource dictionary instead of a bound expression."""
    def loader_args(self, dates):
        parent = super(BlazeCashBuybackAuthLoaderNotInteractiveTestCase, self)
        bound_expr = parent.loader_args(dates)[0]
        return swap_resources_into_scope(bound_expr, {})
| 36.031359 | 78 | 0.619089 | 7,970 | 0.770718 | 0 | 0 | 1,332 | 0.128808 | 0 | 0 | 953 | 0.092157 |
503b91f3eae562c97ebece22cef6e50c66f5a795 | 1,534 | py | Python | learnMongo/mongoInsert.py | huobingli/pyAlgorithm | cbfe3d65f7748be4f6811cb7e1897d696e914891 | [
"MIT"
] | null | null | null | learnMongo/mongoInsert.py | huobingli/pyAlgorithm | cbfe3d65f7748be4f6811cb7e1897d696e914891 | [
"MIT"
] | null | null | null | learnMongo/mongoInsert.py | huobingli/pyAlgorithm | cbfe3d65f7748be4f6811cb7e1897d696e914891 | [
"MIT"
] | null | null | null | import pymongo
from bson.objectid import ObjectId
# MongoDB insert examples (demonstrates insert, insert_one and insert_many).
def main():
    """Connect to a remote MongoDB and demonstrate the insert APIs."""
    client = pymongo.MongoClient(host='47.114.171.118', port=27017)
    db = client.test
    collection = db.students
    # Insert a single document.
    student = {
        'id': '20170101',
        'name': 'Kevin',
        'age': 20,
        'gender': 'male'
    }
    # Every document carries a unique _id attribute.
    # If not set explicitly, MongoDB generates an ObjectId for it.
    # insert() returns the _id value(s) after execution.
    result = collection.insert(student)
    print(result)
    # Insert multiple documents at once.
    student1 = {
        'id': '20170101',
        'name': 'Jordan',
        'age': 20,
        'gender': 'male'
    }
    student2 = {
        'id': '20170202',
        'name': 'Mike',
        'age': 20,
        'gender': 'male'
    }
    # Documents are passed in as a list here,
    # and the result is returned as a list as well.
    result = collection.insert([student1, student2])
    print(result)
    # pymongo officially discourages insert();
    # insert_one / insert_many are recommended for one or many documents.
    student = {
        'id': '20170101',
        'name': 'Jordan',
        'age': 20,
        'gender': 'male'
    }
    result = collection.insert_one(student)
    print(result)
    print(result.inserted_id)
    student1 = {
        'id': '20170101',
        'name': 'Jordan',
        'age': 20,
        'gender': 'male'
    }
    student2 = {
        'id': '20170202',
        'name': 'Mike',
        'age': 20,
        'gender': 'male'
    }
    result = collection.insert_many([student1, student2])
    print(result)
    print(result.inserted_ids)
# Run the demo only when executed as a script.
if __name__ == '__main__':
    main()
503c48b6df0fa453f16c7dbb69d69d3d798c0dc7 | 4,608 | py | Python | gerrymander/operations.py | agx/gerrymander | ffa74828fdd9bae0aa46ec121dfbb1d999a86d31 | [
"Apache-2.0"
] | 1 | 2015-09-12T20:52:10.000Z | 2015-09-12T20:52:10.000Z | gerrymander/operations.py | russellb/gerrymander | 88d3a43dbc18592225619b88ce93cd88d6137a08 | [
"Apache-2.0"
] | null | null | null | gerrymander/operations.py | russellb/gerrymander | 88d3a43dbc18592225619b88ce93cd88d6137a08 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (C) 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gerrymander.model import ModelChange
from gerrymander.model import ModelEvent
class OperationBase(object):
    """Common base for gerrit operations; stores the client used to run
    remote commands."""
    def __init__(self, client):
        self.client = client
class OperationQuery(OperationBase):
    """Builds and runs a gerrit ``query`` command, optionally requesting
    patch sets, approvals, file lists and comments, and feeds each result
    as a ModelChange to a caller-supplied callback."""
    # Patch-set detail levels.
    PATCHES_NONE = "none"
    PATCHES_CURRENT = "current"
    PATCHES_ALL = "all"
    # Change status values usable as query terms.
    STATUS_SUBMITTED = "submitted"
    STATUS_REVIEWED = "reviewed"
    STATUS_MERGED = "merged"
    STATUS_ABANDONED = "abandoned"
    STATUS_OPEN = "open"
    STATUS_CLOSED = "closed"
    # NOTE(review): `terms={}` is a shared mutable default -- safe only as
    # long as callers and this class never mutate it (they currently don't).
    def __init__(self, client, terms={}, rawquery=None, patches=PATCHES_NONE,
                 approvals=False, files=False, comments=False):
        OperationBase.__init__(self, client)
        self.terms = terms
        self.rawquery = rawquery
        self.patches = patches
        self.approvals = approvals
        self.files = files
        self.comments = comments
        # Approvals/files only make sense when patch-set data is requested.
        if self.patches == OperationQuery.PATCHES_NONE:
            if self.approvals:
                raise Exception("approvals cannot be requested without patches")
            if self.files:
                raise Exception("files cannot be requested without patches")
    def get_args(self, limit=None, sortkey=None):
        """Build the gerrit CLI argument list for one query page.

        *limit* caps the number of results; *sortkey* resumes pagination
        after a previous page (gerrit's resume_sortkey mechanism).
        """
        args = ["query", "--format=JSON"]
        if self.patches == OperationQuery.PATCHES_CURRENT:
            args.append("--current-patch-set")
        elif self.patches == OperationQuery.PATCHES_ALL:
            args.append("--patch-sets")
        if self.approvals:
            args.append("--all-approvals")
        if self.files:
            args.append("--files")
        if self.comments:
            args.append("--comments")
        clauses = []
        if limit is not None:
            clauses.append("limit:" + str(limit))
        if sortkey is not None:
            clauses.append("resume_sortkey:" + sortkey)
        if self.rawquery is not None:
            clauses.append("(" + self.rawquery + ")")
        terms = list(self.terms.keys())
        terms.sort()
        for term in terms:
            negateAll = False
            # Note: `terms` is rebound here to this term's value list,
            # shadowing the sorted key list built above.
            terms = self.terms[term]
            # A leading "!" sentinel negates the whole OR-clause for a term.
            if len(terms) > 0 and terms[0] == "!":
                negateAll = True
                terms = terms[1:]
            if len(terms) == 0:
                continue
            subclauses = []
            for value in terms:
                subclauses.append("%s:%s" % (term, value))
            clause = " OR ".join(subclauses)
            if negateAll:
                clause = "( NOT ( " + clause + " ) )"
            else:
                clause = "( " + clause + " )"
            clauses.append(clause)
        args.append(" AND ".join(clauses))
        return args
    def run(self, cb, limit=None):
        """Run the query, invoking *cb* with each ModelChange.

        Results are fetched in pages of up to 500 using resume_sortkey;
        iteration stops when a page yields no rows or *limit* is reached.
        """
        # Mutable pagination state shared with the line callback below.
        class tracker(object):
            def __init__(self):
                self.gotany = True
                self.count = 0
                self.sortkey = None
        c = tracker()
        def mycb(line):
            # The trailing stats row only carries rowCount -- skip it.
            if 'rowCount' in line:
                return
            if 'type' in line and line['type'] == "error":
                raise Exception(line['message'])
            change = ModelChange.from_json(line)
            if "sortKey" in line:
                c.sortkey = line["sortKey"]
            c.gotany = True
            c.count = c.count + 1
            cb(change)
        if limit is None:
            while c.gotany:
                c.gotany = False
                self.client.run(self.get_args(500, c.sortkey), mycb)
        else:
            while c.count < limit and c.gotany:
                want = limit - c.count
                if want > 500:
                    want = 500
                c.gotany = False
                self.client.run(self.get_args(want, c.sortkey), mycb)
        return 0
class OperationWatch(OperationBase):
    """Wraps gerrit's ``stream-events`` command, turning each emitted JSON
    line into a ModelEvent and passing it to the caller's callback."""
    def __init__(self, client):
        OperationBase.__init__(self, client)
    def run(self, cb):
        """Block on the event stream, invoking *cb* for each parsed event."""
        def handle_line(line):
            parsed = ModelEvent.from_json(line)
            if parsed:
                cb(parsed)
        return self.client.run(["stream-events"], handle_line)
| 31.346939 | 80 | 0.560547 | 3,933 | 0.853516 | 0 | 0 | 0 | 0 | 0 | 0 | 976 | 0.211806 |
503d90e6d1ab557f77cc0650d1a8e779ce8b91c4 | 2,130 | py | Python | main.py | pystokes/mnist | ad63158bb53bd7f85c531f708936684edb5b7bd0 | [
"MIT"
] | null | null | null | main.py | pystokes/mnist | ad63158bb53bd7f85c531f708936684edb5b7bd0 | [
"MIT"
] | 2 | 2021-06-08T20:51:12.000Z | 2022-03-12T00:12:22.000Z | main.py | pystokes/mnist | ad63158bb53bd7f85c531f708936684edb5b7bd0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import datetime as dt
import json
from pathlib import Path
import torch
import torch.nn as nn
from torch import optim
from libs.data_loader import create_mnist_data_loader
from libs.model import CNN
from libs.trainer import train, test
def main(config):
    """Train the MNIST CNN per *config*, evaluating and checkpointing."""
    # Select the compute device (CPU or GPU);
    # PyTorch requires this to be chosen explicitly.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print('\nDevice:', device)
    # Build the train/test data loaders.
    train_loader, test_loader = create_mnist_data_loader(config)
    # Build the model.
    model = CNN(config).to(device)
    print(model) # print the network details for inspection
    # Define the loss function.
    # CrossEntropyLoss = LogSoftmax + NLLLoss, so the model outputs raw
    # logits (no softmax) during training.
    loss_fn = nn.CrossEntropyLoss()
    # Define the optimizer (Adam chosen here as an example).
    optimizer = optim.Adam(model.parameters(), lr=config['train']['learning_rate'])
    # 9. Train (compute test-set accuracy each epoch).
    print('\n-----------------')
    print('   Begin train')
    print('-----------------\n')
    for epoch in range(1, config['train']['epochs']+1):
        loss = train(train_loader, model, optimizer, loss_fn, device, config['train']['epochs'], epoch)
        correct, data_num = test(test_loader, model, device)
        print(f'{dt.datetime.now().strftime("%H:%M:%S")} Epoch [{epoch:05}/{config["train"]["epochs"]:05}], Train Loss: {loss.item():.4f}, Test Acc: {correct:05}/{data_num:05} ({(100. * float(correct) /data_num):.1f}%)')
        # Save the learned weights every save_period epochs.
        if epoch % config['train']['save_period'] == 0:
            # Create the save directory.
            save_home = Path(config['train']['save_home'])
            save_home.mkdir(exist_ok=True, parents=True)
            # Build the checkpoint file path.
            save_path = save_home.joinpath(f'weight-{str(epoch).zfill(5)}.pth')
            # Save the weights.
            torch.save(model.state_dict(), save_path)
# Entry point: load the JSON config, echo it, and start training.
if __name__ == '__main__':
    from pprint import pprint
    with open('./config.json', 'r') as f:
        config = json.load(f)
    print('\n--- Config ---------------')
    pprint(config)
    print('--------------------------')
    main(config)
| 32.272727 | 220 | 0.618779 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,171 | 0.473323 |
503e0e96506950e6497e6cb13686d145c5a53e90 | 2,029 | py | Python | ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/status_params.py | kennyballou/ambari | 8985bcf11296d540a861a8634c17d6b9b1accd5a | [
"Apache-2.0"
] | null | null | null | ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/status_params.py | kennyballou/ambari | 8985bcf11296d540a861a8634c17d6b9b1accd5a | [
"Apache-2.0"
] | null | null | null | ambari-server/src/main/resources/common-services/STORM/0.9.1.2.1/package/scripts/status_params.py | kennyballou/ambari | 8985bcf11296d540a861a8634c17d6b9b1accd5a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.libraries.script import Script
from resource_management.libraries.functions import get_kinit_path
from resource_management.libraries.functions import default, format
# Command/cluster configuration pushed down by the Ambari agent.
config = Script.get_config()
pid_dir = config['configurations']['storm-env']['storm_pid_dir']
# One pid file per Storm daemon, all under storm_pid_dir.
pid_nimbus = format("{pid_dir}/nimbus.pid")
pid_supervisor = format("{pid_dir}/supervisor.pid")
pid_drpc = format("{pid_dir}/drpc.pid")
pid_ui = format("{pid_dir}/ui.pid")
pid_logviewer = format("{pid_dir}/logviewer.pid")
pid_rest_api = format("{pid_dir}/restapi.pid")
# Component name -> pid file, used by the service status checks.
pid_files = {"logviewer":pid_logviewer,
             "ui": pid_ui,
             "nimbus": pid_nimbus,
             "supervisor": pid_supervisor,
             "drpc": pid_drpc,
             "rest_api": pid_rest_api}
# Security related/required params
hostname = config['hostname']
security_enabled = config['configurations']['cluster-env']['security_enabled']
kinit_path_local = get_kinit_path()
tmp_dir = Script.get_tmp_dir()
conf_dir = "/etc/storm/conf"
storm_user = config['configurations']['storm-env']['storm_user']
# Kerberos principal/keytab for the Storm UI; None when security is off.
storm_ui_principal = default('/configurations/storm-env/storm_ui_principal_name', None)
storm_ui_keytab = default('/configurations/storm-env/storm_ui_keytab', None)
| 41.408163 | 87 | 0.760473 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,253 | 0.617546 |
503e2e5fa5da5155bca173a661a3c68a6802e6ed | 2,917 | py | Python | src/pyuwds3/types/vector/scalar_stable.py | LAAS-HRI/uwds3 | 42390f62ed5701a32710341b01faa10efc448078 | [
"MIT"
] | 2 | 2020-08-19T06:15:14.000Z | 2021-05-23T09:55:18.000Z | src/pyuwds3/types/vector/scalar_stable.py | LAAS-HRI/uwds3 | 42390f62ed5701a32710341b01faa10efc448078 | [
"MIT"
] | 5 | 2021-01-06T09:00:35.000Z | 2021-01-20T13:22:19.000Z | src/pyuwds3/types/vector/scalar_stable.py | LAAS-HRI/uwds3 | 42390f62ed5701a32710341b01faa10efc448078 | [
"MIT"
] | 2 | 2020-11-18T17:34:43.000Z | 2021-05-23T16:14:17.000Z | import rospy
import numpy as np
import cv2
class ScalarStable(object):
    """Represents a stabilized scalar.

    Wraps a 2-state OpenCV Kalman filter (state = [x, vx]) whose transition
    matrix is rebuilt from the elapsed ROS time on every update/predict.
    """
    def __init__(self,
                 x=.0,
                 vx=.0,
                 p_cov=.03, m_cov=.01,
                 time=None):
        """ScalarStabilized constructor.

        :param x: initial position
        :param vx: initial velocity
        :param p_cov: process noise covariance scale
        :param m_cov: measurement noise covariance scale
        :param time: initial timestamp (defaults to rospy now)
        """
        self.x = x
        self.vx = vx
        self.p_cov = p_cov
        self.m_cov = m_cov
        # 2 state variables (x, vx), 1 measurement dimension.
        self.filter = cv2.KalmanFilter(2, 1)
        self.filter.statePost = self.to_array()
        # NOTE(review): [[1, 1]] observes x + vx; the usual position-only
        # observation would be [[1, 0]] -- confirm this is intentional.
        self.filter.measurementMatrix = np.array([[1, 1]], np.float32)
        self.__update_noise_cov(p_cov, m_cov)
        if time is None:
            self.last_update = rospy.Time().now()
        else:
            self.last_update = time
    def from_array(self, array):
        """Updates the scalar stabilized state from array"""
        assert array.shape == (2, 1)
        self.x = array[0]
        self.vx = array[1]
        # Keep the filter's prior in sync with its posterior.
        self.filter.statePre = self.filter.statePost
    def to_array(self):
        """Returns the scalar stabilizer state array representation"""
        return np.array([[self.x], [self.vx]], np.float32)
    def position(self):
        """Returns the scalar's position"""
        return self.x
    def velocity(self):
        """Returns the scalar's velocity"""
        return self.vx
    def update(self, x, time=None, m_cov=None):
        """Updates/Filter the scalar"""
        if m_cov is not None:
            self.__update_noise_cov(self.p_cov, m_cov)
        # Rebuild the transition matrix from the elapsed time, then run one
        # predict/correct cycle with the new measurement.
        self.__update_time(time=time)
        self.filter.predict()
        measurement = np.array([[np.float32(x)]])
        assert measurement.shape == (1, 1)
        self.filter.correct(measurement)
        self.from_array(self.filter.statePost)
    def predict(self, time=None):
        """Predicts the scalar state"""
        self.__update_time(time=time)
        self.filter.predict()
        self.from_array(self.filter.statePost)
    def __update_noise_cov(self, p_cov, m_cov):
        """Updates the process and measurement covariances"""
        self.filter.processNoiseCov = np.array([[1, 0],
                                                [0, 1]], np.float32) * p_cov
        self.filter.measurementNoiseCov = np.array([[1]], np.float32) * m_cov
    def __update_transition(self, dt):
        # Constant-velocity model: x' = x + dt * vx.
        self.filter.transitionMatrix = np.array([[1, dt],
                                                 [0, 1]], np.float32)
    def __update_time(self, time=None):
        # Compute elapsed time since the last update and refresh the
        # transition matrix accordingly.
        if time is None:
            now = rospy.Time().now()
        else:
            now = time
        elapsed_time = now - self.last_update
        self.last_update = now
        self.__update_transition(elapsed_time.to_sec())
    def __len__(self):
        return 1
    def __add__(self, scalar):
        # Returns a plain float (position sum), not a ScalarStable.
        return self.x + scalar.x
    def __sub__(self, scalar):
        # Returns a plain float (position difference), not a ScalarStable.
        return self.x - scalar.x
    def __str__(self):
        return("{}".format(self.to_array()))
| 31.031915 | 77 | 0.567707 | 2,871 | 0.98423 | 0 | 0 | 0 | 0 | 0 | 0 | 373 | 0.127871 |
5041193e8af88157d4069d70c71b97131486c9cc | 14,399 | py | Python | drest/api.py | derks/drest | 7e35375ffd884c3c124dc800f94c2f271d788e0f | [
"BSD-3-Clause"
] | 9 | 2015-03-10T00:41:54.000Z | 2020-05-07T06:03:22.000Z | drest/api.py | derks/drest | 7e35375ffd884c3c124dc800f94c2f271d788e0f | [
"BSD-3-Clause"
] | null | null | null | drest/api.py | derks/drest | 7e35375ffd884c3c124dc800f94c2f271d788e0f | [
"BSD-3-Clause"
] | null | null | null | """dRest core API connection library."""
import re
from . import interface, resource, request, serialization, meta, exc
from . import response
class API(meta.MetaMixin):
"""
The API class acts as a high level 'wrapper' around multiple lower level
handlers. Most of the meta arguments are optionally passed to one or
more handlers upon instantiation. All handler classes must be passed
*un-instantiated*.
Arguments:
baseurl
Translated to self.baseurl (for convenience).
Optional Arguments and Meta:
debug
Boolean. Toggle debug console output. Default: False.
baseurl
The base url to the API endpoint.
request_handler
The Request Handler class that performs the actual HTTP (or other)
requests. Default: drest.request.RequestHandler.
resource_handler
The Resource Handler class that is used when api.add_resource is
called. Default: drest.resource.ResourceHandler.
response_handler
An un-instantiated Response Handler class used to return
responses to the caller. Default: drest.response.ResponseHandler.
serialization_handler
An un-instantiated Serialization Handler class used to
serialize/deserialize data.
Default: drest.serialization.JsonSerializationHandler.
ignore_ssl_validation
Boolean. Whether or not to ignore ssl validation errors.
Default: False
serialize
Boolean. Whether or not to serialize data before sending
requests. Default: False.
deserialize
Boolean. Whether or not to deserialize data before returning
the Response object. Default: True.
trailing_slash
Boolean. Whether or not to append a trailing slash to the
request url. Default: True.
extra_headers
A dictionary of key value pairs that are added to the HTTP headers
of *every* request. Passed to request_handler.add_header().
extra_params
A dictionary of key value pairs that are added to the POST, or
'payload' data sent with *every* request. Passed to
request_handler.add_param().
extra_url_params
A dictionary of key value pairs that are added to the GET/URL
parameters of *every* request. Passed to
request_handler.add_extra_url_param().
timeout
The amount of seconds where a request should timeout. Default: 30
Usage
.. code-block:: python
import drest
# Create a generic client api object
api = drest.API('http://localhost:8000/api/v1/')
# Or something more customized:
api = drest.API(
baseurl='http://localhost:8000/api/v1/',
trailing_slash=False,
ignore_ssl_validation=True,
)
# Or even more so:
class MyAPI(drest.API):
class Meta:
baseurl = 'http://localhost:8000/api/v1/'
extra_headers = dict(MyKey='Some Value For Key')
extra_params = dict(some_param='some_value')
request_handler = MyCustomRequestHandler
api = MyAPI()
# By default, the API support HTTP Basic Auth with username/password.
api.auth('john.doe', 'password')
# Make calls openly
response = api.make_request('GET', '/users/1/')
# Or attach a resource
api.add_resource('users')
# Get available resources
api.resources
# Get all objects of a resource
response = api.users.get()
# Get a single resource with primary key '1'
response = api.users.get(1)
# Update a resource with primary key '1'
response = api.users.get(1)
updated_data = response.data.copy()
updated_data['first_name'] = 'John'
updated_data['last_name'] = 'Doe'
response = api.users.put(data['id'], updated_data)
# Create a resource
user_data = dict(
username='john.doe',
password='oober-secure-password',
first_name='John',
last_name='Doe',
)
response = api.users.post(user_data)
# Delete a resource with primary key '1'
response = api.users.delete(1)
"""
    class Meta:
        """Default configuration options for the API client.

        These values are exposed through ``self._meta`` (see ``__init__`` and
        ``_setup_request_handler``) and may be overridden per-subclass or via
        keyword arguments at instantiation.
        """
        # Base url of the API endpoint; must be provided via subclass Meta,
        # keyword argument, or the `baseurl` positional argument.
        baseurl = None
        # Handler class performing the actual HTTP (or other) requests.
        request_handler = request.RequestHandler
        # Handler class instantiated by add_resource() for each resource.
        resource_handler = resource.RESTResourceHandler
        # Headers/params added to *every* request (see _setup_request_handler).
        extra_headers = {}
        extra_params = {}
        extra_url_params = {}
    def __init__(self, baseurl=None, **kw):
        """Initialize the API client.

        The optional positional ``baseurl`` is a convenience: it is folded
        into ``kw['baseurl']`` so it behaves exactly like the Meta option.
        NOTE(review): if neither the argument nor Meta provides a baseurl,
        ``self._meta.baseurl`` is None and ``.strip('/')`` below raises
        AttributeError — confirm upstream Meta handling before relying on it.
        """
        if baseurl:
            kw['baseurl'] = baseurl
        # The meta base class (not visible in this file) merges Meta options
        # and **kw into self._meta.
        super(API, self).__init__(**kw)
        self.baseurl = self._meta.baseurl.strip('/')
        self._resources = []
        self._setup_request_handler(**kw)
    def _setup_request_handler(self, **kw):
        """Instantiate and configure the request handler.

        Copies every public Meta option that the request handler also
        declares onto the handler's own ``_meta``, then registers the
        ``extra_headers``/``extra_params``/``extra_url_params`` dictionaries
        so they are applied to every request.
        """
        request.validate(self._meta.request_handler)
        self.request = self._meta.request_handler(**kw)
        # just makes things easier to be able to wrap meta under the api
        # and pass it to the request handler.
        for meta in dir(self._meta):
            if meta.startswith('_'):
                continue
            # Only forward options the request handler actually knows about.
            if hasattr(self.request._meta, meta):
                setattr(self.request._meta, meta, getattr(self._meta, meta))
        for key in self._meta.extra_headers:
            self.request.add_header(key, self._meta.extra_headers[key])
        for key in self._meta.extra_params:
            self.request.add_param(key, self._meta.extra_params[key])
        for key in self._meta.extra_url_params:
            self.request.add_url_param(key, self._meta.extra_url_params[key])
    def auth(self, user, password, **kw):
        """
        This authentication mechanism implements HTTP Basic Authentication.

        Credentials are stored on the request handler and applied to all
        subsequent requests.

        Required Arguments:
            user
                The API username.
            password
                The password of that user.
        """
        self.request.set_auth_credentials(user, password)
def make_request(self, method, path, params={}, headers={}):
url = "%s/%s/" % (self.baseurl.strip('/'), path.strip('/'))
return self.request.make_request(method, url, params, headers)
    @property
    def resources(self):
        """The list of resource names registered via add_resource()."""
        return self._resources
def add_resource(self, name, resource_handler=None, path=None):
"""
Add a resource handler to the api object.
Required Arguments:
name
The name of the resource. This is generally the basic name
of the resource on the API. For example '/api/v0/users/'
would likely be called 'users' and will be accessible as
'api.users' from which additional calls can be made. For
example 'api.users.get()'.
Optional Arguments:
resource_handler
The resource handler class to use. Defaults to
self._meta.resource_handler.
path
The path to the resource on the API (after the base url).
Defaults to '/<name>/'.
Nested Resources:
It is possible to attach resources in a 'nested' fashion. For example
passing a name of 'my.nested.users' would be accessible as
api.my.nested.users.get().
Usage:
.. code-block:: python
api.add_resource('users')
response = api.users.get()
# Or for nested resources
api.add_resource('my.nested.users', path='/users/')
response = api.my.nested.users.get()
"""
safe_list = ['.', '_']
for char in name:
if char in safe_list:
continue
if not char.isalnum():
raise exc.dRestResourceError(
"resource name must be alpha-numeric."
)
if not path:
path = '%s' % name
else:
path = path.strip('/')
if not resource_handler:
resource_handler = self._meta.resource_handler
resource.validate(resource_handler)
handler = resource_handler(self, name, path)
if hasattr(self, name):
raise exc.dRestResourceError(
"The object '%s' already exist on '%s'" % (name, self))
# break up if nested
parts = name.split('.')
if len(parts) == 1:
setattr(self, name, handler)
elif len(parts) > 1:
first = parts.pop(0)
last = parts.pop()
# add the first object to self
setattr(self, first, resource.NestedResource())
first_obj = getattr(self, first)
current_obj = first_obj
# everything in between
for part in parts:
setattr(current_obj, part, resource.NestedResource())
current_obj = getattr(current_obj, part)
# add the actual resource to the chain of nested objects
setattr(current_obj, last, handler)
self._resources.append(name)
class TastyPieAPI(API):
    """
    This class implements an API client, specifically tailored for
    interfacing with `TastyPie <http://django-tastypie.readthedocs.org/en/latest>`_.
    Optional / Meta Arguments:
        auth_mech
            The auth mechanism to use. One of ['basic', 'api_key'].
            Default: 'api_key'.
        auto_detect_resources
            Boolean. Whether or not to auto detect, and add resource objects
            to the api. Default: True.
    Authentication Mechanisms
        Currently the only supported authentication mechanism are:
        * ApiKeyAuthentication
        * BasicAuthentication
    Usage
        Please note that the following example use ficticious resource data.
        What is returned, and sent to the API is unique to the API itself. Please
        do not copy and paste any of the following directly without modifying the
        request parameters per your use case.
        Create the client object, and authenticate with a user/api_key pair by
        default:
        .. code-block:: python
            import drest
            api = drest.api.TastyPieAPI('http://localhost:8000/api/v0/')
            api.auth('john.doe', '34547a497326dde80bcaf8bcee43e3d1b5f24cc9')
        OR authenticate against HTTP Basic Auth:
        .. code-block:: python
            import drest
            api = drest.api.TastyPieAPI('http://localhost:8000/api/v0/',
                                        auth_mech='basic')
            api.auth('john.doe', 'my_password')
        As drest auto-detects TastyPie resources, you can view those at:
        .. code-block:: python
            api.resources
        And access their schema:
        .. code-block:: python
            api.users.schema
        As well as make the usual calls such as:
        .. code-block:: python
            api.users.get()
            api.users.get(<pk>)
            api.users.put(<pk>, data_dict)
            api.users.post(data_dict)
            api.users.delete(<pk>)
        What about filtering? (these depend on how the `API is configured <http://django-tastypie.readthedocs.org/en/latest/resources.html#basic-filtering>`_):
        .. code-block:: python
            api.users.get(params=dict(username='admin'))
            api.users.get(params=dict(username__icontains='admin'))
            ...
    See :mod:`drest.api.API` for more standard usage examples.
    """

    class Meta:
        """TastyPie-specific defaults layered on top of API.Meta."""
        request_handler = request.TastyPieRequestHandler
        resource_handler = resource.TastyPieResourceHandler
        auto_detect_resources = True
        auth_mech = 'api_key'
        auth_mechanizms = ['api_key', 'basic']

    def __init__(self, *args, **kw):
        super(TastyPieAPI, self).__init__(*args, **kw)
        if self._meta.auto_detect_resources:
            self.find_resources()

    def auth(self, *args, **kw):
        """
        Authenticate the request, determined by Meta.auth_mech. Arguments
        and Keyword arguments are just passed to the auth_mech function.
        """
        # Meta options are exposed via self._meta (as every other access in
        # this class does); `self.auth_mechanizms` would not exist on the
        # instance and raised AttributeError.
        if self._meta.auth_mech in self._meta.auth_mechanizms:
            func = getattr(self, '_auth_via_%s' % self._meta.auth_mech)
            func(*args, **kw)
        else:
            raise exc.dRestAPIError("Unknown TastyPie auth mechanism.")

    def _auth_via_basic(self, user, password, **kw):
        """
        This is just a wrapper around drest.api.API.auth().
        """
        return super(TastyPieAPI, self).auth(user, password)

    def _auth_via_api_key(self, user, api_key, **kw):
        """
        This authentication mechanism adds an Authorization header for
        user/api_key per the
        `TastyPie Documentation <http://django-tastypie.readthedocs.org/en/latest/authentication_authorization.html>`_.
        Required Arguments:
            user
                The API username.
            api_key
                The API Key of that user.
        """
        key = 'Authorization'
        value = 'ApiKey %s:%s' % (user, api_key)
        self.request.add_header(key, value)

    def find_resources(self):
        """
        Find available resources, and add them via add_resource().
        """
        response = self.make_request('GET', '/')
        # Named `resource_name` so the loop does not shadow the imported
        # `resource` module used elsewhere in this file.
        for resource_name in list(response.data.keys()):
            if resource_name not in self._resources:
                self.add_resource(resource_name)
| 33.177419 | 155 | 0.562678 | 14,239 | 0.988888 | 0 | 0 | 65 | 0.004514 | 0 | 0 | 9,875 | 0.685812 |
504170f7d179aab0a2ca38addb3c52b1e0a07920 | 519 | py | Python | ExIt/Expert/BaseExpert.py | LarsChrWiik/Expert-Iteration-Algorithmic-Comparison | daed2972159c451be19892ee31c413d60dd2f987 | [
"MIT"
] | 1 | 2019-03-01T15:46:06.000Z | 2019-03-01T15:46:06.000Z | ExIt/Expert/BaseExpert.py | LarsChrWiik/Expert-Iteration | daed2972159c451be19892ee31c413d60dd2f987 | [
"MIT"
] | null | null | null | ExIt/Expert/BaseExpert.py | LarsChrWiik/Expert-Iteration | daed2972159c451be19892ee31c413d60dd2f987 | [
"MIT"
] | null | null | null |
from ExIt.Apprentice import BaseApprentice
from Games.GameLogic import BaseGame
class BaseExpert:
    """ Class for the tree search algorithm used for policy improvement """

    def __init__(self):
        # Mirror the concrete subclass's name on the instance
        # (presumably for logging/identification — TODO confirm usage).
        self.__name__ = type(self).__name__

    def search(self, state: BaseGame, predictor: BaseApprentice, search_time, use_exploration_policy):
        """ Do policy improvement for a given state.

        :param state: the game state to run the search from.
        :param predictor: apprentice used to guide the search.
        :param search_time: search budget (units defined by subclasses).
        :param use_exploration_policy: whether to pick actions via an
            exploration policy rather than greedily.
        :return: a_explore, a_optimal, soft-z """
        raise NotImplementedError("Please Implement this method")
| 32.4375 | 102 | 0.722543 | 435 | 0.83815 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.375723 |
5044725258a6a777f86c0f33b6d96e0f8f308a62 | 1,235 | py | Python | monasca_common/kafka_lib/partitioner/base.py | zhangjm12/monasca-common | 2ebc766534eba6163e98b94a1f114ece18739fff | [
"Apache-2.0"
] | 26 | 2015-10-18T02:54:54.000Z | 2022-02-15T01:36:41.000Z | monasca_common/kafka_lib/partitioner/base.py | zhangjm12/monasca-common | 2ebc766534eba6163e98b94a1f114ece18739fff | [
"Apache-2.0"
] | 18 | 2019-11-01T13:03:36.000Z | 2022-02-16T02:28:52.000Z | monasca_common/kafka_lib/partitioner/base.py | zhangjm12/monasca-common | 2ebc766534eba6163e98b94a1f114ece18739fff | [
"Apache-2.0"
] | 22 | 2016-06-01T11:47:17.000Z | 2020-02-11T14:41:45.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class Partitioner(object):
    """Abstract base class for partition-selection strategies.

    Concrete partitioners override :meth:`partition` to choose which
    partition a given message key should be routed to.
    """

    def __init__(self, partitions):
        """Store the partitions known at startup.

        Arguments:
            partitions: A list of available partitions (during startup)
        """
        self.partitions = partitions

    def partition(self, key, partitions=None):
        """Choose a partition for a message.

        Arguments:
            key: the key to use for partitioning
            partitions: (optional) a list of partitions.

        Subclasses must implement this; the base class always raises.
        """
        raise NotImplementedError('partition function has to be implemented')
| 33.378378 | 77 | 0.680972 | 692 | 0.560324 | 0 | 0 | 0 | 0 | 0 | 0 | 1,015 | 0.821862 |
50454b178e9245d647f837e2ff8ebec1f13577e5 | 1,937 | py | Python | tests/test_options.py | inmanta/pytest-inmanta | 201a505f56b5a3c8acbcdc6d22f9f5329630c96a | [
"Apache-2.0"
] | null | null | null | tests/test_options.py | inmanta/pytest-inmanta | 201a505f56b5a3c8acbcdc6d22f9f5329630c96a | [
"Apache-2.0"
] | 59 | 2018-09-17T07:51:35.000Z | 2022-03-29T07:39:20.000Z | tests/test_options.py | inmanta/pytest-inmanta | 201a505f56b5a3c8acbcdc6d22f9f5329630c96a | [
"Apache-2.0"
] | 1 | 2019-08-07T14:38:57.000Z | 2019-08-07T14:38:57.000Z | """
Copyright 2020 Inmanta
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Contact: code@inmanta.com
"""
import os
import shutil
import uuid
import pytest_inmanta
def test_module_in_place(testdir):
    """Verify that --use-module-in-place runs the module from its checkout.

    The module is re-rooted under a directory named after the module so that
    the module name from module.yml appears in its parent path, then the
    test asserts that running in place creates 'testfile' in the checkout.
    """
    # copy_example copies the *contents* of the example directory, so the
    # pieces have to be moved under a 'testmodule' directory by hand.
    testdir.copy_example("testmodule")
    os.mkdir("testmodule")
    for source, destination in (
        ("model", "testmodule/model"),
        ("module.yml", "testmodule/module.yml"),
        ("plugins", "testmodule/plugins"),
        ("tests", "testmodule/tests"),
    ):
        shutil.move(source, destination)
    os.chdir("testmodule")
    module_dir = os.getcwd()
    marker = os.path.join(module_dir, "testfile")
    assert not os.path.exists(marker)
    pytest_inmanta.plugin.CURDIR = module_dir
    result = testdir.runpytest("tests/test_location.py", "--use-module-in-place")
    result.assert_outcomes(passed=1)
    # Running in place must have written the marker into the checkout itself.
    assert os.path.exists(marker)
def test_not_existing_venv_option(testdir, tmpdir):
    """Passing --venv with a non-existent directory must fail with an error."""
    testdir.copy_example("testmodule")
    # A random name under tmpdir is guaranteed not to exist.
    missing_venv = os.path.join(tmpdir, str(uuid.uuid4()))
    result = testdir.runpytest("tests/test_resource_run.py", "--venv", missing_venv)
    result.assert_outcomes(errors=1)
    output = "\n".join(result.outlines)
    assert f"Specified venv {missing_venv} does not exist" in output
| 33.396552 | 85 | 0.717088 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,183 | 0.610738 |
5046b56a13dbb3b70b3975b8d94901defe4984ac | 7,798 | py | Python | PyFSM/pyfsm/elements/state_unit_tests.py | wafec/wafec-py-fsm | 444b7b797411daa3186cf812535a660404951d00 | [
"MIT"
] | null | null | null | PyFSM/pyfsm/elements/state_unit_tests.py | wafec/wafec-py-fsm | 444b7b797411daa3186cf812535a660404951d00 | [
"MIT"
] | null | null | null | PyFSM/pyfsm/elements/state_unit_tests.py | wafec/wafec-py-fsm | 444b7b797411daa3186cf812535a660404951d00 | [
"MIT"
] | null | null | null | import unittest
from pyfsm.elements import *
class StateUnitSimpleFsmTests(unittest.TestCase):
def setUp(self):
self.fsm = StateUnit()
self.state1 = StateUnit()
self.state2 = StateUnit()
self.link1 = LinkElement()
self.fsm.starters = [self.state1]
self.state1.parent = self.fsm
self.state2.parent = self.fsm
self.state1.levelers = [self.state1]
self.state2.levelers = [self.state2]
self.state1.links = [self.link1]
self.link1.sources = [self.state1]
self.link1.destinations = [self.state2]
self.state3 = StateUnit()
self.link2 = LinkElement()
self.link2.sources = [self.state2]
self.link2.destinations = [self.state3]
self.state2.links = [self.link2]
self.state3.parent = self.fsm
self.state3.levelers = [self.state1]
self.fsm.children = [self.state1, self.state2, self.state3]
self.fsm.name = 'fsm'
self.state1.name = 'state1'
self.state2.name = 'state2'
self.state3.name = 'state3'
def test_fsm_with_enter(self):
event = TransportEvent()
event.id = 'test'
self.fsm.initialize(event)
self.assertTrue(self.state1.active)
self.assertTrue(self.fsm.active)
self.assertFalse(self.state2.active)
def test_fsm_with_first_transition(self):
event = TransportEvent()
event.id = 'test'
self.fsm.initialize(event)
self.fsm.receive(event)
self.assertTrue(self.fsm.active)
self.assertTrue(self.state2.active)
self.assertFalse(self.state1.active)
def test_fsm_with_second_transition(self):
event = TransportEvent()
event.id = 'test'
self.fsm.initialize(event)
self.fsm.receive(event)
self.fsm.receive(event)
self.assertTrue(self.state3.active)
self.assertFalse(self.state2.active)
self.assertFalse(self.state1.active)
self.assertTrue(self.fsm.active)
def test_fsm_with_entry_action(self):
count = 0
def increment_count(args):
nonlocal count
count += 1
event = TransportEvent()
event.id = 'test'
self.state1.entry_actions = [increment_count]
self.fsm.initialize(event)
self.fsm.receive(event)
self.assertEqual(1, count)
def test_fsm_with_entry_action_and_view(self):
def change_view(args):
args.event.view['test'] = 'value'
event = TransportEvent()
event.id = 'test'
self.state1.entry_actions = [change_view]
self.fsm.initialize(event)
self.fsm.receive(event)
self.assertEqual('value', event.view['test'])
def test_fsm_with_entry_in_all_units(self):
count = 0
def increment_count(args):
nonlocal count
count += 1
event = TransportEvent()
event.id = 'test'
self.fsm.entry_actions = [increment_count]
self.state1.entry_actions = [increment_count]
self.state2.entry_actions = [increment_count]
self.state3.entry_actions = [increment_count]
self.fsm.initialize(event)
self.fsm.receive(event)
self.fsm.receive(event)
self.assertEqual(4, count)
def test_fsm_with_exit(self):
count = 0
def increment_count(args):
nonlocal count
count += 1
self.state1.exit_actions = [increment_count]
event = TransportEvent()
event.id = 'test'
self.fsm.initialize(event)
self.fsm.receive(event)
self.assertEqual(1, count)
self.fsm.receive(event)
self.assertEqual(1, count)
def test_fsm_with_exit_and_entry(self):
count = 0
def increment_count(args):
nonlocal count
count += 1
def decrement_count(args):
nonlocal count
count -= 1
self.fsm.entry_actions = [increment_count]
self.fsm.exit_actions = [decrement_count]
self.state1.entry_actions = [increment_count]
self.state1.exit_actions = [decrement_count]
self.state2.entry_actions = [increment_count]
self.state2.exit_actions = [decrement_count]
self.state3.entry_actions = [increment_count]
self.state3.exit_actions = [increment_count]
event = TransportEvent()
event.id = 'test'
self.fsm.initialize(event)
self.assertEqual(2, count)
self.fsm.receive(event)
self.assertEqual(2, count)
self.fsm.receive(event)
self.assertEqual(2, count)
class StateUnitComplexFsmTests(unittest.TestCase):
    """Tests for an FSM with two parallel chains of states.

    Chain 1: state10 -> state11 -> state12, driven by event11/event12.
    Chain 2: state20 -> state21 -> state22 -> state23, driven by event21..23.
    Both chains start together (two starters on the same fsm).
    """
    def setUp(self):
        self.fsm = StateUnit()
        self.state10 = StateUnit()
        self.state20 = StateUnit()
        self.state11 = StateUnit()
        self.state12 = StateUnit()
        self.state21 = StateUnit()
        self.state22 = StateUnit()
        self.state23 = StateUnit()
        self.fsm.starters = [self.state10, self.state20]
        self.fsm.children = [self.state10, self.state11, self.state12, self.state20, self.state21, self.state22,
                             self.state23]
        self.state10.parent = self.fsm
        self.state11.parent = self.fsm
        self.state12.parent = self.fsm
        self.state20.parent = self.fsm
        self.state21.parent = self.fsm
        self.state22.parent = self.fsm
        self.state23.parent = self.fsm
        self.state10.levelers = [self.state10]
        self.state11.levelers = [self.state10]
        self.state12.levelers = [self.state10]
        self.state20.levelers = [self.state20]
        self.state21.levelers = [self.state20]
        self.state22.levelers = [self.state20]
        self.state23.levelers = [self.state20]
        self.link11 = LinkElement()
        self.link12 = LinkElement()
        self.link21 = LinkElement()
        self.link22 = LinkElement()
        self.link23 = LinkElement()
        self.link11.sources = [self.state10]
        self.link11.destinations = [self.state11]
        self.link12.sources = [self.state11]
        self.link12.destinations = [self.state12]
        # NOTE(review): link21/link22/link23 are re-instantiated below,
        # discarding the LinkElement objects created above — harmless but
        # redundant; the later assignments are the effective ones.
        self.link21 = LinkElement()
        self.link21.sources = [self.state20]
        self.link21.destinations = [self.state21]
        self.link22 = LinkElement()
        self.link22.sources = [self.state21]
        self.link22.destinations = [self.state22]
        self.link23 = LinkElement()
        self.link23.sources = [self.state22]
        self.link23.destinations = [self.state23]
        self.state10.links = [self.link11]
        self.state11.links = [self.link12]
        self.state20.links = [self.link21]
        self.state21.links = [self.link22]
        self.state22.links = [self.link23]
        # Each link only fires for the event ids it accepts.
        self.link11.accepts = ['event11']
        self.link12.accepts = ['event11', 'event12']
        self.link21.accepts = ['event21']
        self.link22.accepts = ['event21', 'event22']
        self.link23.accepts = ['event21', 'event22', 'event23']
        self.event11 = TransportEvent()
        self.event12 = TransportEvent()
        self.event21 = TransportEvent()
        self.event22 = TransportEvent()
        self.event23 = TransportEvent()
        self.event0 = TransportEvent()
        self.event0.id = 'event0'
        self.event11.id = 'event11'
        self.event12.id = 'event12'
        self.event21.id = 'event21'
        self.event22.id = 'event22'
        self.event23.id = 'event23'
    def test_fsm_with_event(self):
        """initialize() activates both starters and none of their successors."""
        self.fsm.initialize(self.event0)
        self.assertTrue(self.fsm.active)
        self.assertTrue(self.state10.active)
        self.assertTrue(self.state20.active)
        self.assertFalse(self.state11.active)
        self.assertFalse(self.state21.active)
| 35.445455 | 112 | 0.616184 | 7,746 | 0.993332 | 0 | 0 | 0 | 0 | 0 | 0 | 237 | 0.030392 |
5047525ddbf466e6c5243baf4d5f52864154f60d | 8,117 | py | Python | data_extraction.py | Qnouro/Gdelt-analysis | e24fb316a1458d51ceb4cf1ccca71b7395235b61 | [
"MIT"
] | 6 | 2020-10-09T16:15:12.000Z | 2022-02-17T02:04:13.000Z | data_extraction.py | Qnouro/Gdelt-analysis | e24fb316a1458d51ceb4cf1ccca71b7395235b61 | [
"MIT"
] | 2 | 2020-10-09T18:26:10.000Z | 2020-10-09T22:31:34.000Z | data_extraction.py | Qnouro/Gdelt-analysis | e24fb316a1458d51ceb4cf1ccca71b7395235b61 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
import sqlite3
import pandas as pd
import numpy as np
from scraper import create_data_folder, read_config
from collections import OrderedDict
def main():
    """
    Mainly for debugging purposes.

    Reads the scraper config, picks the first downloaded csv, extracts the
    event information and saves it under the extracted-data path.
    """
    config_file = read_config()
    # Pick a file
    try:
        csv_name = os.listdir(config_file["downloaded_data_path"])[0]
    except (KeyError, OSError, IndexError):
        # KeyError: config key missing; OSError: directory missing/unreadable;
        # IndexError: directory empty.  The previous bare `except:` also
        # swallowed KeyboardInterrupt/SystemExit.
        print("Could not read csv file.. Please check you've downloaded data beforehand using scraper.py.")
        sys.exit(1)
    # Read the data
    df = read_data(csv_name, config_file)
    # Extract information
    sanitized_dataframe = extract_event_information(df)
    # Save extracted information
    create_data_folder(config_file["extracted_data_path"])
    save_dataframe(sanitized_dataframe, "test", config_file)
def save_dataframe(df, df_root_name, config_file):
    """
    Handles all the saving process into SQL and CSV formats.
    @Param df: dataframe to save.
    @Param df_root_name: name of the file to create without the extension.
    @Param config_file: Configuration file.
    """
    extracted_dir = config_file["extracted_data_path"]
    db_path = os.path.join(extracted_dir, f"{df_root_name}.db")
    csv_path = os.path.join(extracted_dir, f"{df_root_name}.csv")
    # Persist to sqlite first, then derive the merged csv from that database.
    save_dataframe_to_sqlite(df, db_path)
    save_dataframe_to_csv(db_path, csv_path)
def save_dataframe_to_csv(db_path, save_path):
    """
    Saves the data as csv in the given path by reading the sqlite3 database.
    Makes sure to merge the values with those already existing at the same
    location (event, latitude, longitude).
    @Param db_path: path to the sqlite3 database.
    @Param save_path: path to the csv file to create.
    """
    # Read the SQL database; close the connection even if the read fails
    # (the previous version leaked the connection on error, and committed
    # a read-only connection for no reason).
    db = sqlite3.connect(db_path)
    try:
        db_df = pd.read_sql_query("SELECT * FROM events", db)
    finally:
        db.close()
    # Wrap each mergeable value in a one-element list so that summing the
    # lists during aggregation concatenates the values per location.
    for column in ("event_document", "event_date", "event_importance",
                   "event_source_name"):
        db_df[column] = db_df[column].apply(lambda x: [x])
    # merge lines with identical position and event.
    db_df = db_df.groupby(["event", "event_latitude", "event_longitude"],
                          as_index=False).aggregate(
        {'event_document': np.sum, "event_importance": np.sum,
         "event_date": np.sum, "event_source_name": np.sum})
    # Storing the information
    db_df.to_csv(save_path, mode='w', index=False)
def read_data(csv_name, config_file, add_root_dir=True):
    """
    Reads the csv file given and returns the associated dataframe.
    @Param csv_name: Name of the csv file to read.
    @Param config_file: Configuration file.
    @Param add_root_dir: When True, resolve csv_name relative to the
                         configured download directory.
    @Return: Dataframe containing the csv information.
    """
    print("Reading the csv file...")
    csv_path = csv_name
    if add_root_dir:
        csv_path = os.path.join(config_file["downloaded_data_path"], csv_name)
    # Avoid scientific notation when floats are printed.
    pd.set_option('display.float_format', lambda x: '%.3f' % x)
    # GDELT GKG export column layout (tab separated, no header row).
    column_names = [
        "ID", "event_date", "source_identifier", "source_name", "document_id",
        "V1Counts_10", "V2_1Counts", "V1Themes", "V2EnhancedThemes",
        "V1Locations", "V2EnhancedLocations", "V1Persons",
        "V2EnhancedPersons", "V1organizations", "V2EnhancedOrganizations",
        "V1_5tone", "V2_1EnhancedDates", "V2GCam", "V2_1SharingImage",
        "V2_1RelatedImages", "V2_1SocialImageEmbeds", "V2_1SocialVideoEmbeds",
        "V2_1Quotations", "V2_1AllNames", "V2_1Amounts",
        "V2_1TranslationInfo", "V2ExtrasXML",
    ]
    return pd.read_csv(csv_path,
                       delimiter="\t",
                       names=column_names,
                       encoding="ISO-8859-1")
def extract_event_information(dataframe):
    """
    Extracts the information related to the events from the dataframe and returns a transformed dataframe.
    The new dataframe contains information related to the event type, its importance and position (lat, long).
    @Params dataframe: represents all the information contained in the initial csv.
    @Return: dataframe containing the extracted information regarding the events.
    """
    print("Extracting information from the csv file...")
    # Column order matters: save_dataframe_to_sqlite reads rows positionally
    # in exactly this order.
    output_columns = ["event", "event_importance", "event_latitude",
                      "event_longitude", "event_date", "event_document",
                      "event_source_name"]
    # Removing NaN events (rows without a V1Counts_10 entry).
    main_series = dataframe[
        ["event_date", "V1Counts_10", "source_name", "document_id"]
    ].dropna(axis=0)
    # Collect plain dicts and build the dataframe once at the end:
    # DataFrame.append was removed in pandas 2.0 and was O(n^2) anyway.
    rows = []
    for _, row in main_series.iterrows():
        # V1Counts_10 format: type#count#...#latitude#longitude#... ('#'-separated).
        event_details = row["V1Counts_10"].split("#")
        rows.append({
            "event": event_details[0],
            "event_importance": event_details[1],
            "event_latitude": event_details[7],
            "event_longitude": event_details[8],
            "event_date": row["event_date"],
            "event_document": row["document_id"],
            "event_source_name": row["source_name"],
        })
    return pd.DataFrame(rows, columns=output_columns)
def save_dataframe_to_sqlite(sanitized_dataframe, destination_file):
    """
    Saves the dataframe information to a sqlite3 database.

    Rows are read positionally in the order produced by
    extract_event_information: event, event_importance, event_latitude,
    event_longitude, event_date, event_document, event_source_name.
    A row is skipped when an event with the same (event, importance,
    latitude, longitude) has already been stored.

    @Param sanitized_dataframe: Dataframe containing the information to save.
    @Param destination_file: Path to the database to save the information in.
                             If the database doesn't exist, creates it.
    """
    conn = sqlite3.connect(destination_file)
    try:
        c = conn.cursor()
        # Create table
        try:
            c.execute('''CREATE TABLE events
                     (event text, event_importance text, event_latitude real, event_longitude real, event_date integer, event_document text, event_source_name text, unique(event_date, event, event_importance, event_latitude, event_longitude))''')
            print("Created event table")
        except sqlite3.OperationalError as e:
            # Typically: table already exists on subsequent runs.
            print(e)
        # Populating the database with parameterized queries: the previous
        # f-string interpolation was injectable and broke on quotes.  Errors
        # now propagate to the caller instead of killing the process.
        for _, row in sanitized_dataframe.iterrows():
            values = list(row.iloc[0:7])
            # Empty coordinates are normalized to 0, as before.
            if values[2] == "":
                values[2] = 0
            if values[3] == "":
                values[3] = 0
            # Skip the insert if the same event was already reported at
            # this location.
            c.execute(
                "SELECT 1 FROM events WHERE event=? AND event_importance=? "
                "AND event_latitude=? AND event_longitude=?",
                (values[0], int(values[1]), float(values[2]), float(values[3])))
            if not c.fetchall():
                try:
                    c.execute(
                        "INSERT INTO events VALUES (?, ?, ?, ?, ?, ?, ?)",
                        tuple(values))
                except sqlite3.IntegrityError:
                    # Duplicated row (unique constraint)
                    pass
        # Save (commit) the changes
        conn.commit()
    finally:
        conn.close()
def save_dataframe_to_txt(sanitized_dataframe, destination_file):
    """
    Saves the dataframe information to a txt file.
    @Param sanitized_dataframe: Dataframe containing the information to save.
    @Param destination_file: Path to the file to save the information in.
    """
    # TODO: Change to a sqlite database ?
    print("Storing the event information into a txt file...")
    column_header = "event\tevent_importance\tevent_latitude\tevent_longitude"
    np.savetxt(destination_file,
               sanitized_dataframe.values,
               fmt='%s',
               delimiter="\t",
               header=column_header)
# Run the end-to-end extraction when executed as a script.
if __name__ == "__main__":
    main()
| 39.21256 | 246 | 0.668104 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,289 | 0.528397 |
5047aacc39f41b71f725e5f24c6849454e495033 | 7,799 | py | Python | predict_with_ssd7.py | esp32wrangler/ssd_keras | 632024599764a16b4791ce4a705f84273c6f7896 | [
"Apache-2.0"
] | null | null | null | predict_with_ssd7.py | esp32wrangler/ssd_keras | 632024599764a16b4791ce4a705f84273c6f7896 | [
"Apache-2.0"
] | null | null | null | predict_with_ssd7.py | esp32wrangler/ssd_keras | 632024599764a16b4791ce4a705f84273c6f7896 | [
"Apache-2.0"
] | null | null | null | from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau, TerminateOnNaN, CSVLogger
from keras import backend as K
from keras.models import load_model
from math import ceil
import numpy as np
from matplotlib import pyplot as plt
from models.keras_ssd7 import build_model
from keras_loss_function.keras_ssd_loss import SSDLoss
from keras_layers.keras_layer_AnchorBoxes import AnchorBoxes
from keras_layers.keras_layer_DecodeDetections import DecodeDetections
from keras_layers.keras_layer_DecodeDetectionsFast import DecodeDetectionsFast
from ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder
from ssd_encoder_decoder.ssd_output_decoder import decode_detections, decode_detections_fast
from data_generator.object_detection_2d_data_generator import DataGenerator
from data_generator.object_detection_2d_misc_utils import apply_inverse_transforms
from data_generator.data_augmentation_chain_variable_input_size import DataAugmentationVariableInputSize
from data_generator.data_augmentation_chain_constant_input_size import DataAugmentationConstantInputSize
from data_generator.data_augmentation_chain_original_ssd import SSDDataAugmentation
import cv2
# --- SSD7 model / anchor-box configuration (must match the trained weights) ---
img_height = 416 # Height of the input images
img_width = 416 # Width of the input images
img_channels = 3 # Number of color channels of the input images
intensity_mean = 127.5 # Set this to your preference (maybe `None`). The current settings transform the input pixel values to the interval `[-1,1]`.
intensity_range = 127.5 # Set this to your preference (maybe `None`). The current settings transform the input pixel values to the interval `[-1,1]`.
n_classes = 3 # Number of positive classes
scales = [0.08, 0.16, 0.32, 0.64, 0.96] # An explicit list of anchor box scaling factors. If this is passed, it will override `min_scale` and `max_scale`.
aspect_ratios = [0.5, 1.0, 2.0] # The list of aspect ratios for the anchor boxes
two_boxes_for_ar1 = True # Whether or not you want to generate two anchor boxes for aspect ratio 1
steps = None # In case you'd like to set the step sizes for the anchor box grids manually; not recommended
offsets = None # In case you'd like to set the offsets for the anchor box grids manually; not recommended
clip_boxes = False # Whether or not to clip the anchor boxes to lie entirely within the image boundaries
variances = [1.0, 1.0, 1.0, 1.0] # The list of variances by which the encoded target coordinates are scaled
normalize_coords = True # Whether or not the model is supposed to use coordinates relative to the image size
# Checkpoint file holding the trained SSD7 weights loaded below.
model_path = 'ssd7_epoch-07_loss-0.9988_val_loss-0.6916.h5'
# We need to create an SSDLoss object in order to pass that to the model loader.
ssd_loss = SSDLoss(neg_pos_ratio=3, alpha=1.0)
K.clear_session() # Clear previous models from memory.
# Rebuild the SSD7 architecture with the same hyper-parameters it was
# trained with, then load the checkpoint weights into it by layer name.
model = build_model(image_size=(img_height, img_width, img_channels),
                    n_classes=n_classes,
                    mode='training',
                    l2_regularization=0.0005,
                    scales=scales,
                    aspect_ratios_global=aspect_ratios,
                    aspect_ratios_per_layer=None,
                    two_boxes_for_ar1=two_boxes_for_ar1,
                    steps=steps,
                    offsets=offsets,
                    clip_boxes=clip_boxes,
                    variances=variances,
                    normalize_coords=normalize_coords,
                    subtract_mean=intensity_mean,
                    divide_by_stddev=intensity_range)
# 2: Optional: Load some weights
model.load_weights(model_path, by_name=True)
# Data generators are created but only used by the commented-out
# training/validation section below; inference reads images directly.
train_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
val_dataset = DataGenerator(load_images_into_memory=False, hdf5_dataset_path=None)
# 2: Parse the image and label lists for the training and validation datasets.
# TODO: Set the paths to your dataset here.
# Images
images_dir = './infiles'
# NOTE(review): the triple-quoted block below is disabled dataset/ground-truth
# code kept for reference; it is a no-op string expression at runtime.
'''
# Ground truth
train_labels_filename = 'onsite-images-export.csv'
val_labels_filename = 'onsite-images-valid.csv'
train_dataset.parse_csv(images_dir=images_dir,
                        labels_filename=train_labels_filename,
                        input_format=['image_name', 'xmin', 'ymin', 'xmax', 'ymax', 'class_id'], # This is the order of the first six columns in the CSV file that contains the labels for your dataset. If your labels are in XML format, maybe the XML parser will be helpful, check the documentation.
                        include_classes='all')
val_dataset.parse_csv(images_dir=images_dir,
                      labels_filename=val_labels_filename,
                      input_format=['image_name', 'xmin', 'ymin', 'xmax', 'ymax', 'class_id'],
                      include_classes='all')
train_dataset.create_hdf5_dataset(file_path='train_imgs.h5',
                                  resize=False,
                                  variable_image_size=True,
                                  verbose=True)
val_dataset.create_hdf5_dataset(file_path='val_imgs.h5',
                                resize=False,
                                variable_image_size=True,
                                verbose=True)
train_dataset_size = train_dataset.get_dataset_size()
val_dataset_size = val_dataset.get_dataset_size()
predict_generator = val_dataset.generate(batch_size=1,
                                         shuffle=True,
                                         transformations=[],
                                         label_encoder=None,
                                         returns={'processed_images',
                                                  'processed_labels',
                                                  'filenames'},
                                         keep_images_without_gt=False)
batch_images, batch_labels, batch_filenames = next(predict_generator)
i = 0 # Which batch item to look at
print("Image:", batch_filenames[i])
print()
print("Ground truth boxes:\n")
print(batch_labels[i])
'''
import glob
import os
# Run detection on every PNG under images_dir and write annotated copies to outfiles/.
fnames = glob.glob(images_dir + "/*.png")
#fnames = [images_dir+"/yellow834159457.png"]
for fn in fnames:
    # The frame comes back in BGR order; the network was fed RGB images.
    img = cv2.imread(fn)
    rgb_frame = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    batch = np.reshape(rgb_frame, (1, 416, 416, 3))
    raw_predictions = model.predict(batch)
    decoded = decode_detections(raw_predictions,
                                confidence_thresh=0.7,
                                iou_threshold=0.45,
                                top_k=200,
                                normalize_coords=normalize_coords,
                                img_height=img_height,
                                img_width=img_width)
    # One rectangle colour per class id (1-based).
    palette = [(0, 255, 255), (0, 0, 255), (0, 255, 0)]
    if len(decoded) > 0:
        for cls, conf, xmin, ymin, xmax, ymax in decoded[0].tolist():
            box_color = palette[int(cls) - 1]
            cv2.rectangle(img, (int(xmin), int(ymin)),
                          (int(xmax), int(ymax)), box_color, 2)
    # Annotated frames keep their original file name.
    cv2.imwrite("outfiles/" + os.path.basename(fn), img)
# NOTE(review): dead code -- this triple-quoted string is an expression
# statement that is evaluated and discarded; the snippet inside never runs.
'''
y_pred = model.predict(batch_images)
y_pred_decoded = decode_detections(y_pred,
                                   confidence_thresh=0.2,
                                   iou_threshold=0.45,
                                   top_k=200,
                                   normalize_coords=normalize_coords,
                                   img_height=img_height,
                                   img_width=img_width)
np.set_printoptions(precision=2, suppress=True, linewidth=90)
print("Predicted boxes:\n")
print(' class conf xmin ymin xmax ymax')
print(y_pred_decoded[i])
'''
504917355b6cf7fd121b21c62bef31888fe9fbc3 | 29 | py | Python | deploy/__init__.py | gaocegege/treadmill | 04325d319c0ee912c066f07b88b674e84485f154 | [
"Apache-2.0"
] | 2 | 2017-03-20T07:13:33.000Z | 2017-05-03T03:39:53.000Z | deploy/__init__.py | gaocegege/treadmill | 04325d319c0ee912c066f07b88b674e84485f154 | [
"Apache-2.0"
] | 12 | 2017-07-10T07:04:06.000Z | 2017-07-26T09:32:54.000Z | deploy/__init__.py | gaocegege/treadmill | 04325d319c0ee912c066f07b88b674e84485f154 | [
"Apache-2.0"
] | 2 | 2017-05-04T11:25:32.000Z | 2017-07-11T09:10:01.000Z | """Ansible playbooks init"""
| 14.5 | 28 | 0.689655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.965517 |
5049af1e05d73fa6840cb3f92f972bafd2d50cac | 12,339 | py | Python | a1/ANN.py | mishless/LearningSystems | 635d9af9d00ae0360d7ca8571bf47f782fdcdfe9 | [
"MIT"
] | 1 | 2021-08-01T03:30:49.000Z | 2021-08-01T03:30:49.000Z | a1/ANN.py | mishless/LearningSystems | 635d9af9d00ae0360d7ca8571bf47f782fdcdfe9 | [
"MIT"
] | null | null | null | a1/ANN.py | mishless/LearningSystems | 635d9af9d00ae0360d7ca8571bf47f782fdcdfe9 | [
"MIT"
] | null | null | null | #Artificial Neural Network
#includes
import configparser
import math
import matplotlib.pyplot as plt
import numpy as np
import random
from decimal import *
#global variables
weights = [];
topology = [];
data_training = [];
data_test = [];
learning_rate = 0;
weight_min = 0;
weight_max = 0;
error_terms = [];
outputs = [];
result_offset = 4.5;
partition_num = 0;
partition_size = 0;
data_sets = [];
scalings = [{'min' : 1.5*(10**9), 'max' : 2.5*(10**9)},
{'min' : 1.5*(10**8), 'max' : 4.5*(10**8)},
{'min' : 0, 'max' : 150}];
def read_config():
    """Load the hyper-parameters from config.txt into the module globals."""
    global partition_num
    global learning_rate
    global weight_min
    global weight_max
    global iteration_num
    parser = configparser.ConfigParser()
    parser.read("config.txt")
    general = parser["general"]
    # Topology is a comma-separated list of layer sizes.
    for field in general["topology"].split(","):
        topology.append(int(field))
    learning_rate = float(general['learning_rate'])
    weight_min = float(general['weight_min'])
    weight_max = float(general['weight_max'])
    partition_num = int(general['partition_num'])
def read_input():
    # NOTE(review): this definition is shadowed by the second read_input()
    # defined further down in this file; calls never reach this body.
    read_config();
def print_weights():
    """Dump every weight matrix (including biases) to stdout, layer by layer."""
    print("***** WEIGHTS *****")
    for layer in range(len(weights)):
        print("Layer 0 (%s -> %s):" % (topology[layer], topology[layer + 1]))
        print("---------------")
        for row in weights[layer]:
            for value in row:
                print("%.6f " % value, end="")
            print()
        print("---------------")
    print()
def fill_dummy_weights():
    """Debug aid: overwrite every weight with a deterministic 0.1, 0.2, ... sequence."""
    next_value = 0.1
    for matrix in weights:
        for row in matrix:
            for col in range(len(row)):
                row[col] = next_value
                next_value = next_value + 0.1
def fill_random_weights(min_limit, max_limit):
    """Overwrite every weight in place with a uniform value in [min_limit, max_limit]."""
    for matrix in weights:
        for row in matrix:
            for col in range(len(row)):
                row[col] = random.uniform(min_limit, max_limit)
def init_weights():
    """Allocate zeroed weight matrices; each row gets one extra slot for the bias."""
    for layer in range(len(topology) - 1):
        layer_weights = []
        for _ in range(topology[layer + 1]):
            # topology[layer] inputs plus the trailing bias slot.
            layer_weights.append([0] * (topology[layer] + 1))
        weights.append(layer_weights)
def init_error_terms():
    """Allocate one zeroed error term per node, mirroring the network topology."""
    for node_count in topology:
        error_terms.append([0] * node_count)
def init_outputs():
    """Allocate one zeroed output slot per node, mirroring the network topology."""
    for node_count in topology:
        outputs.append([0] * node_count)
def plot_sigmoid():
    """Visual sanity check: plot the activation function over [-8, 8)."""
    xs = np.arange(-8, 8, 0.1)
    ys = [sigmoid(x) for x in xs]
    plt.plot(xs, ys)
    plt.show()
def sigmoid(x):
    """Logistic function 1 / (1 + e^-x); the input is clamped at -100 so that
    math.exp never overflows for very negative arguments."""
    clamped = x if x >= -100 else -100
    return 1 / (1 + math.exp(-clamped))
def output_function(x):
    """Shift the sigmoid output up by the target offset (targets live near 4.5-5.5)."""
    activation = sigmoid(x)
    return activation + 4.5
def calculate_output(input_sample):
    # NOTE(review): calculate_net() (defined below) takes (layer, row) only;
    # this 3-argument call raises TypeError, so this function looks like stale
    # code from an older calculate_net signature -- confirm before using it.
    return output_function(calculate_net(len(topology)-1, 0, input_sample));
def print_nets(input_sample):
    # Debug dump of every node's net input.
    # NOTE(review): calculate_net() takes (layer, row) only; the 3-argument
    # call below raises TypeError, so this helper appears stale -- confirm
    # before use.
    print("***** NETS *****");
    for layer in range(0, len(topology)):
        print("Layer " + str(layer) + ":");
        for row in range(0, topology[layer]):
            print("%0.2f " % calculate_net(layer, row, input_sample), end = "");
        print();
    print();
def print_outputs():
    """Print every node activation from the last forward pass, grouped by layer."""
    print("***** OUTPUTS *****")
    for layer_index, node_count in enumerate(topology):
        print("Layer " + str(layer_index) + ":")
        for node in range(node_count):
            print("%0.20f " % outputs[layer_index][node], end="")
        print()
    print()
def print_error_terms():
    """Print every backprop delta from the last backward pass, grouped by layer."""
    print("***** ERROR TERMS *****")
    for layer_index, node_count in enumerate(topology):
        print("Layer " + str(layer_index) + ":")
        for node in range(node_count):
            print("%0.6f " % error_terms[layer_index][node], end="")
        print()
    print()
def read_input():
file = open("Data_Training.txt");
file_lines = file.readlines();
file.close();
for line in file_lines:
temp = line.split();
data_sample_strings = [temp[1], temp[18], temp[19], temp[21]];
data_sample_numbers = [];
for s in data_sample_strings:
data_sample_numbers.append(float(s));
data_training.append(data_sample_numbers);
file = open("Data_Test.txt");
file_lines = file.readlines();
file.close();
for line in file_lines:
temp = line.split();
data_sample_strings = [temp[1], temp[18], temp[19], temp[21]];
data_sample_numbers = [];
for s in data_sample_strings:
data_sample_numbers.append(float(s));
data_test.append(data_sample_numbers);
random.shuffle(data_training);
def partition_data():
    """Split the (already shuffled) training data into partition_num equal folds."""
    global partition_size
    partition_size = math.floor(len(data_training) / partition_num)
    print("Total data: " + str(len(data_training)))
    print("Partition size: " + str(partition_size))
    # Any trailing samples that do not fill a whole fold are dropped.
    for start in range(0, partition_size * partition_num, partition_size):
        data_sets.append(data_training[start:start + partition_size])
def examine_input():
    """Debug helper: collect each input column separately, then abort the run."""
    first = [sample[0] for sample in data_training]
    second = [sample[1] for sample in data_training]
    third = [sample[2] for sample in data_training]
    fourth = [sample[3] for sample in data_training]
    # Intentionally terminates the interpreter, exactly like the original.
    exit()
def scale_training_data():
    """Min-max normalise the input columns of every training sample, in place."""
    for sample in data_training:
        for col in range(topology[0]):
            low = scalings[col]['min']
            high = scalings[col]['max']
            sample[col] = (sample[col] - low) / (high - low)
def scale_test_data():
    """Min-max normalise the input columns of every test sample, in place."""
    for sample in data_test:
        for col in range(topology[0]):
            low = scalings[col]['min']
            high = scalings[col]['max']
            sample[col] = (sample[col] - low) / (high - low)
def scale_data():
    # Apply the same min/max normalisation to both data sets.
    scale_training_data();
    scale_test_data();
def init():
    # One-shot setup: read config + data files, normalise the inputs, allocate
    # the network state, randomise the weights and partition the data for
    # K-fold cross validation.
    read_config();
    read_input();
    scale_data();
    init_weights();
    fill_random_weights(weight_min, weight_max);
    init_error_terms();
    init_outputs();
    partition_data();
def calculate_output_error_term(target_output, calculated_output):
    """Backprop delta for the output node: (t - o) * o * (1 - o)."""
    difference = target_output - calculated_output
    return difference * calculated_output * (1 - calculated_output)
def calculate_net(layer, row):
    """Weighted input of node `row` in `layer`: dot(previous outputs, weights) + bias."""
    total = 0
    for source in range(topology[layer - 1]):
        total += outputs[layer - 1][source] * weights[layer - 1][row][source]
    # The final weight slot of each row holds the bias (input fixed at 1).
    return total + weights[layer - 1][row][-1]
def calculate_outputs(input_sample):
    """Forward pass: copy the sample into layer 0, then propagate layer by layer."""
    for node in range(topology[0]):
        outputs[0][node] = input_sample[node]
    for layer in range(1, len(topology)):
        for node in range(topology[layer]):
            outputs[layer][node] = sigmoid(calculate_net(layer, node))
def calculate_error_term(layer, row):
    """Backprop delta for a hidden node: weighted sum of the next layer's
    deltas, scaled by the sigmoid derivative o * (1 - o)."""
    downstream = 0
    for next_row in range(topology[layer + 1]):
        downstream += error_terms[layer + 1][next_row] * weights[layer][next_row][row]
    return downstream * outputs[layer][row] * (1 - outputs[layer][row])
def calculate_error_terms(target_output):
    """Fill error_terms back to front, starting from the single output node."""
    error_terms[-1][0] = calculate_output_error_term(target_output, outputs[-1][0])
    # Hidden layers, from the one before the output back to layer 1.
    for layer in range(len(topology) - 2, 0, -1):
        for row in range(topology[layer]):
            error_terms[layer][row] = calculate_error_term(layer, row)
def update_weights():
    """Gradient step: w += lr * delta * source output; the bias uses input 1."""
    for layer in range(len(topology) - 1):
        for dest in range(topology[layer + 1]):
            step = learning_rate * error_terms[layer + 1][dest]
            for src in range(topology[layer]):
                weights[layer][dest][src] += step * outputs[layer][src]
            # Bias slot: its virtual input is always 1.
            weights[layer][dest][-1] += step * 1
def iterate_once(data_list):
    """One online-training epoch over data_list; returns the mean squared error."""
    squared_errors = []
    for sample in data_list:
        calculate_outputs(sample[0:3])
        target = sample[3] - result_offset
        squared_errors.append((target - outputs[-1][0]) ** 2)
        calculate_error_terms(target)
        update_weights()
    return sum(squared_errors) / float(len(squared_errors))
def temp_test():
    """Debug helper: overfit the first training sample for 10000 steps and
    print the weights before and after."""
    sample = data_training[0]
    print_weights()
    for _ in range(10000):
        calculate_outputs(sample[0:3])
        target = sample[3] - result_offset
        calculate_error_terms(target)
        update_weights()
    print_weights()
def get_mean_error(data_list):
    """Mean squared error over data_list without updating the weights.

    NOTE: calculate_error_terms() is still invoked per sample, as in the
    original -- it only rewrites the error_terms scratch table, not weights.
    """
    squared_errors = []
    for sample in data_list:
        calculate_outputs(sample[0:3])
        target = sample[3] - result_offset
        squared_errors.append((target - outputs[-1][0]) ** 2)
        calculate_error_terms(target)
    return sum(squared_errors) / float(len(squared_errors))
def calculate_iteration_num(training, validation):
    # Estimate a useful epoch count: train from fresh random weights and stop
    # once the validation error has worsened ten times in a row (early stop).
    fill_random_weights(weight_min, weight_max);
    error_old = get_mean_error(validation);
    consecutive_worse_num = 0;
    iterations = 0;
    while True:
        iterate_once(training);
        iterations = iterations + 1;
        error_new = get_mean_error(validation);
        #print("Iteration = " + str(iterations) + ", error = " + str(error_new));
        if error_new > error_old:
            consecutive_worse_num = consecutive_worse_num + 1;
            if consecutive_worse_num == 10:
                break;
        else:
            consecutive_worse_num = 0;
            # The reference error only advances when the error did not grow,
            # so later epochs are judged against the best error seen so far.
            error_old = error_new;
    return iterations;
def train_network(number_of_iterations):
    """Run a fixed number of epochs; return the per-epoch training MSE curve."""
    return [iterate_once(data_training) for _ in range(number_of_iterations)]
def estimate_iteration_num():
    """Estimate a good epoch count via K-fold cross validation.

    Each fold in turn is used as the validation set; the remaining folds form
    the training set, and the early-stopping epoch counts are averaged.

    Fix: the original chained `if i == 0: ... if i == last: ... else: ...`,
    so the i == 0 branch was immediately overwritten by the else branch (both
    happened to compute the same slice).  The dead branch is removed and the
    training slice unified into one expression that covers every fold.
    """
    best_iterations = []
    total = partition_num * partition_size
    for fold in range(partition_num):
        fold_start = fold * partition_size
        fold_end = fold_start + partition_size
        validation = data_training[fold_start:fold_end]
        # Everything outside the validation fold (same slices the original produced).
        training = data_training[0:fold_start] + data_training[fold_end:total]
        print("Performing K-fold cross validation... %2d%%" % int(fold * 100 * partition_size / total))
        best_iterations.append(calculate_iteration_num(training, validation))
    average_iterations = int(sum(best_iterations) / len(best_iterations))
    print("Best iterations:" + str(best_iterations))
    print("Average best iterations: " + str(average_iterations))
    return average_iterations
def estimate_and_train():
    """Cross-validate for an epoch count, train 10 networks, keep the best one.

    Bug fixed: the original appended the *same* mutable `weights` object to
    weight_sets every cycle, so all ten entries aliased the final weights and
    the "pick the best cycle" step was a no-op.  A deep copy is stored
    instead, so the weights of the genuinely best cycle are restored.
    """
    import copy
    global weights
    all_errors = []
    errors = []
    weight_sets = []
    number_of_iterations = estimate_iteration_num()
    for cycle in range(10):
        print("Running training network, cycle " + str(cycle))
        fill_random_weights(weight_min, weight_max)
        all_errors.append(train_network(number_of_iterations))
        errors.append(get_mean_error(data_training))
        print("Error on whole training data set: " + str(errors[-1]))
        # Snapshot the weights; fill_random_weights/update_weights mutate them in place.
        weight_sets.append(copy.deepcopy(weights))
    best = errors.index(min(errors))
    weights = weight_sets[best]
    plt.plot(all_errors[best])
    plt.show()
    test_error = get_mean_error(data_test)
    print("Test data error is " + str(test_error))
#main
# Script entry point: build all state, then cross-validate and train.
init();
estimate_and_train();
| 31.557545 | 146 | 0.606775 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 883 | 0.071562 |
504a5158dee21a88d36bd8c0ab457b382a4195ba | 143 | py | Python | Python/CodingBat/string_times.py | dvt32/cpp-journey | afd7db7a1ad106c41601fb09e963902187ae36e6 | [
"MIT"
] | 1 | 2018-05-24T11:30:05.000Z | 2018-05-24T11:30:05.000Z | Python/CodingBat/string_times.py | dvt32/cpp-journey | afd7db7a1ad106c41601fb09e963902187ae36e6 | [
"MIT"
] | null | null | null | Python/CodingBat/string_times.py | dvt32/cpp-journey | afd7db7a1ad106c41601fb09e963902187ae36e6 | [
"MIT"
] | 2 | 2017-08-11T06:53:30.000Z | 2017-08-29T12:07:52.000Z | # http://codingbat.com/prob/p193507
def string_times(str, n):
result = ""
for i in range(0, n):
result += str
return result
| 14.3 | 35 | 0.601399 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.258741 |
504a8a1f14c3de8780d56a229c744af8d53c9b27 | 262 | py | Python | app/service/plugin_svc.py | muyenzo/caldera | 408f6d54239aa73832f474136ac8f64faab5be35 | [
"Apache-2.0"
] | null | null | null | app/service/plugin_svc.py | muyenzo/caldera | 408f6d54239aa73832f474136ac8f64faab5be35 | [
"Apache-2.0"
] | null | null | null | app/service/plugin_svc.py | muyenzo/caldera | 408f6d54239aa73832f474136ac8f64faab5be35 | [
"Apache-2.0"
] | null | null | null | from app.service.base_service import BaseService
class PluginService(BaseService):
def __init__(self, plugins):
self.plugins = plugins
self.log = self.add_service('plugin_svc', self)
def get_plugins(self):
return self.plugins
| 21.833333 | 55 | 0.698473 | 210 | 0.801527 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.045802 |
504a9f6ead52901b47b1d2e2ebcb578f28c594cc | 3,709 | py | Python | tests/node_io/test_ethernet.py | SpiNNakerManchester/nengo_spinnaker | 147e2b3d6c0965259d6897f177f23e5c99b184f9 | [
"MIT"
] | 13 | 2015-06-10T08:58:05.000Z | 2022-03-29T08:20:14.000Z | tests/node_io/test_ethernet.py | SpiNNakerManchester/nengo_spinnaker | 147e2b3d6c0965259d6897f177f23e5c99b184f9 | [
"MIT"
] | 131 | 2015-04-16T15:17:12.000Z | 2020-06-19T05:38:56.000Z | tests/node_io/test_ethernet.py | SpiNNakerManchester/nengo_spinnaker | 147e2b3d6c0965259d6897f177f23e5c99b184f9 | [
"MIT"
] | 7 | 2015-07-01T00:01:50.000Z | 2018-06-28T10:12:18.000Z | import nengo
import pytest
from nengo_spinnaker.builder import Model
from nengo_spinnaker.builder.ports import OutputPort, InputPort
from nengo_spinnaker.node_io import ethernet as ethernet_io
from nengo_spinnaker.operators import SDPReceiver, SDPTransmitter
@pytest.mark.parametrize("transmission_period", [0.001, 0.002])
def test_Ethernet_init(transmission_period):
    """Ethernet() must store the transmission period and start with an empty
    host network and an empty, lock-guarded node-input table."""
    io = ethernet_io.Ethernet(transmission_period=transmission_period)

    # The period passed in is stored verbatim.
    assert io.transmission_period == transmission_period

    # The host network starts with no objects, connections or probes.
    assert io.host_network.all_objects == []
    assert io.host_network.all_connections == []
    assert io.host_network.all_probes == []

    # The node-input dictionary is empty and its lock is usable.
    with io.node_input_lock:
        assert io.node_input == {}
def test_get_spinnaker_source_for_node():
"""Check that getting the SpiNNaker source for a Node returns an SDP Rx
operator as the source object with OutputPort.standard as the port. The
spec should indicate that the connection should be latching.
"""
with nengo.Network():
a = nengo.Node(lambda t: t**2, size_out=1)
b = nengo.Ensemble(100, 1)
a_b = nengo.Connection(a, b)
# Create an empty model and an Ethernet object
model = Model()
io = ethernet_io.Ethernet()
spec = io.get_node_source(model, a_b)
assert isinstance(spec.target.obj, SDPReceiver)
assert spec.target.port is OutputPort.standard
assert spec.latching
assert model.extra_operators == [spec.target.obj]
def test_get_spinnaker_source_for_node_repeated():
"""Getting the source twice for the same Node should return the same
object.
"""
with nengo.Network():
a = nengo.Node(lambda t: t**2, size_out=1)
b = nengo.Ensemble(100, 1)
a_b0 = nengo.Connection(a, b)
a_b1 = nengo.Connection(a, b, transform=-0.5)
# Create an empty model and an Ethernet object
model = Model()
io = ethernet_io.Ethernet()
spec0 = io.get_node_source(model, a_b0)
spec1 = io.get_node_source(model, a_b1)
assert spec0.target.obj is spec1.target.obj
assert model.extra_operators == [spec0.target.obj]
def test_get_spinnaker_sink_for_node():
"""Check that getting the SpiNNaker sink for a Node returns an SDP Tx
operator as the sink object with InputPort.standard as the port.
"""
with nengo.Network():
a = nengo.Ensemble(100, 1)
b = nengo.Node(lambda t, x: None, size_in=1)
a_b = nengo.Connection(a, b)
# Create an empty model and an Ethernet object
model = Model()
io = ethernet_io.Ethernet()
spec = io.get_node_sink(model, a_b)
assert isinstance(spec.target.obj, SDPTransmitter)
assert spec.target.port is InputPort.standard
assert model.extra_operators == [spec.target.obj]
def test_get_spinnaker_sink_for_node_repeated():
"""Check that getting the SpiNNaker sink for a Node twice returns the same
target.
"""
with nengo.Network():
a = nengo.Ensemble(100, 1)
b = nengo.Node(lambda t, x: None, size_in=1)
a_b0 = nengo.Connection(a, b)
a_b1 = nengo.Connection(a, b, synapse=0.3)
# Create an empty model and an Ethernet object
model = Model()
io = ethernet_io.Ethernet()
spec0 = io.get_node_sink(model, a_b0)
spec1 = io.get_node_sink(model, a_b1)
assert spec0.target.obj is spec1.target.obj
assert model.extra_operators == [spec0.target.obj]
| 34.027523 | 78 | 0.701267 | 0 | 0 | 0 | 0 | 759 | 0.204637 | 0 | 0 | 1,031 | 0.277972 |
504ae56ee7d72c3c28a102b8af0d776ce3b43014 | 59,138 | py | Python | aliyun/log/logclient.py | SeraphLiu/aliyun-log-sdk-python | 35f608bd6de9f5ed7a89c40288c550cfc3bea8ba | [
"BSD-3-Clause"
] | null | null | null | aliyun/log/logclient.py | SeraphLiu/aliyun-log-sdk-python | 35f608bd6de9f5ed7a89c40288c550cfc3bea8ba | [
"BSD-3-Clause"
] | null | null | null | aliyun/log/logclient.py | SeraphLiu/aliyun-log-sdk-python | 35f608bd6de9f5ed7a89c40288c550cfc3bea8ba | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
# Copyright (C) Alibaba Cloud Computing
# All rights reserved.
import sys
import requests
try:
import json
except ImportError:
import simplejson as json
try :
import logservice_lz4
except ImportError:
pass
from datetime import datetime
from log_logs_pb2 import LogGroup
from aliyun.log.util import Util
from aliyun.log.logexception import LogException
from aliyun.log.getlogsresponse import GetLogsResponse
from aliyun.log.putlogsresponse import PutLogsResponse
from aliyun.log.listtopicsresponse import ListTopicsResponse
from aliyun.log.listlogstoresresponse import ListLogstoresResponse
from aliyun.log.gethistogramsresponse import GetHistogramsResponse
from aliyun.log.logstore_config_response import CreateLogStoreResponse
from aliyun.log.logstore_config_response import DeleteLogStoreResponse
from aliyun.log.logstore_config_response import GetLogStoreResponse
from aliyun.log.logstore_config_response import UpdateLogStoreResponse
from aliyun.log.logstore_config_response import ListLogStoreResponse
from aliyun.log.pulllog_response import PullLogResponse
from aliyun.log.cursor_response import GetCursorResponse
from aliyun.log.cursor_time_response import GetCursorTimeResponse
from aliyun.log.index_config_response import CreateIndexResponse
from aliyun.log.index_config_response import UpdateIndexResponse
from aliyun.log.index_config_response import DeleteIndexResponse
from aliyun.log.index_config_response import GetIndexResponse
from aliyun.log.logtail_config_response import CreateLogtailConfigResponse
from aliyun.log.logtail_config_response import UpdateLogtailConfigResponse
from aliyun.log.logtail_config_response import DeleteLogtailConfigResponse
from aliyun.log.logtail_config_response import GetLogtailConfigResponse
from aliyun.log.logtail_config_response import ListLogtailConfigResponse
from aliyun.log.machinegroup_response import CreateMachineGroupResponse
from aliyun.log.machinegroup_response import UpdateMachineGroupResponse
from aliyun.log.machinegroup_response import DeleteMachineGroupResponse
from aliyun.log.machinegroup_response import GetMachineGroupResponse
from aliyun.log.machinegroup_response import ListMachineGroupResponse
from aliyun.log.machinegroup_response import ListMachinesResponse
from aliyun.log.machinegroup_response import ApplyConfigToMachineGroupResponse
from aliyun.log.machinegroup_response import RemoveConfigToMachineGroupResponse
from aliyun.log.machinegroup_response import GetMachineGroupAppliedConfigResponse
from aliyun.log.machinegroup_response import GetConfigAppliedMachineGroupsResponse
from aliyun.log.acl_response import UpdateAclResponse
from aliyun.log.acl_response import ListAclResponse
from aliyun.log.shard_response import ListShardResponse
from aliyun.log.shard_response import DeleteShardResponse
from aliyun.log.shipper_response import CreateShipperResponse
from aliyun.log.shipper_response import UpdateShipperResponse
from aliyun.log.shipper_response import DeleteShipperResponse
from aliyun.log.shipper_response import GetShipperConfigResponse
from aliyun.log.shipper_response import ListShipperResponse
from aliyun.log.shipper_response import GetShipperTasksResponse
from aliyun.log.shipper_response import RetryShipperTasksResponse
from aliyun.log.project_response import CreateProjectResponse
from aliyun.log.project_response import DeleteProjectResponse
from aliyun.log.project_response import GetProjectResponse
CONNECTION_TIME_OUT = 20               # per-request HTTP timeout, in seconds (passed to requests)
API_VERSION = '0.6.0'                  # sent in the x-log-apiversion header
USER_AGENT = 'log-python-sdk-v-0.6.1'  # sent in the User-Agent header
"""
LogClient class is the main class in the SDK. It can be used to communicate with
log service server to put/get data.
:Author: log_dev
"""
class LogClient(object):
""" Construct the LogClient with endpoint, accessKeyId, accessKey.
:type endpoint: string
:param endpoint: log service host name, for example, http://ch-hangzhou.sls.aliyuncs.com
:type accessKeyId: string
:param accessKeyId: aliyun accessKeyId
:type accessKey: string
:param accessKey: aliyun accessKey
"""
__version__ = API_VERSION
Version = __version__
    def __init__(self, endpoint, accessKeyId, accessKey,securityToken = None):
        """Store the credentials, parse the endpoint and resolve the local
        source IP.  Python 2: unicode inputs are normalised to ascii str so
        later header/signature building works on byte strings."""
        if isinstance(endpoint, unicode): # ensure is ascii str
            endpoint = endpoint.encode('ascii')
        if isinstance(accessKeyId, unicode):
            accessKeyId = accessKeyId.encode('ascii')
        if isinstance(accessKey, unicode):
            accessKey = accessKey.encode('ascii')
        self._isRowIp = True  # default; _setendpoint() recomputes it below
        self._port = 80       # default; _setendpoint() may override it
        self._setendpoint(endpoint)
        self._accessKeyId = accessKeyId
        self._accessKey = accessKey
        self._timeout = CONNECTION_TIME_OUT
        # Local IP, used as the default log Source when a request supplies none.
        self._source = Util.get_host_ip(self._logHost)
        self._securityToken = securityToken;
def _setendpoint(self, endpoint):
pos = endpoint.find('://')
if pos != -1:
endpoint = endpoint[pos + 3:] # strip http://
pos = endpoint.find('/')
if pos != -1:
endpoint = endpoint[:pos]
pos = endpoint.find(':')
if pos != -1:
self._port = int(endpoint[pos + 1:])
endpoint = endpoint[:pos]
self._isRowIp = Util.is_row_ip(endpoint)
self._logHost = endpoint
self._endpoint = endpoint + ':' + str(self._port)
def _getGMT(self):
return datetime.utcnow().strftime('%a, %d %b %Y %H:%M:%S GMT')
def _loadJson(self, respText, requestId):
if not respText:
return None
try:
return json.loads(respText)
except:
raise LogException('BadResponse',
'Bad json format:\n%s' % respText,
requestId)
def _getHttpResponse(self, method, url, params, body, headers): # ensure method, url, body is str
try :
headers['User-Agent'] = USER_AGENT
r = None
if method.lower() == 'get' :
r = requests.get(url, params = params, data = body, headers = headers, timeout = self._timeout)
elif method.lower() == 'post':
r = requests.post(url, params = params, data = body, headers = headers, timeout = self._timeout)
elif method.lower() == 'put':
r = requests.put(url, params = params, data = body, headers = headers, timeout = self._timeout)
elif method.lower() == 'delete':
r = requests.delete(url, params = params, data = body, headers = headers, timeout = self._timeout)
return (r.status_code, r.content, r.headers)
except Exception, ex:
raise LogException('LogRequestError', str(ex))
    def _sendRequest(self, method, url, params, body, headers, respons_body_type ='json'):
        """Issue the HTTP call and convert the outcome into (body, headers).

        On HTTP 200 the body is returned parsed as JSON (or raw when
        respons_body_type != 'json').  On any other status the body is parsed
        as the service's error document and re-raised as a LogException.
        """
        (status, respText, respHeader) = self._getHttpResponse(method, url, params, body, headers)
        # Copy the response headers into a plain dict.
        header = {}
        for key, value in respHeader.items():
            header[key] = value
        requestId = header['x-log-requestid'] if 'x-log-requestid' in header else ''
        exJson = None
        header = Util.convert_unicode_to_str(header)
        if status == 200 :
            if respons_body_type == 'json' :
                exJson = self._loadJson(respText, requestId)
                #exJson = Util.convert_unicode_to_str(exJson)
                return (exJson, header)
            else :
                return (respText, header)
        # Non-200: expect an error document with 'errorCode'/'errorMessage';
        # anything else is reported as a generic LogRequestError.
        exJson = self._loadJson(respText.encode('utf-8'), requestId)
        exJson = Util.convert_unicode_to_str(exJson)
        if 'errorCode' in exJson and 'errorMessage' in exJson:
            raise LogException(exJson['errorCode'], exJson['errorMessage'], requestId)
        else:
            exJson = '. Return json is '+str(exJson) if exJson else '.'
            raise LogException('LogRequestError',
                               'Request is failed. Http code is '+str(status)+exJson, requestId)
    def _send(self, method, project, body, resource, params, headers, respons_body_type ='json'):
        """Fill in the standard headers, sign the request and dispatch it.

        Adds content length/MD5, API version, signature method, Host/Date and
        the optional security token, then computes the LOG authorization
        signature and delegates to _sendRequest().
        """
        if body:
            headers['Content-Length'] = str(len(body))
            headers['Content-MD5'] = Util.cal_md5(body)
        else:
            headers['Content-Length'] = '0'
            headers["x-log-bodyrawsize"] = '0'
        headers['x-log-apiversion'] = API_VERSION
        headers['x-log-signaturemethod'] = 'hmac-sha1'
        url = ''
        if self._isRowIp:
            # Raw-IP endpoint: no project virtual host.
            url = "http://" + self._endpoint
        else:
            # Project name becomes a virtual-host prefix of the endpoint.
            url = "http://" + project + "." + self._endpoint
            headers['Host'] = project + "." + self._logHost
        headers['Date'] = self._getGMT()
        if self._securityToken != None and self._securityToken != "" :
            headers["x-acs-security-token"] = self._securityToken
        signature = Util.get_request_authorization(method, resource,
                                                   self._accessKey, params, headers)
        headers['Authorization'] = "LOG " + self._accessKeyId + ':' + signature
        url = url + resource
        return self._sendRequest(method, url, params, body, headers, respons_body_type)
    def get_unicode(self, key):
        """Return `key` as unicode, decoding UTF-8 byte strings.

        NOTE(review): relies on the Python 2 `unicode` builtin; this method
        (like the checks in __init__) will not run under Python 3.
        """
        if isinstance(key, str):
            key = unicode(key, 'utf-8')
        return key
    def put_logs(self, request):
        """ Put logs to log service.
        Unsuccessful opertaion will cause an LogException.

        Builds a protobuf LogGroup from the request, enforces the service
        limits (4096 items, 3 MB serialized), optionally lz4-compresses the
        body, and POSTs it to the shard route chosen by the request.

        :type request: PutLogsRequest
        :param request: the PutLogs request parameters class

        :return: PutLogsResponse

        :raise: LogException
        """
        # Service limit: at most 4096 log items per request.
        if len(request.get_log_items()) > 4096:
            raise LogException('InvalidLogSize',
                               "logItems' length exceeds maximum limitation: 4096 lines.")
        logGroup = LogGroup()
        logGroup.Topic = request.get_topic()
        if request.get_source():
            logGroup.Source = request.get_source()
        else:
            # No explicit source: fall back to the local IP, re-resolving it
            # against the project host if the cached value is the loopback.
            if self._source=='127.0.0.1':
                self._source = Util.get_host_ip(request.get_project() + '.' + self._logHost)
            logGroup.Source = self._source
        # Copy every item into the protobuf message (keys/values as unicode).
        for logItem in request.get_log_items():
            log = logGroup.Logs.add()
            log.Time = logItem.get_time()
            contents = logItem.get_contents()
            for key, value in contents:
                content = log.Contents.add()
                content.Key = self.get_unicode(key)
                content.Value = self.get_unicode(value)
        body = logGroup.SerializeToString()
        # Service limit: serialized payload must stay under 3 MB.
        if len(body) > 3 * 1024 * 1024: # 3 MB
            raise LogException('InvalidLogSize',
                               "logItems' size exceeds maximum limitation: 3 MB.")
        headers = {}
        headers['x-log-bodyrawsize'] = str(len(body))
        headers['Content-Type'] = 'application/x-protobuf'
        is_compress = request.get_compress()
        compress_data = None
        if is_compress :
            headers['x-log-compresstype'] = 'lz4'
            compress_data = logservice_lz4.compress(body)
        params = {}
        logstore = request.get_logstore()
        project = request.get_project()
        resource = '/logstores/' + logstore
        # With a hash key the write is routed to a specific shard; otherwise
        # the service load-balances across shards.
        if request.get_hash_key() is not None:
            resource = '/logstores/' + logstore+"/shards/route"
            params["key"] = request.get_hash_key()
        else:
            resource = '/logstores/' + logstore+"/shards/lb"
        respHeaders = None
        if is_compress :
            respHeaders = self._send('POST', project, compress_data, resource, params, headers)
        else :
            respHeaders = self._send('POST', project, body, resource, params, headers)
        return PutLogsResponse(respHeaders[1])
def list_logstores(self, request):
""" List all logstores of requested project.
Unsuccessful opertaion will cause an LogException.
:type request: ListLogstoresRequest
:param request: the ListLogstores request parameters class.
:return: ListLogStoresResponse
:raise: LogException
"""
headers = {}
params = {}
resource = '/logstores'
project = request.get_project()
(resp, header) = self._send("GET", project, None, resource, params, headers)
return ListLogstoresResponse(resp, header)
def list_topics(self, request):
""" List all topics in a logstore.
Unsuccessful opertaion will cause an LogException.
:type request: ListTopicsRequest
:param request: the ListTopics request parameters class.
:return: ListTopicsResponse
:raise: LogException
"""
headers = {}
params = {}
if request.get_token()!=None:
params['token'] = request.get_token()
if request.get_line()!=None:
params['line'] = request.get_line()
params['type'] = 'topic'
logstore = request.get_logstore()
project = request.get_project()
resource = "/logstores/" + logstore
(resp, header) = self._send("GET", project, None, resource, params, headers)
return ListTopicsResponse(resp, header)
def get_histograms(self, request):
""" Get histograms of requested query from log service.
Unsuccessful opertaion will cause an LogException.
:type request: GetHistogramsRequest
:param request: the GetHistograms request parameters class.
:return: GetHistogramsResponse
:raise: LogException
"""
headers = {}
params = {}
if request.get_topic()!=None:
params['topic'] = request.get_topic()
if request.get_from()!=None:
params['from'] = request.get_from()
if request.get_to()!=None:
params['to'] = request.get_to()
if request.get_query()!=None:
params['query'] = request.get_query()
params['type'] = 'histogram'
logstore = request.get_logstore()
project = request.get_project()
resource = "/logstores/" + logstore
(resp, header) = self._send("GET", project, None, resource, params, headers)
return GetHistogramsResponse(resp, header)
def get_logs(self, request):
""" Get logs from log service.
Unsuccessful opertaion will cause an LogException.
:type request: GetLogsRequest
:param request: the GetLogs request parameters class.
:return: GetLogsResponse
:raise: LogException
"""
headers = {}
params = {}
if request.get_topic()!=None:
params['topic'] = request.get_topic()
if request.get_from()!=None:
params['from'] = request.get_from()
if request.get_to()!=None:
params['to'] = request.get_to()
if request.get_query()!=None:
params['query'] = request.get_query()
params['type'] = 'log'
if request.get_line()!=None:
params['line'] = request.get_line()
if request.get_offset()!=None:
params['offset'] = request.get_offset()
if request.get_reverse()!=None:
params['reverse'] = 'true' if request.get_reverse() else 'false'
logstore = request.get_logstore()
project = request.get_project()
resource = "/logstores/" + logstore
(resp, header) = self._send("GET", project, None, resource, params, headers)
return GetLogsResponse(resp, header)
def get_cursor(self, project_name, logstore_name, shard_id, start_time):
    """Obtain a cursor from log service for batch pulling logs.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :param shard_id: the shard id (int)
    :param start_time: the start time of the cursor, e.g. 1441093445 (int)
    :return: GetCursorResponse
    :raise: LogException
    """
    headers = {'Content-Type': 'application/json'}
    params = {'type': 'cursor', 'from': str(start_time)}
    resource = "/logstores/%s/shards/%s" % (logstore_name, shard_id)
    resp, header = self._send("GET", project_name, None, resource, params, headers)
    return GetCursorResponse(resp, header)
def get_cursor_time(self, project_name, logstore_name, shard_id, cursor):
    """Ask log service for the server-side receive time of a cursor.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :param shard_id: the shard id (int)
    :param cursor: the cursor whose service receive time is wanted (string)
    :return: GetCursorTimeResponse
    :raise: LogException
    """
    headers = {'Content-Type': 'application/json'}
    params = {'type': 'cursor_time', 'cursor': cursor}
    resource = "/logstores/%s/shards/%s" % (logstore_name, shard_id)
    resp, header = self._send("GET", project_name, None, resource, params, headers)
    return GetCursorTimeResponse(resp, header)
def get_begin_cursor(self, project_name, logstore_name, shard_id):
    """Return the earliest ("begin") cursor of a shard.

    Thin wrapper around :meth:`get_cursor` with the special value "begin".
    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :param shard_id: the shard id (int)
    :return: GetCursorResponse
    :raise: LogException
    """
    return self.get_cursor(project_name, logstore_name, shard_id, "begin")
def get_end_cursor(self, project_name, logstore_name, shard_id):
    """Return the latest ("end") cursor of a shard.

    Thin wrapper around :meth:`get_cursor` with the special value "end".
    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :param shard_id: the shard id (int)
    :return: GetCursorResponse
    :raise: LogException
    """
    return self.get_cursor(project_name, logstore_name, shard_id, "end")
def pull_logs(self, project_name, logstore_name, shard_id, cursor, count = 1000, end_cursor = None, compress=False):
    """ batch pull log data from log service
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type logstore_name: string
    :param logstore_name: the logstore name
    :type shard_id: int
    :param shard_id: the shard id
    :type cursor: string
    :param cursor: the start cursor to get data
    :type count: int
    :param count: the required pull log package count, default 1000 packages
    :type end_cursor: string
    :param end_cursor: the end cursor position to get data
    :type compress: boolean
    :param compress: if use lz4 compress for transfer data
    :return: PullLogResponse
    :raise: LogException
    """
    headers = {}
    # An empty Accept-Encoding explicitly opts out of lz4 on the server side.
    headers['Accept-Encoding'] = 'lz4' if compress else ''
    headers['Accept'] = 'application/x-protobuf'
    params = {}
    resource = "/logstores/" + logstore_name + "/shards/" + str(shard_id)
    params['type'] = 'log'
    params['cursor'] = cursor
    params['count'] = str(count)
    # Truthiness covers both None and the empty string.
    if end_cursor:
        params['end_cursor'] = end_cursor
    (resp, header) = self._send("GET", project_name, None, resource, params, headers, "binary")
    if compress:
        # The uncompressed payload size travels in x-log-bodyrawsize.
        raw_size = int(header['x-log-bodyrawsize'])
        raw_data = logservice_lz4.uncompress(raw_size, resp)
        return PullLogResponse(raw_data, header)
    return PullLogResponse(resp, header)
def create_logstore(self, project_name, logstore_name, ttl, shard_count):
    """ create log store
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type logstore_name: string
    :param logstore_name: the logstore name
    :type ttl: int
    :param ttl: the life cycle of log in the logstore in days
    :type shard_count: int
    :param shard_count: the shard count of the logstore to create
    :return: CreateLogStoreResponse
    :raise: LogException
    """
    headers = {}
    params = {}
    headers["x-log-bodyrawsize"] = '0'
    headers["Content-Type"] = "application/json"
    resource = "/logstores"
    body = {}
    # Keep the name as str: json.dumps() cannot serialize bytes on
    # Python 3, so the previous .encode("utf-8") raised a TypeError there.
    body["logstoreName"] = logstore_name
    body["ttl"] = int(ttl)
    body["shardCount"] = int(shard_count)
    body_str = json.dumps(body)
    (resp, header) = self._send("POST", project_name, body_str, resource, params, headers)
    return CreateLogStoreResponse(header)
def delete_logstore(self, project_name, logstore_name):
    """Delete a logstore.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :return: DeleteLogStoreResponse
    :raise: LogException
    """
    resource = "/logstores/%s" % logstore_name
    resp, header = self._send("DELETE", project_name, None, resource, {}, {})
    return DeleteLogStoreResponse(header)
def get_logstore(self, project_name, logstore_name):
    """Fetch the meta info of a logstore.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :return: GetLogStoreResponse
    :raise: LogException
    """
    resource = "/logstores/%s" % logstore_name
    resp, header = self._send("GET", project_name, None, resource, {}, {})
    return GetLogStoreResponse(resp, header)
def update_logstore(self, project_name, logstore_name, ttl, shard_count):
    """
    update the logstore meta info
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type logstore_name: string
    :param logstore_name: the logstore name
    :type ttl: int
    :param ttl: the life cycle of log in the logstore in days
    :type shard_count: int
    :param shard_count: the shard count of the logstore
    :return: UpdateLogStoreResponse
    :raise: LogException
    """
    headers = {}
    headers["x-log-bodyrawsize"] = '0'
    headers["Content-Type"] = "application/json"
    params = {}
    resource = "/logstores/" + logstore_name
    body = {}
    body["logstoreName"] = logstore_name
    # Plain int() calls; the former C-style "(int)(x)" casts and trailing
    # semicolons were non-idiomatic Python.
    body["ttl"] = int(ttl)
    body["shardCount"] = int(shard_count)
    body_str = json.dumps(body)
    (resp, header) = self._send("PUT", project_name, body_str, resource, params, headers)
    return UpdateLogStoreResponse(header)
def list_logstore(self, project_name, logstore_name_pattern = None, offset = 0, size = 100):
    """ list the logstore in a project
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type logstore_name_pattern: string
    :param logstore_name_pattern: the sub name logstore, used for the server to return logstore names containing this sub name
    :type offset: int
    :param offset: the offset of all the matched names
    :type size: int
    :param size: the max return names count
    :return: ListLogStoreResponse
    :raise: LogException
    """
    headers = {}
    params = {}
    resource = "/logstores"
    # "is not None" (PEP 8) rather than "!= None".
    if logstore_name_pattern is not None:
        params['logstorename'] = logstore_name_pattern
    params['offset'] = str(offset)
    params['size'] = str(size)
    (resp, header) = self._send("GET", project_name, None, resource, params, headers)
    return ListLogStoreResponse(resp, header)
def list_shards(self, project_name, logstore_name):
    """List the shard metadata of a logstore.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :return: ListShardResponse
    :raise: LogException
    """
    resource = "/logstores/%s/shards" % logstore_name
    resp, header = self._send("GET", project_name, None, resource, {}, {})
    return ListShardResponse(resp, header)
def split_shard(self, project_name, logstore_name, shardId, split_hash):
    """ split a readwrite shard into two shards
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type logstore_name: string
    :param logstore_name: the logstore name
    :type shardId: int
    :param shardId: the shard id
    :type split_hash: string
    :param split_hash: the internal hash between the shard begin and end hash
    :return: ListShardResponse
    :raise: LogException
    """
    headers = {}
    # The server performs the split when action=split is supplied.
    params = {"action": "split", "key": split_hash}
    resource = "/logstores/" + logstore_name + "/shards/" + str(shardId)
    (resp, header) = self._send("POST", project_name, None, resource, params, headers)
    return ListShardResponse(resp, header)
def merge_shard(self, project_name, logstore_name, shardId):
    """ merge two adjacent readwrite shards into one shard
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type logstore_name: string
    :param logstore_name: the logstore name
    :type shardId: int
    :param shardId: the shard id of the left shard, server will determine the right adjacent shardId
    :return: ListShardResponse
    :raise: LogException
    """
    headers = {}
    params = {"action": "merge"}
    resource = "/logstores/" + logstore_name + "/shards/" + str(shardId)
    (resp, header) = self._send("POST", project_name, None, resource, params, headers)
    return ListShardResponse(resp, header)
def delete_shard(self, project_name, logstore_name, shardId):
    """ delete a readonly shard
    Unsuccessful operation will cause a LogException.
    :type project_name: string
    :param project_name: the Project name
    :type logstore_name: string
    :param logstore_name: the logstore name
    :type shardId: int
    :param shardId: the read only shard id
    :return: DeleteShardResponse
    :raise: LogException
    """
    headers = {}
    params = {}
    resource = "/logstores/" + logstore_name + "/shards/" + str(shardId)
    (resp, header) = self._send("DELETE", project_name, None, resource, params, headers)
    return DeleteShardResponse(header)
def create_index(self, project_name, logstore_name, index_detail):
    """Create the search index of a logstore.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :param index_detail: the index config detail (index_config.IndexConfig)
    :return: CreateIndexResponse
    :raise: LogException
    """
    payload = json.dumps(index_detail.to_json())
    headers = {
        'Content-Type': 'application/json',
        'x-log-bodyrawsize': str(len(payload)),
    }
    resource = "/logstores/%s/index" % logstore_name
    resp, header = self._send("POST", project_name, payload, resource, {}, headers)
    return CreateIndexResponse(header)
def update_index(self, project_name, logstore_name, index_detail):
    """Update the search index of a logstore.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :param index_detail: the index config detail (index_config.IndexConfig)
    :return: UpdateIndexResponse
    :raise: LogException
    """
    payload = json.dumps(index_detail.to_json())
    headers = {
        'Content-Type': 'application/json',
        'x-log-bodyrawsize': str(len(payload)),
    }
    resource = "/logstores/%s/index" % logstore_name
    resp, header = self._send("PUT", project_name, payload, resource, {}, headers)
    return UpdateIndexResponse(header)
def delete_index(self, project_name, logstore_name):
    """Delete the search index of a logstore.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :return: DeleteIndexResponse
    :raise: LogException
    """
    resource = "/logstores/%s/index" % logstore_name
    resp, header = self._send("DELETE", project_name, None, resource, {}, {})
    return DeleteIndexResponse(header)
def get_index_config(self, project_name, logstore_name):
    """Fetch the search-index config detail of a logstore.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :return: GetIndexResponse
    :raise: LogException
    """
    resource = "/logstores/%s/index" % logstore_name
    resp, header = self._send("GET", project_name, None, resource, {}, {})
    return GetIndexResponse(resp, header)
def create_logtail_config(self, project_name, config_detail):
    """Create a logtail config in a project.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param config_detail: logtail_config_detail.CommonRegLogConfigDetail for
        common regex logs, or logtail_config_detail.ApsaraLogConfigDetail for
        apsara logs
    :return: CreateLogtailConfigResponse
    :raise: LogException
    """
    payload = json.dumps(config_detail.to_json())
    request_headers = {
        'Content-Type': 'application/json',
        'x-log-bodyrawsize': str(len(payload)),
    }
    resp, resp_headers = self._send("POST", project_name, payload, "/configs", {}, request_headers)
    return CreateLogtailConfigResponse(resp_headers)
def update_logtail_config(self, project_name, config_detail):
    """Update an existing logtail config in a project.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param config_detail: logtail_config_detail.CommonRegLogConfigDetail for
        common regex logs, or logtail_config_detail.ApsaraLogConfigDetail for
        apsara logs; its config_name selects the config to update
    :return: UpdateLogtailConfigResponse
    :raise: LogException
    """
    payload = json.dumps(config_detail.to_json())
    request_headers = {
        'Content-Type': 'application/json',
        'x-log-bodyrawsize': str(len(payload)),
    }
    resource = "/configs/%s" % config_detail.config_name
    resp, resp_headers = self._send("PUT", project_name, payload, resource, {}, request_headers)
    return UpdateLogtailConfigResponse(resp_headers)
def delete_logtail_config(self, project_name, config_name):
    """Delete a logtail config from a project.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param config_name: the logtail config name (string)
    :return: DeleteLogtailConfigResponse
    :raise: LogException
    """
    resource = "/configs/%s" % config_name
    resp, resp_headers = self._send("DELETE", project_name, None, resource, {}, {})
    return DeleteLogtailConfigResponse(resp_headers)
def get_logtail_config(self, project_name, config_name):
    """Fetch a logtail config from a project.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param config_name: the logtail config name (string)
    :return: GetLogtailConfigResponse
    :raise: LogException
    """
    resource = "/configs/%s" % config_name
    resp, resp_headers = self._send("GET", project_name, None, resource, {}, {})
    return GetLogtailConfigResponse(resp, resp_headers)
def list_logtail_config(self, project_name, offset = 0, size = 100):
    """List logtail config names in a project.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param offset: the offset of all config names (int)
    :param size: the max return names count (int)
    :return: ListLogtailConfigResponse
    :raise: LogException
    """
    params = {'offset': str(offset), 'size': str(size)}
    resp, header = self._send("GET", project_name, None, "/configs", params, {})
    return ListLogtailConfigResponse(resp, header)
def create_machine_group(self, project_name, group_detail):
    """Create a machine group in a project.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param group_detail: the machine group detail config
        (machine_group_detail.MachineGroupDetail)
    :return: CreateMachineGroupResponse
    :raise: LogException
    """
    payload = json.dumps(group_detail.to_json())
    request_headers = {
        'Content-Type': 'application/json',
        'x-log-bodyrawsize': str(len(payload)),
    }
    resp, resp_headers = self._send("POST", project_name, payload, "/machinegroups", {}, request_headers)
    return CreateMachineGroupResponse(resp_headers)
def delete_machine_group(self, project_name, group_name):
    """Delete a machine group from a project.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param group_name: the group name (string)
    :return: DeleteMachineGroupResponse
    :raise: LogException
    """
    resource = "/machinegroups/%s" % group_name
    resp, resp_headers = self._send("DELETE", project_name, None, resource, {}, {})
    return DeleteMachineGroupResponse(resp_headers)
def update_machine_group(self, project_name, group_detail):
    """Update a machine group in a project.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param group_detail: the machine group detail config
        (machine_group_detail.MachineGroupDetail); its group_name selects
        the group to update
    :return: UpdateMachineGroupResponse
    :raise: LogException
    """
    payload = json.dumps(group_detail.to_json())
    request_headers = {
        'Content-Type': 'application/json',
        'x-log-bodyrawsize': str(len(payload)),
    }
    resource = "/machinegroups/%s" % group_detail.group_name
    resp, resp_headers = self._send("PUT", project_name, payload, resource, {}, request_headers)
    return UpdateMachineGroupResponse(resp_headers)
def get_machine_group(self, project_name, group_name):
    """Fetch a machine group from a project.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param group_name: the group name to get (string)
    :return: GetMachineGroupResponse
    :raise: LogException
    """
    resource = "/machinegroups/%s" % group_name
    resp, resp_headers = self._send("GET", project_name, None, resource, {}, {})
    return GetMachineGroupResponse(resp, resp_headers)
def list_machine_group(self, project_name, offset = 0, size = 100):
    """List machine group names in a project.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param offset: the offset of all group names (int)
    :param size: the max return names count (int)
    :return: ListMachineGroupResponse
    :raise: LogException
    """
    params = {'offset': str(offset), 'size': str(size)}
    resp, header = self._send("GET", project_name, None, "/machinegroups", params, {})
    return ListMachineGroupResponse(resp, header)
def list_machines(self, project_name, group_name, offset = 0, size = 100):
    """List the machines belonging to a machine group.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param group_name: the group name to list (string)
    :param offset: the offset of all machines (int)
    :param size: the max return count (int)
    :return: ListMachinesResponse
    :raise: LogException
    """
    params = {'offset': str(offset), 'size': str(size)}
    resource = "/machinegroups/%s/machines" % group_name
    resp, header = self._send("GET", project_name, None, resource, params, {})
    return ListMachinesResponse(resp, header)
def apply_config_to_machine_group(self, project_name, config_name, group_name):
    """Apply a logtail config to a machine group.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param config_name: the logtail config name to apply (string)
    :param group_name: the machine group name (string)
    :return: ApplyConfigToMachineGroupResponse
    :raise: LogException
    """
    resource = "/machinegroups/%s/configs/%s" % (group_name, config_name)
    resp, header = self._send("PUT", project_name, None, resource, {}, {})
    return ApplyConfigToMachineGroupResponse(header)
def remove_config_to_machine_group(self, project_name, config_name, group_name):
    """Remove a logtail config from a machine group.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param config_name: the logtail config name to remove (string)
    :param group_name: the machine group name (string)
    :return: RemoveConfigToMachineGroupResponse
    :raise: LogException
    """
    resource = "/machinegroups/%s/configs/%s" % (group_name, config_name)
    resp, header = self._send("DELETE", project_name, None, resource, {}, {})
    return RemoveConfigToMachineGroupResponse(header)
def get_machine_group_applied_configs(self, project_name, group_name):
    """List the logtail config names applied to a machine group.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param group_name: the group name to list (string)
    :return: GetMachineGroupAppliedConfigResponse
    :raise: LogException
    """
    resource = "/machinegroups/%s/configs" % group_name
    resp, header = self._send("GET", project_name, None, resource, {}, {})
    return GetMachineGroupAppliedConfigResponse(resp, header)
def get_config_applied_machine_groups(self, project_name, config_name):
    """List the machine group names a logtail config is applied to.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param config_name: the logtail config name (string)
    :return: GetConfigAppliedMachineGroupsResponse
    :raise: LogException
    """
    resource = "/configs/%s/machinegroups" % config_name
    resp, header = self._send("GET", project_name, None, resource, {}, {})
    return GetConfigAppliedMachineGroupsResponse(resp, header)
def _update_acl(self, project_name, logstore_name, acl_action, acl_config):
    """Shared helper for project/logstore ACL updates.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name, or None/"" to target the
        project itself (string)
    :param acl_action: "grant" or "revoke" (string)
    :param acl_config: the detail acl config info (acl_config.AclConfig)
    :return: UpdateAclResponse
    :raise: LogException
    """
    headers = {}
    params = {}
    params['type'] = 'acl'
    resource = "/"
    # Truthiness covers both None and the empty string in one test.
    if logstore_name:
        resource = "/logstores/" + logstore_name
    body = acl_config.to_json()
    body['action'] = acl_action
    body = json.dumps(body)
    headers['Content-Type'] = 'application/json'
    headers['x-log-bodyrawsize'] = str(len(body))
    (resp, headers) = self._send("PUT", project_name, body, resource, params, headers)
    return UpdateAclResponse(headers)
def update_project_acl(self, project_name, acl_action, acl_config):
    """Grant or revoke an ACL on a project.

    Delegates to :meth:`_update_acl` with no logstore target.
    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param acl_action: "grant" or "revoke" (string)
    :param acl_config: the detail acl config info (acl_config.AclConfig)
    :return: UpdateAclResponse
    :raise: LogException
    """
    return self._update_acl(project_name, None, acl_action, acl_config)
def update_logstore_acl(self, project_name, logstore_name, acl_action, acl_config):
    """Grant or revoke an ACL on a logstore.

    Delegates to :meth:`_update_acl` with the logstore as target.
    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :param acl_action: "grant" or "revoke" (string)
    :param acl_config: the detail acl config info (acl_config.AclConfig)
    :return: UpdateAclResponse
    :raise: LogException
    """
    return self._update_acl(project_name, logstore_name, acl_action, acl_config)
def _list_acl(self, project_name, logstore_name, offset = 0, size = 100):
    """Shared helper for listing project/logstore ACLs.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name, or None/"" to target the
        project itself (string)
    :param offset: the offset of all acl (int)
    :param size: the max return acl count (int)
    :return: ListAclResponse
    :raise: LogException
    """
    headers = {}
    params = {}
    params['type'] = 'acl'
    params['offset'] = str(offset)
    params['size'] = str(size)
    resource = "/"
    # Truthiness covers both None and the empty string in one test.
    if logstore_name:
        resource = "/logstores/" + logstore_name
    (resp, headers) = self._send("GET", project_name, None, resource, params, headers)
    return ListAclResponse(resp, headers)
def list_project_acl(self, project_name, offset = 0, size = 100):
    """List the ACLs of a project.

    Delegates to :meth:`_list_acl` with no logstore target.
    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param offset: the offset of all acl (int)
    :param size: the max return acl count (int)
    :return: ListAclResponse
    :raise: LogException
    """
    return self._list_acl(project_name, None, offset, size)
def list_logstore_acl(self, project_name, logstore_name, offset = 0, size = 100):
    """List the ACLs of a logstore.

    Delegates to :meth:`_list_acl` with the logstore as target.
    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :param offset: the offset of all acl (int)
    :param size: the max return acl count (int)
    :return: ListAclResponse
    :raise: LogException
    """
    return self._list_acl(project_name, logstore_name, offset, size)
def create_shipper(self, project_name, logstore_name, shipper_name, shipper_type, shipper_config):
    """Create an odps/oss shipper for a logstore.

    For every target type only one shipper is allowed.
    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :param shipper_name: the shipper name (string)
    :param shipper_type: only "odps" or "oss" is supported (string)
    :param shipper_config: OssShipperConfig or OdpsShipperConfig instance
    :return: CreateShipperResponse
    :raise: LogException
    """
    payload = json.dumps({
        "shipperName": shipper_name,
        "targetType": shipper_type,
        "targetConfiguration": shipper_config.to_json(),
    })
    request_headers = {
        'Content-Type': 'application/json',
        'x-log-bodyrawsize': str(len(payload)),
    }
    resource = "/logstores/%s/shipper" % logstore_name
    resp, resp_headers = self._send("POST", project_name, payload, resource, {}, request_headers)
    return CreateShipperResponse(resp_headers)
def update_shipper(self, project_name, logstore_name, shipper_name, shipper_type, shipper_config):
    """Update an odps/oss shipper of a logstore.

    The type must stay the same as the original shipper.
    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :param shipper_name: the shipper name (string)
    :param shipper_type: only "odps" or "oss" is supported (string)
    :param shipper_config: OssShipperConfig or OdpsShipperConfig instance
    :return: UpdateShipperResponse
    :raise: LogException
    """
    payload = json.dumps({
        "shipperName": shipper_name,
        "targetType": shipper_type,
        "targetConfiguration": shipper_config.to_json(),
    })
    request_headers = {
        'Content-Type': 'application/json',
        'x-log-bodyrawsize': str(len(payload)),
    }
    resource = "/logstores/%s/shipper/%s" % (logstore_name, shipper_name)
    resp, resp_headers = self._send("PUT", project_name, payload, resource, {}, request_headers)
    return UpdateShipperResponse(resp_headers)
def delete_shipper(self, project_name, logstore_name, shipper_name):
    """Delete an odps/oss shipper of a logstore.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :param shipper_name: the shipper name (string)
    :return: DeleteShipperResponse
    :raise: LogException
    """
    resource = "/logstores/%s/shipper/%s" % (logstore_name, shipper_name)
    resp, header = self._send("DELETE", project_name, None, resource, {}, {})
    return DeleteShipperResponse(header)
def get_shipper_config(self, project_name, logstore_name, shipper_name):
    """Fetch the config of an odps/oss shipper.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :param shipper_name: the shipper name (string)
    :return: GetShipperConfigResponse
    :raise: LogException
    """
    resource = "/logstores/%s/shipper/%s" % (logstore_name, shipper_name)
    resp, header = self._send("GET", project_name, None, resource, {}, {})
    return GetShipperConfigResponse(resp, header)
def list_shipper(self, project_name, logstore_name):
    """List the odps/oss shippers of a logstore.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :return: ListShipperResponse
    :raise: LogException
    """
    resource = "/logstores/%s/shipper" % logstore_name
    resp, header = self._send("GET", project_name, None, resource, {}, {})
    return ListShipperResponse(resp, header)
def get_shipper_tasks(self, project_name, logstore_name, shipper_name, start_time, end_time, status_type = '', offset = 0, size = 100):
    """List odps/oss shipper tasks within a time range.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :param shipper_name: the shipper name (string)
    :param start_time: the start timestamp (int)
    :param end_time: the end timestamp (int)
    :param status_type: one of '', 'fail', 'success', 'running'; '' means
        all status types (string)
    :param offset: the begin task offset (int)
    :param size: the needed tasks count (int)
    :return: GetShipperTasksResponse
    :raise: LogException
    """
    params = {
        "from": str(int(start_time)),
        "to": str(int(end_time)),
        "status": status_type,
        "offset": str(int(offset)),
        "size": str(int(size)),
    }
    resource = "/logstores/%s/shipper/%s/tasks" % (logstore_name, shipper_name)
    resp, header = self._send("GET", project_name, None, resource, params, {})
    return GetShipperTasksResponse(resp, header)
def retry_shipper_tasks(self, project_name, logstore_name, shipper_name, task_list):
    """Retry failed shipper tasks; only failed tasks can be retried.

    Unsuccessful operation will cause a LogException.

    :param project_name: the Project name (string)
    :param logstore_name: the logstore name (string)
    :param shipper_name: the shipper name (string)
    :param task_list: failed task_id list, e.g. ['failed_task_id_1', ...];
        at most 10 tasks can be retried per call (string array)
    :return: RetryShipperTasksResponse
    :raise: LogException
    """
    payload = json.dumps(task_list)
    request_headers = {
        'Content-Type': 'application/json',
        'x-log-bodyrawsize': str(len(payload)),
    }
    resource = "/logstores/%s/shipper/%s/tasks" % (logstore_name, shipper_name)
    resp, header = self._send("PUT", project_name, payload, resource, {}, request_headers)
    return RetryShipperTasksResponse(header)
def create_project(self, project_name, project_des):
    """Create a project.

    An unsuccessful operation raises a LogException.

    :type project_name: string
    :param project_name: the Project name
    :type project_des: string
    :param project_des: the description of the project
    :return: CreateProjectResponse
    :raise: LogException
    """
    body = json.dumps({
        "projectName": project_name,
        "description": project_des,
    })
    headers = {
        'Content-Type': 'application/json',
        'x-log-bodyrawsize': str(len(body)),
    }
    params = {}
    resource = "/"
    (resp, header) = self._send("POST", project_name, body, resource, params, headers)
    return CreateProjectResponse(header)
def get_project(self, project_name):
    """Fetch a project's details.

    An unsuccessful operation raises a LogException.

    :type project_name: string
    :param project_name: the Project name
    :return: GetProjectResponse
    :raise: LogException
    """
    resource = "/"
    (resp, header) = self._send("GET", project_name, None, resource, {}, {})
    return GetProjectResponse(resp, header)
def delete_project(self, project_name):
    """Delete a project.

    An unsuccessful operation raises a LogException.

    :type project_name: string
    :param project_name: the Project name
    :return: DeleteProjectResponse
    :raise: LogException
    """
    resource = "/"
    (resp, header) = self._send("DELETE", project_name, None, resource, {}, {})
    return DeleteProjectResponse(header)
| 36.325553 | 183 | 0.615543 | 55,341 | 0.935794 | 0 | 0 | 0 | 0 | 0 | 0 | 28,734 | 0.48588 |
504b5c677cc47a60e5d73108185305ec18d1d8d6 | 489 | py | Python | CodeWars/Python/AlternateCase.py | BobbyRobillard/CodingChallenges | 71d5ca0b7f7c470c547d858dde7a799ce7d0d1a0 | [
"MIT"
] | null | null | null | CodeWars/Python/AlternateCase.py | BobbyRobillard/CodingChallenges | 71d5ca0b7f7c470c547d858dde7a799ce7d0d1a0 | [
"MIT"
] | null | null | null | CodeWars/Python/AlternateCase.py | BobbyRobillard/CodingChallenges | 71d5ca0b7f7c470c547d858dde7a799ce7d0d1a0 | [
"MIT"
] | null | null | null | def alternate_case(s):
# Like a Giga Chad
return "".join([char.lower() if char.isupper() else char.upper() for char in s])
# Like a Beta Male
# return s.swapcase()
# EXAMPLE AND TESTING #
# Renamed from `input` to avoid shadowing the built-in input().
examples = ["Hello World", "cODEwARS"]
for item in examples:
    print("\nInput: {0}\nAlternate Case: {1}".format(item, alternate_case(item)))
assert alternate_case("Hello World") == "hELLO wORLD"  # Simple Unit Tests
assert alternate_case("cODEwARS") == "CodeWars"  # Simple Unit Tests
504ca1ede2e47eb980188d865fd66a3fa14bb848 | 729 | py | Python | todoist_python_additions/scripts/personal.py | aagnone3/todoist-python-additions | 56bfe6e0160fd28ba0b3b04f4f156b89d4512c2f | [
"Apache-2.0"
] | null | null | null | todoist_python_additions/scripts/personal.py | aagnone3/todoist-python-additions | 56bfe6e0160fd28ba0b3b04f4f156b89d4512c2f | [
"Apache-2.0"
] | null | null | null | todoist_python_additions/scripts/personal.py | aagnone3/todoist-python-additions | 56bfe6e0160fd28ba0b3b04f4f156b89d4512c2f | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import todoist
import pickle
from os import path
from argparse import ArgumentParser
from todoist_python_additions.lib.state import Todoist
def build_parser():
    """Construct the command-line parser for this script."""
    arg_parser = ArgumentParser()
    arg_parser.add_argument('-p', '--project', required=True, help='Project to get descendant tasks of.')
    return arg_parser
def main():
    """List every descendant task of the project named on the command line."""
    args = build_parser().parse_args()
    with Todoist(sync=True) as api:
        tasks = api.get_subtasks(args.project)
    if not tasks:
        print("No tasks under {}".format(args.project))
    else:
        for task in tasks:
            print("({}) {}".format(task['project'], task['content']))


if __name__ == '__main__':
    main()
| 22.78125 | 101 | 0.643347 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.176955 |
504e0deae6fdb470fcb220ab052c73d97c382d30 | 636 | py | Python | recipe_server/recipeView.py | Shouyin/Recipe | dffaafdebefd7c39a1438444db910f5d7943cf1f | [
"MIT"
] | null | null | null | recipe_server/recipeView.py | Shouyin/Recipe | dffaafdebefd7c39a1438444db910f5d7943cf1f | [
"MIT"
] | null | null | null | recipe_server/recipeView.py | Shouyin/Recipe | dffaafdebefd7c39a1438444db910f5d7943cf1f | [
"MIT"
] | null | null | null | from django import http
from django.shortcuts import render
from django.shortcuts import render_to_response
from . import settings
from .scripts import logic
import os
import urllib
import json
def recipe_html(request):
    """Serve the recipe page template."""
    static_dir = os.path.join(settings.BASE_DIR, "recipe_server/static")
    # NOTE(review): debug print of the static directory — presumably leftover
    # development output; confirm before removing.
    print(static_dir)
    return render_to_response("recipe.html")
def recipe_api(request):
    """Return the recipe result computed from the fridge contents in the query string."""
    fridge = request.GET["fridge"]
    # One fridge item per line, each terminated by a newline.
    fridge_text = "".join(item + "\n" for item in json.loads(fridge))
    recipe = request.GET["recipe"]
    print(fridge_text)
    return http.HttpResponse(logic.main(fridge_text, recipe))
504e2922325d5c9d9b0ab7cf89788990779e8e47 | 296 | py | Python | ua_project_transfer/wf_steps_template.py | UACoreFacilitiesIT/UA-Project_Transfer | 0360f20f54a6c9c49dcdb1568a1c961222cb1404 | [
"MIT"
] | 1 | 2020-07-14T16:27:25.000Z | 2020-07-14T16:27:25.000Z | ua_project_transfer/wf_steps_template.py | UACoreFacilitiesIT/UA-Project_Transfer | 0360f20f54a6c9c49dcdb1568a1c961222cb1404 | [
"MIT"
] | null | null | null | ua_project_transfer/wf_steps_template.py | UACoreFacilitiesIT/UA-Project_Transfer | 0360f20f54a6c9c49dcdb1568a1c961222cb1404 | [
"MIT"
] | null | null | null | """A json-like master list of workflows and steps."""
# NOTE: Create a json-like dictionary in the form of:
# WF_STEPS = {
# env1: {
# 1st condition defined in next_steps: {
# 2nd condition defined in next_steps: (Workflow Name, Step Name),
# },
# },
# }
WF_STEPS = {}
| 24.666667 | 76 | 0.608108 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.918919 |
504f7132a6b57d1fc297a4a0f887b07196d4dbf4 | 250 | py | Python | lang/python/pptv_strip.py | liuyang1/test | a4560e0c9ffd0bc054d55bbcf12a894ab5b7d417 | [
"MIT"
] | 8 | 2015-06-07T13:25:48.000Z | 2022-03-22T23:14:50.000Z | lang/python/pptv_strip.py | liuyang1/test | a4560e0c9ffd0bc054d55bbcf12a894ab5b7d417 | [
"MIT"
] | 30 | 2016-01-29T01:36:41.000Z | 2018-09-19T07:01:22.000Z | lang/python/pptv_strip.py | liuyang1/test | a4560e0c9ffd0bc054d55bbcf12a894ab5b7d417 | [
"MIT"
] | null | null | null | import sys
# Strip a fixed-size header from a downloaded PPTV stream, writing the
# remaining payload to "<file>.flv".
# Usage: pptv_strip.py <file> <offset> [<length>]
fn = sys.argv[1]
offset = int(sys.argv[2])
# Read in binary mode: the data is written verbatim to a "wb" file, and a
# text-mode str would fail on fo.write() under Python 3.
with open(fn, "rb") as fp:
    data = fp.read()
with open(fn + ".flv", "wb") as fo:
    if len(sys.argv) == 3:
        fo.write(data[offset:])
    else:
        # Fixed typo: was `sys.argsv[3]`, which raised AttributeError.
        length = int(sys.argv[3])
        fo.write(data[offset:offset + length])
| 17.857143 | 35 | 0.608 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.04 |
5050238035d838d374850823b3ad1446226e79b5 | 329 | py | Python | tools/exploitation_tools.py | LucaRibeiro/pentestools | 2e7a6b9bf51a84aec90944c50a23e882d184ccdc | [
"MIT"
] | 1 | 2021-02-18T16:15:25.000Z | 2021-02-18T16:15:25.000Z | tools/exploitation_tools.py | LucaRibeiro/Pentools | 2e7a6b9bf51a84aec90944c50a23e882d184ccdc | [
"MIT"
] | null | null | null | tools/exploitation_tools.py | LucaRibeiro/Pentools | 2e7a6b9bf51a84aec90944c50a23e882d184ccdc | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# Exploitation tools bundled with common penetration-testing distributions.
EXPLOITATION_TOOLS = [
    "Armitage", "Backdoor Factory", "BeEF", "cisco-auditing-tool",
    "cisco-global-exploiter", "cisco-ocs", "cisco-torch", "Commix", "crackle",
    "exploitdb", "jboss-autopwn", "Linux Exploit Suggester", "Maltego Teeth",
    "Metasploit Framework", "MSFPC", "RouterSploit", "SET", "ShellNoob",
    "sqlmap", "THC-IPV6", "Yersinia",
]

# Backward-compatible alias: earlier revisions exposed this under the name
# `list`, shadowing the built-in. Prefer EXPLOITATION_TOOLS in new code.
list = EXPLOITATION_TOOLS
| 41.125 | 73 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 291 | 0.884498 |
50530270ca9767c3a423e4f06bb397a9db26bf9c | 1,152 | py | Python | web/api/serializer/foodComment.py | bounswe/bounswe2016group2 | f5dbba9b78fc03e8fd6a1fc7548de6cd1177a5ad | [
"Apache-2.0"
] | 10 | 2016-02-10T13:57:10.000Z | 2021-04-01T14:34:33.000Z | web/api/serializer/foodComment.py | bounswe/bounswe2016group2 | f5dbba9b78fc03e8fd6a1fc7548de6cd1177a5ad | [
"Apache-2.0"
] | 203 | 2016-02-14T16:13:15.000Z | 2016-12-23T21:27:08.000Z | web/api/serializer/foodComment.py | bounswe/bounswe2016group2 | f5dbba9b78fc03e8fd6a1fc7548de6cd1177a5ad | [
"Apache-2.0"
] | 2 | 2017-05-10T18:41:28.000Z | 2019-02-27T21:01:18.000Z | from rest_framework import serializers
from api.model.foodComment import FoodComment
from api.model.food import Food
from django.contrib.auth.models import User
from api.serializer.user import UserSerializer
class FoodCommentSerializer(serializers.ModelSerializer):
    """Read/write serializer for FoodComment; user and food are referenced by primary key."""

    comment = serializers.CharField(max_length=255)
    # Optional photo reference; clients may omit it or send null.
    photo = serializers.CharField(max_length=255, allow_null=True, required=False)
    user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
    food = serializers.PrimaryKeyRelatedField(queryset=Food.objects.all())

    class Meta:
        model = FoodComment
        fields = '__all__'
        depth = 1
class FoodCommentReadSerializer(serializers.ModelSerializer):
    """Serializer for reading FoodComment; embeds the full user object via UserSerializer."""

    user = UserSerializer()

    class Meta:
        model = FoodComment
        fields = '__all__'
        depth = 1
class FoodCommentPureSerializer(serializers.ModelSerializer):
    """Minimal serializer exposing only the comment text plus user/food primary keys."""

    user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
    food = serializers.PrimaryKeyRelatedField(queryset=Food.objects.all())

    class Meta:
        model = FoodComment
        fields = ('comment', 'user', 'food')
        depth = 1
| 27.428571 | 82 | 0.730903 | 934 | 0.810764 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.033854 |
50546bf4b57d2a73845feae541d069b7a6fc01c7 | 616 | py | Python | pycccl/exchanges/binance.py | beraldoleal/pycccl | 488bc799fdd91b626c2735be11ad014f0c5262f4 | [
"MIT"
] | 2 | 2017-12-18T23:58:56.000Z | 2020-02-13T16:16:45.000Z | pycccl/exchanges/binance.py | beraldoleal/pycccl | 488bc799fdd91b626c2735be11ad014f0c5262f4 | [
"MIT"
] | null | null | null | pycccl/exchanges/binance.py | beraldoleal/pycccl | 488bc799fdd91b626c2735be11ad014f0c5262f4 | [
"MIT"
] | null | null | null | from pycccl.base import ExchangeBase
class Binance(ExchangeBase):
_public_endpoint = "https://api.binance.com/api/v1"
_private_endpoint = _public_endpoint
def get_ticker(self, ticker, against='USDT'):
pair = "{}{}".format(ticker, against)
endpoint = "{}/ticker/24hr".format(self._public_endpoint)
params = {'symbol': pair}
raw = self._get(endpoint, params=params)
return {'last': float(raw['lastPrice']),
'24highest': float(raw['highPrice']),
'24lowest': float(raw['lowPrice']),
'24volume': float(raw['volume'])}
| 36.235294 | 65 | 0.608766 | 576 | 0.935065 | 0 | 0 | 0 | 0 | 0 | 0 | 145 | 0.23539 |
505794ef4686ee28ab927b92a5720e1b1a514e5a | 3,036 | py | Python | features/feature_helpers.py | Omarzintan/bumblebee-ai | 0b8c5cecf032730e23b1b710a88538f5e4ea70c9 | [
"MIT"
] | 3 | 2021-05-06T16:29:26.000Z | 2022-01-09T03:32:40.000Z | features/feature_helpers.py | Omarzintan/bumblebee-ai | 0b8c5cecf032730e23b1b710a88538f5e4ea70c9 | [
"MIT"
] | 1 | 2021-05-20T17:59:12.000Z | 2021-05-20T17:59:12.000Z | features/feature_helpers.py | Omarzintan/bumblebee-ai | 0b8c5cecf032730e23b1b710a88538f5e4ea70c9 | [
"MIT"
] | null | null | null | '''Contains helper functions that can be used by any feature.'''
from nltk_utils import tokenize
def get_search_query(
spoken_text,
patterns,
search_terms,
false_search_term_indicators=['like', 'love', 'want', 'ready']
):
'''
General function that extracts a search query from spoken
text given search terms to look out for.
Some possible search terms include 'to', 'on', 'for', 'about'
e.g. 'do a google search on Python.'
search term = on
query found = Python
This function also ignores false search_term_indicators such
as 'like to', 'love to', 'want to'
e.g. 'I want to do a google search on Python'
false search term = to
false search term indicator = want
actual search term = on
query found = Python
Thus the function will correctly ignore 'do a google search on Python'
as a possible search query and will capture 'Python' as
the right query.
Other exmaples of use cases: 'Send an email to Alex'
search term = to
query found = Alex
Arguments: <string> spoken_text, <list> feature_patterns,
<list> search_terms, <list> false_search_term_indicators
Return type: <string> spoken_text (now stripped down to
only the search query.)
'''
query_found = False
query = ""
# spoken_text from features is already in a tokenized form. If not, we
# tokenize the text here.
tokenized_text = spoken_text
if isinstance(tokenized_text, str):
tokenized_text = tokenize(spoken_text)
has_search_term = any(
search_term in tokenized_text for search_term in search_terms)
while not query_found and has_search_term and tokenized_text != []:
for search_term in search_terms:
if search_term in tokenized_text:
search_index = tokenized_text.index(search_term)
# ignore cases with "like to", "love to" "ready to"
if (tokenized_text[search_index-1] in
false_search_term_indicators):
phrase_after_false_term = tokenized_text[search_index+1:]
tokenized_text = phrase_after_false_term
break
# get everything after the search term
query = tokenized_text[search_index+1:]
query_found = True
break
# In case none of the search terms are included in spoken_text.
# This is just a fallback and is not expected to be used very often.
if not query_found:
tokenized_patterns = []
for pattern in patterns:
tokens = tokenize(pattern)
tokenized_patterns.extend(tokens)
query = [
word for word in tokenized_text if word not in tokenized_patterns
]
query = ' '.join(query)
# Need to remove whitespace before and after the wanted query.
# This if useful for doing database searches on the query.
query = query.strip()
return query
| 38.43038 | 78 | 0.641634 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,601 | 0.527339 |
5058a081df1dc931f3e41294a65ae1da77668e15 | 1,042 | py | Python | tests/core/test_machine.py | zionwu/rancher | f3e80f69e94d7da64e75545caadda61870148753 | [
"Apache-2.0"
] | null | null | null | tests/core/test_machine.py | zionwu/rancher | f3e80f69e94d7da64e75545caadda61870148753 | [
"Apache-2.0"
] | null | null | null | tests/core/test_machine.py | zionwu/rancher | f3e80f69e94d7da64e75545caadda61870148753 | [
"Apache-2.0"
] | 1 | 2021-07-13T05:49:57.000Z | 2021-07-13T05:49:57.000Z | from common import auth_check
def test_machine_fields(cclient):
fields = {
'useInternalIpAddress': 'cr',
'nodeTaints': 'r',
'nodeLabels': 'r',
'nodeAnnotations': 'r',
'namespaceId': 'cr',
'conditions': 'r',
'allocatable': 'r',
'capacity': 'r',
'hostname': 'r',
'info': 'r',
'ipAddress': 'r',
'limits': 'r',
'nodeName': 'r',
'requested': 'r',
'clusterId': 'cr',
'role': 'cr',
'requestedHostname': 'cr',
'volumesAttached': 'r',
'machineTemplateId': 'cr',
'volumesInUse': 'r',
'podCidr': 'r',
'name': 'cru',
'taints': 'ru',
'unschedulable': 'ru',
'providerId': 'r',
'sshUser': 'r',
}
for name, field in cclient.schema.types['machine'].resourceFields.items():
if name.endswith("Config"):
fields[name] = 'cr'
fields['customConfig'] = 'cru'
auth_check(cclient.schema, 'machine', 'crud', fields)
| 25.414634 | 78 | 0.487524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 455 | 0.43666 |
505a515d4a168bd3da730d0e4d81ce61082b150d | 1,764 | py | Python | tempest/lib/services/placement/base_placement_client.py | rishabh20111990/tempest | df15531cd4231000b0da016f5cd8641523ce984e | [
"Apache-2.0"
] | 254 | 2015-01-05T19:22:52.000Z | 2022-03-29T08:14:54.000Z | tempest/lib/services/placement/base_placement_client.py | rishabh20111990/tempest | df15531cd4231000b0da016f5cd8641523ce984e | [
"Apache-2.0"
] | 13 | 2015-03-02T15:53:04.000Z | 2022-02-16T02:28:14.000Z | tempest/lib/services/placement/base_placement_client.py | rishabh20111990/tempest | df15531cd4231000b0da016f5cd8641523ce984e | [
"Apache-2.0"
] | 367 | 2015-01-07T15:05:39.000Z | 2022-03-04T09:50:35.000Z | # Copyright (c) 2019 Ericsson
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.common import api_version_utils
from tempest.lib.common import rest_client
# Module-level microversion pin read by BasePlacementClient; None means no
# 'OpenStack-API-Version' header is added to requests.
PLACEMENT_MICROVERSION = None
class BasePlacementClient(rest_client.RestClient):
    """REST client base for the placement service with microversion handling."""

    api_microversion_header_name = 'OpenStack-API-Version'
    version_header_value = 'placement %s'

    def get_headers(self):
        """Return request headers, adding the placement microversion header
        when PLACEMENT_MICROVERSION is set."""
        headers = super(BasePlacementClient, self).get_headers()
        if not PLACEMENT_MICROVERSION:
            return headers
        version_value = self.version_header_value % PLACEMENT_MICROVERSION
        headers[self.api_microversion_header_name] = version_value
        return headers

    def request(self, method, url, extra_headers=False, headers=None,
                body=None, chunked=False):
        """Issue a request; for a pinned, non-latest microversion, assert the
        service echoed the requested microversion header in the response."""
        resp, resp_body = super(BasePlacementClient, self).request(
            method, url, extra_headers, headers, body, chunked)
        pinned = (PLACEMENT_MICROVERSION and
                  PLACEMENT_MICROVERSION != api_version_utils.LATEST_MICROVERSION)
        if pinned:
            api_version_utils.assert_version_header_matches_request(
                self.api_microversion_header_name,
                self.version_header_value % PLACEMENT_MICROVERSION,
                resp)
        return resp, resp_body
505ba5b3fd5ef3def46cb59aa349536857d79a09 | 6,743 | py | Python | Telegram Bot/Controllo_intenti.py | LuzDeGea/FoodHelper | 2b4ad1b731f605c1aaf92863d8dc69ee9e9a1599 | [
"MIT"
] | null | null | null | Telegram Bot/Controllo_intenti.py | LuzDeGea/FoodHelper | 2b4ad1b731f605c1aaf92863d8dc69ee9e9a1599 | [
"MIT"
] | null | null | null | Telegram Bot/Controllo_intenti.py | LuzDeGea/FoodHelper | 2b4ad1b731f605c1aaf92863d8dc69ee9e9a1599 | [
"MIT"
] | null | null | null | from Dialogflow_Api import rispondimi
from collegamentoSito import inserisci_utente
from Nutrition import get_food, traduzione
import re
tipo_cibo = ["frutta", "carne", "verdure", "ortaggi", "primi_piatti", "legumi"]
"""
controllo_intent(query_result, utente)--> text_respose
prende il risultato della query e lo confronta con i possibili intenti,
se l'intento risulta essere "cibo", si andrà nella sezione di rilevazione cibo,
altrimenti si passa al controllo sulla modifica dei dati dell'utente.
in caso non ci sia una intento adatto oppure alcun intento, si riceverà
la risposta negativa.
"""
def controllo_intent(query_result, utente):
intent = query_result.intent.display_name
text = query_result.fulfillment_text
if intent == "Cibo":
return rilevazione_cibo(utente, query_result)
if utente:
if intent == "Saluto":
return text + " " + utente.get_nome()
if intent == "Fame":
return controllo_fame(utente, text)
if intent == "Modifica_nome":
return modifica_nome(utente, query_result)
elif intent == "Modifica_peso":
return modifica_peso(utente, query_result)
elif intent == "Modifica_altezza":
return modifica_altezza(utente, query_result)
elif intent == "Modifica_sesso":
return modifica_sesso(utente, query_result)
elif intent == "Modifica_data":
return modifica_data(utente, query_result)
elif intent == "Modifica_attività":
return modifica_attività(utente, query_result)
if text == "":
return "Al momento non sono in grado di risponderti."
return text
"""
controllo_fame(utente, text) --> text_response

Takes a user and their recorded conditions, and builds the reply about
whether or not they may eat a given snack.
"""


def controllo_fame(utente, text):
    # Conditions are checked in priority order; the first match wins.
    checks = (
        (utente.get_anemia_sideropenica, "spuntino ferro"),
        (utente.get_iper_tens, "spuntino ipertensione"),
        (utente.get_nefropatia, "spuntino nefropatia"),
    )
    for has_condition, query in checks:
        if has_condition():
            return rispondimi(query).fulfillment_text
    return text
def modifica_nome(utente, result):
    """Update the user's first name from the Dialogflow result and persist it."""
    error = "Inserisci correttamente il tuo primo nome."
    try:
        nome = result.parameters.fields["given-name"].string_value
    except KeyError:
        return error
    if nome == "":
        return error
    utente.set_nome(nome)
    inserisci_utente(utente)
    return result.fulfillment_text
def modifica_peso(utente, result):
    """Update the user's weight from the Dialogflow result, validating unit (kg)
    and plausible range (40-200 exclusive-ish bounds)."""
    try:
        weight_fields = result.parameters.fields["unit-weight"].struct_value.fields
        unit = weight_fields["unit"].string_value
        amount = weight_fields["amount"].number_value
    except KeyError:
        return "Inserisci correttamente il peso, ad esempio '70kg'."
    if unit != "kg":
        return "Ti preghiamo di inserire il peso in kg."
    if not 39 < amount < 201:
        return "Inserisci il tuo peso corretto."
    utente.set_peso(amount)
    inserisci_utente(utente)
    return result.fulfillment_text
def modifica_altezza(utente, result):
    """Update the user's height from the Dialogflow result; accepts cm or m."""
    try:
        length_fields = result.parameters.fields["unit-length"].struct_value.fields
        unit = length_fields["unit"].string_value
        amount = length_fields["amount"].number_value
    except KeyError:
        return "Inserisci correttamente la tua altezza, ad esempio '180cm'."
    # Plausible-height bounds per unit. NOTE(review): the raw amount is stored
    # for both units, so set_altezza presumably accepts either cm or m values —
    # confirm against the Utente implementation.
    bounds = {"cm": (109, 231), "m": (1.09, 2.31)}
    if unit not in bounds:
        return "Inserisci la tua altezza in cm o in m."
    low, high = bounds[unit]
    if not low < amount < high:
        return "Inserisci la tua altezza corretta."
    utente.set_altezza(amount)
    inserisci_utente(utente)
    return result.fulfillment_text
def modifica_sesso(utente, result):
    """Update the user's sex from the Dialogflow result and persist it."""
    try:
        sex_value = result.parameters.fields["sesso"].string_value
    except KeyError:
        return "Inserisci correttamente il tuo sesso, ad esempio 'maschio'."
    utente.set_sesso(sex_value)
    inserisci_utente(utente)
    return result.fulfillment_text
def modifica_data(utente, result):
    """Update the user's birth date from the Dialogflow result (date part of an
    ISO timestamp) and echo back the computed age."""
    error = "Inserire correttamente la data, ad esempio: '01/01/90'"
    try:
        raw_date = result.parameters.fields["date"].string_value
    except KeyError:
        return error
    if not raw_date:
        return error
    # Dialogflow returns an ISO-8601 timestamp; keep only the part before 'T'.
    date_part = raw_date.split("T")[0]
    utente.set_data(date_part)
    inserisci_utente(utente)
    return result.fulfillment_text + " " + str(utente.get_eta()) + " anni."
def modifica_attività(utente, result):
    """Update the user's activity level from the Dialogflow result and persist it."""
    try:
        attivita = result.parameters.fields["attivita"].string_value
    except KeyError:
        return "Inserisci correttamente l'attività."
    if attivita == "":
        return "Inserisci correttamente l'attività tra queste: 'Sedentaria', 'Leggera', 'Moderata', 'Attiva' " \
               "o 'Molto attiva'."
    utente.set_attivita(attivita)
    inserisci_utente(utente)
    return result.fulfillment_text
"""
rilevazione_cibo(utente, result) --> response

Determines whether the user's intent concerns food. On a successful match
the food information is returned (with dietary advice for registered
users); otherwise a negative answer about the failed detection is given.
"""


def rilevazione_cibo(utente, result):
    cibo = controllo_tipo_cibo(result)
    if not cibo:
        return "Spiacente, non abbiamo informazione relative a questo cibo."
    # Look the food up as spoken, then fall back to its translation.
    food = get_food(cibo) or get_food(traduzione(cibo))
    if not food:
        return "Il cibo non è stato riconosciuto correttamente."
    if not utente:
        return "Non ti sei ancora registrato, non posso darti consigli alimentari.\nPer registrarti utilizza il" \
               " comando /new.\nInformazioni su: " + str(food)
    return str(food) + "\n\n" + utente.can_eat(food)
def controllo_tipo_cibo(result):
    """Extract the food name from the Dialogflow "Cibo" parameter.

    Tries the plain string entity first, then each nested food-category
    struct listed in ``tipo_cibo``. Returns the food name, or False when
    nothing was recognised.
    """
    params = result.parameters.fields
    food = ""
    try:
        food = params["Cibo"].list_value.values[0].string_value
    except IndexError:
        print("Problema riconoscimento tipo di cibo!")
    if food:
        return food
    for categoria in tipo_cibo:
        try:
            food = params["Cibo"].list_value.values[0].struct_value.fields[categoria].string_value
        except IndexError:
            continue
        if food:
            return food
    return False
| 34.757732 | 114 | 0.667655 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,275 | 0.336788 |
505bbd3722c72235b1e0bdbc9e5ed0fb5d0411eb | 780 | py | Python | solutions/problem_122.py | ksvr444/daily-coding-problem | 5d9f488f81c616847ee4e9e48974523ec2d598d7 | [
"MIT"
] | 1,921 | 2018-11-13T18:19:56.000Z | 2021-11-15T14:25:41.000Z | solutions/problem_122.py | MohitIndian/daily-coding-problem | 5d9f488f81c616847ee4e9e48974523ec2d598d7 | [
"MIT"
] | 2 | 2019-07-19T01:06:16.000Z | 2019-08-01T22:21:36.000Z | solutions/problem_122.py | MohitIndian/daily-coding-problem | 5d9f488f81c616847ee4e9e48974523ec2d598d7 | [
"MIT"
] | 1,066 | 2018-11-19T19:06:55.000Z | 2021-11-13T12:33:56.000Z | def get_max_coins_helper(matrix, crow, ccol, rows, cols):
cval = matrix[crow][ccol]
if crow == rows - 1 and ccol == cols - 1:
return cval
down, right = cval, cval
if crow < rows - 1:
down += get_max_coins_helper(
matrix, crow + 1, ccol, rows, cols)
if ccol < cols - 1:
right += get_max_coins_helper(
matrix, crow, ccol + 1, rows, cols)
return max(down, right)
def get_max_coins(matrix):
    """Return the maximum coins collectible on a path from the top-left to the
    bottom-right of `matrix`, moving only down or right.

    Returns None for an empty matrix (preserving the original behaviour).
    Uses O(rows * cols) dynamic programming instead of the exponential
    recursive search, so large grids remain fast.
    """
    if not matrix:
        return None
    rows, cols = len(matrix), len(matrix[0])
    # best[c] holds the best total from (r, c) to the bottom-right corner,
    # updated row by row from the bottom up.
    best = [0] * cols
    for r in range(rows - 1, -1, -1):
        for c in range(cols - 1, -1, -1):
            if r == rows - 1 and c == cols - 1:
                best[c] = matrix[r][c]
            elif r == rows - 1:
                best[c] = matrix[r][c] + best[c + 1]
            elif c == cols - 1:
                best[c] = matrix[r][c] + best[c]
            else:
                best[c] = matrix[r][c] + max(best[c], best[c + 1])
    return best[0]
# Import-time sanity checks for get_max_coins.
_cases = [
    ([[0, 3, 1, 1],
      [2, 0, 0, 4],
      [1, 5, 3, 1]], 12),
    ([[0, 3, 1, 1],
      [2, 8, 9, 4],
      [1, 5, 3, 1]], 25),
]
for coins, expected in _cases:
    assert get_max_coins(coins) == expected
| 23.636364 | 57 | 0.534615 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
505c47079444405aa3ca837c5da8f814a93cab97 | 1,745 | py | Python | examples/GANs/3DGAN/eval.py | Tarkiyah/kaotlin | 97374f648a53f6532f2348ca3f9ace943c4e2a4c | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2019-11-18T05:22:15.000Z | 2020-02-12T15:23:14.000Z | examples/GANs/3DGAN/eval.py | AOE-khkhan/kaolin | ed132736421ee723d14d59eaeb0286a8916a159d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | examples/GANs/3DGAN/eval.py | AOE-khkhan/kaolin | ed132736421ee723d14d59eaeb0286a8916a159d | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-11-18T13:03:53.000Z | 2019-11-18T13:03:53.000Z | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import numpy as np
import os
import torch
from torch.autograd import Variable
import torch.optim as optim
from torch.utils.data import DataLoader
import sys
from tqdm import tqdm
from architectures import Generator
import kaolin as kal
"""
Commandline arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('-expid', type=str, default='GAN', help='Unique experiment identifier.')
parser.add_argument('--device', type=str, default='cuda', help='Device to use')
parser.add_argument('-batchsize', type=int, default=50, help='Batch size.')
args = parser.parse_args()

# Restore the trained generator for the requested experiment and switch it
# to inference mode.
gen = Generator().to(args.device)
gen.load_state_dict(torch.load('log/{0}/gen.pth'.format(args.expid)))
gen.eval()

# Draw a batch of 200-dim latent vectors (std 0.33) and synthesise voxels.
z = torch.normal(torch.zeros(args.batchsize, 200), torch.ones(args.batchsize, 200) * .33).to(args.device)
fake_voxels = gen(z)[:, 0]

# Convert each generated voxel grid to a smoothed quad mesh and display it.
for voxel_grid in fake_voxels:
    voxel_grid = voxel_grid[:-2, :-2, :-2]
    voxel_grid = kal.rep.voxel.max_connected(voxel_grid, .5)
    verts, faces = kal.conversion.voxel.to_mesh_quad(voxel_grid)
    mesh = kal.rep.QuadMesh.from_tensors(verts, faces)
    mesh.laplacian_smoothing(iterations=3)
    mesh.show()
505c9ede825c6ed2e1eb8da363a465a3e5284503 | 26,526 | py | Python | camfi/annotator.py | J-Wall/camfi | feec4d5df5cdc0c4e1144ce8f85bbf6190c68be1 | [
"MIT"
] | null | null | null | camfi/annotator.py | J-Wall/camfi | feec4d5df5cdc0c4e1144ce8f85bbf6190c68be1 | [
"MIT"
] | 3 | 2021-07-01T05:41:57.000Z | 2021-11-03T01:58:15.000Z | camfi/annotator.py | J-Wall/camfi | feec4d5df5cdc0c4e1144ce8f85bbf6190c68be1 | [
"MIT"
] | 1 | 2021-09-22T13:54:54.000Z | 2021-09-22T13:54:54.000Z | """Defines procedures for training, and evaluation automatic camfi annotation models,
and for using them for making automatic annotations (inference). Depends on camfi.util,
camfi.datamodel.autoannotation, camfi.datamodel.geometry, camfi.datamode.via, as well
as ._torchutils and ._models."""
from datetime import datetime
import itertools
from math import pi
from pathlib import Path
from typing import Any, Callable, Optional, Union
from sys import stderr
import numpy as np
from pydantic import (
BaseModel,
DirectoryPath,
NonNegativeInt,
NonNegativeFloat,
PositiveFloat,
PositiveInt,
ValidationError,
validator,
)
from scipy import sparse
import torch
from torch.utils.data import DataLoader
from torchvision.models.detection.mask_rcnn import MaskRCNN
from tqdm import tqdm, trange
from camfi.datamodel.autoannotation import CamfiDataset, Prediction
from camfi.datamodel.geometry import (
BoundingBox,
CircleShapeAttributes,
PolylineShapeAttributes,
)
from camfi.datamodel.via import (
ViaFileAttributes,
ViaMetadata,
ViaProject,
ViaRegion,
ViaRegionAttributes,
)
from camfi.models import model_urls
from camfi.util import (
endpoint_truncate,
smallest_enclosing_circle,
weighted_intersection_over_minimum,
Field,
)
from ._torchutils import collate_fn, get_model_instance_segmentation, train_one_epoch
def load_annotation_model(model_path_or_url: Union[Path, str]) -> MaskRCNN:
    """Load a camfi annotation model. Accepts any model key provided in
    camfi.models, a Path object, or a URL str.

    Parameters
    ----------
    model_path_or_url : Union[Path, str]
        Path to a .pth file specifying model parameters, a model name defined
        in camfi.models.model_urls, or a URL to download the state dict from.

    Returns
    -------
    model : MaskRCNN
        Instance segmentation model used for automatic annotation.
    """
    print(f"Loading model: {model_path_or_url}", file=stderr)
    if isinstance(model_path_or_url, Path):
        state_dict = torch.load(model_path_or_url)
    else:
        # Known model names resolve through model_urls; anything else is
        # treated as a URL directly.
        url = model_urls.get(model_path_or_url, model_path_or_url)
        state_dict = torch.hub.load_state_dict_from_url(url)
    model = get_model_instance_segmentation(2, pretrained=False)
    model.load_state_dict(state_dict)
    return model
def copy_annotation_model(model: MaskRCNN) -> MaskRCNN:
    """Copies a camfi annotation model.

    Parameters
    ----------
    model : MaskRCNN
        Model to copy.

    Returns
    -------
    MaskRCNN
        Independent model instance carrying the same parameters as ``model``.
    """
    duplicate = get_model_instance_segmentation(2, pretrained=False)
    duplicate.load_state_dict(model.state_dict())
    return duplicate
def train_model(
    dataset: CamfiDataset,
    load_pretrained_model: Optional[Union[Path, str]] = None,
    device: Union[str, torch.device] = "cpu",
    batch_size: int = 5,
    num_workers: int = 2,
    num_epochs: int = 10,
    outdir: DirectoryPath = Path(),
    model_name: Optional[str] = None,
    save_intermediate: bool = False,
) -> Path:
    """Trains a camfi instance segmentation annotation model on specified dataset,
    saving to trained model to outdir.

    Parameters
    ----------
    dataset : CamfiDataset
        Dataset on which to train the model.
    load_pretrained_model : Optional[Union[Path, str]]
        Path or url to model parameters file. If set, will load the pretrained
        parameters. By default, will start with a model pre-trained on the Microsoft
        COCO dataset.
    device : Union[str, torch.device]
        E.g. "cpu" or "cuda". Training is typically much faster on a GPU. Use "cuda" for
        Nvidia GPUs.
    batch_size : int
        Number of images to load at once.
    num_workers : int
        Number of worker processes for data loader to spawn.
    num_epochs : int
        Number of epochs to train.
    outdir : DirectoryPath
        Path to directory where to save model(s).
    model_name : Optional[str]
        Identifier to include in model save file. By default the current date in
        YYYYmmdd format.
    save_intermediate : bool
        If True, model is saved after each epoch, not just after all epoch are complete.
        This is recommended, especially if training on a service which could terminate
        unpredicatbly (e.g. Google Colab).

    Returns
    -------
    model_path : Path
        Path to saved model.
    """
    # Parameter setting
    device = torch.device(device)
    if model_name is None:
        model_name = datetime.now().strftime("%Y%m%d")
    # Initialise data_loader
    data_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=num_workers,
        collate_fn=collate_fn,
    )
    # Initialise model: 2 classes (background + flying insect). When no
    # pretrained camfi model is given, get_model_instance_segmentation falls
    # back to its own default initialisation.
    if load_pretrained_model is not None:
        model = load_annotation_model(load_pretrained_model)
    else:
        model = get_model_instance_segmentation(2)
    model.to(device)
    # Initialise optimiser and lr_scheduler. Only parameters with
    # requires_grad are optimised; the learning rate is decayed 10x every
    # 3 epochs.
    params = [p for p in model.parameters() if p.requires_grad]
    optimiser = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimiser, step_size=3, gamma=0.1)
    # Train the model
    for epoch in range(num_epochs):
        # train for one epoch, printing every 10 iterations
        train_one_epoch(model, optimiser, data_loader, device, epoch, print_freq=10)
        # update the learning rate
        lr_scheduler.step()
        # The final epoch always saves, so save_path is guaranteed to be
        # bound before it is returned below.
        if save_intermediate or epoch == num_epochs - 1:
            save_path = outdir / f"{model_name}_{epoch}_model.pth"
            torch.save(model.state_dict(), save_path)
    print(f"Training complete. Model saved at {save_path}")
    return save_path
class Annotator(BaseModel):
    """Provides methods for automatically annotating images of flying insects using a
    pre-trained instance segmentation model.
    Parameters
    ----------
    dataset : CamfiDataset
        Dataset to annotate.
    model : Union[str, Path, MaskRCNN]
        Either a path to state dict file which defines the segmentation model, or a url
        pointing to a model to download, or one of the model names defined in
        camfi.models.model_urls.
        Alternatively, a MaskRCNN instance can be given directly.
    device : Union[str, torch.device]
        Specifies device to run inference on. E.g. set to "cuda" to use an Nvidia GPU.
    backup_device : Optional[Union[str, torch.device]]
        Specifies device to run inference on when a runtime error occurs while using
        device. Probably only makes sense to set this to "cpu" if device="cuda". This
        option enables the annotator to leverage a GPU with limited memory capacity
        without crashing if a difficult image is encountered.
    backup_model: Optional[MaskRCNN]
        Defines the backup model. Will be automatically generated if backup_device is
        set. Should not be set manually.
    split_angle : PositiveFloat
        Approximate maximum angle between polyline segments in degrees. Note that this
        will immediately be converted to radians upon instantiation of Annotator.
    poly_order : PositiveInt
        Order of polynomial used for fitting motion blur paths.
    endpoint_method : Callable[[np.ndarray, ...], tuple[NonNegativeInt, NonNegativeInt]]
        Method to find endpoints of motion blurs. The first argument to this method
        should be a cropped mask np.ndarray.
    endpoint_extra_args : list[Any]
        Extra arguments to pass to endpoint_method.
    score_thresh : float
        Score threshold between 0.0 and 1.0 for automatic annotations to be kept.
    overlap_thresh : float
        Minimum proportion of overlap (weighted intersection over minimum) between two
        instance segmentation masks to infer that one of the masks should be discarded.
    edge_thresh : NonNegativeInt
        Minimum distance an annotation has to be from the edge of the image before it is
        converted from a polyline annotation to a circle annotation.
    """
    dataset: CamfiDataset
    # "release" is a model alias; the get_model validator below replaces it
    # with a loaded MaskRCNN instance before the field is used.
    model: MaskRCNN = "release"
    device: Union[str, torch.device] = "cpu"
    backup_device: Optional[Union[str, torch.device]] = None
    backup_model: Optional[MaskRCNN] = None
    # Supplied in degrees; converted to radians by the validator below.
    split_angle: PositiveFloat = 15.0
    poly_order: PositiveInt = 2
    endpoint_method: Callable[
        ..., tuple[NonNegativeInt, NonNegativeInt]
    ] = endpoint_truncate
    endpoint_extra_args: list[Any] = [10]
    score_thresh: float = 0.4
    overlap_thresh: float = 0.4
    edge_thresh: NonNegativeInt = 20
    # Diagnostic counter: number of times inference fell back to backup_model.
    backup_model_used: int = 0
    class Config:
        # Needed so pydantic accepts non-pydantic field types
        # (MaskRCNN, torch.device, CamfiDataset callables).
        arbitrary_types_allowed = True
    @validator("model", pre=True, always=True)
    def get_model(cls, v):
        # Accept a ready-made MaskRCNN, otherwise load from path/url/alias.
        if isinstance(v, MaskRCNN):
            return v
        else:
            return load_annotation_model(v)
    @validator("device", always=True)
    def put_model_on_device_and_set_to_eval(cls, v, values):
        # NOTE(review): relies on pydantic running field validators in
        # declaration order, so values["model"] is already populated here.
        print(f"Putting model on device: {v}", file=stderr)
        v = torch.device(v)
        values["model"].to(v)
        values["model"].eval()
        return v
    @validator("backup_model", pre=True, always=True)
    def copy_model_to_backup_device(cls, v, values):
        # backup_model is derived state: built automatically from the primary
        # model whenever a backup_device is configured.
        assert v is None, "Should not set 'backup_model'. It will be set automatically"
        if "backup_device" in values and values["backup_device"] is not None:
            v = copy_annotation_model(values["model"])
            v.to(values["backup_device"])
            v.eval()
        return v
    @validator("split_angle", always=True)
    def convert_split_angle_to_radians(cls, v):
        # Users configure degrees; fit_poly works in radians.
        return v * pi / 180.0
    def get_prediction(self, img_idx: NonNegativeInt) -> Prediction:
        """Run predicion on a single image. First tries to use the model on self.device,
        and falls back to the model on self.backup_device if a RuntimeError is caught
        (if set).
        Parameters
        ----------
        img_idx: int
            Index of image in via project.
        Returns
        -------
        prediction: Prediction
            Output of model prediction.
        """
        try:
            img, _ = self.dataset[img_idx]
        except (OSError, RuntimeError) as e:
            # Unreadable/corrupt image: report and return an empty prediction
            # rather than aborting the whole annotation run.
            print(
                f"Error loading {self.dataset.metadata(img_idx).filename}. {e!r}. Skipping.",
                file=stderr,
            )
            return Prediction.empty()
        with torch.no_grad():
            try:
                prediction = self.model([img.to(self.device)])[0]
            except RuntimeError:
                # Typically an out-of-memory error on the primary device;
                # retry on the backup device if one was configured.
                if self.backup_model:
                    prediction = self.backup_model([img.to(self.backup_device)])[0]
                    self.backup_model_used += 1
                else:
                    raise
        # Release the (possibly large) image tensor promptly.
        del img
        return Prediction.from_tensor_dict(prediction)
    def filter_annotations(self, prediction: Prediction) -> Prediction:
        """Applies self.score_thresh and self.overlap_thresh to filter out poor quality
        annotations.
        Parameters
        ----------
        prediction : Prediction
            Output of model prediction.
        Returns
        -------
        filtered_prediction : Prediction
            Filtered prediction.
        """
        # Remove predictions with below-threshold score
        prediction = prediction.filter_by_score(self.score_thresh)
        n_predictions = len(prediction)
        if n_predictions == 0:
            return prediction
        # Calculate mask overlaps for all pairs of predicted instances
        # (symmetric matrix; only pairs with intersecting boxes are computed).
        mask_overlaps = np.zeros((n_predictions, n_predictions), dtype="f4")
        for i, j in itertools.combinations(range(n_predictions), 2):
            if prediction.boxes[i].overlaps(prediction.boxes[j]):
                mask_overlaps[i, j] = weighted_intersection_over_minimum(
                    prediction.masks[i], prediction.masks[j]
                )
                mask_overlaps[j, i] = mask_overlaps[i, j]
        # Remove worst overlapping instances until there are no above-threshold overlaps
        keep = set(range(n_predictions))
        overlap_mask = mask_overlaps.max(axis=1) >= self.overlap_thresh
        while np.any(overlap_mask):
            # Figure out which overlapping annotation has the worst score
            overlap_annotations = np.where(overlap_mask)[0]
            to_discard = overlap_annotations[
                np.argmin(np.array(prediction.scores)[overlap_annotations])
            ]
            # Remove the annotation and zero its row/column so it no longer
            # contributes to the overlap test.
            keep.remove(to_discard)
            mask_overlaps[to_discard, :] = 0.0
            mask_overlaps[:, to_discard] = 0.0
            overlap_mask = mask_overlaps.max(axis=1) >= self.overlap_thresh
        return prediction.get_subset_from_index(list(keep))
    def fit_poly(
        self,
        box: BoundingBox,
        mask: torch.Tensor,
    ) -> Union[PolylineShapeAttributes, CircleShapeAttributes, None]:
        """Uses polynomial regression to fit a polyline annotation to the provided
        segmentation mask.
        Parameters
        ----------
        box : BoundingBox
            Fully contains the object to be annotated.
        mask : tensor or array
            Segmentation mask of instance with shape (image_width, image_height).
        Returns
        -------
        shape_attributes : Union[PolylineShapeAttributes, CircleShapeAttributes, None]
            Geometry of automatic annotation.
        """
        # 'portrait' (bool) doubles as a 0/1 index below, selecting between
        # the x- and y-axis depending on the box orientation.
        portrait = box.is_portrait()
        crop_mask = box.crop_image(mask).cpu().numpy().reshape(box.shape)
        # Mask values act as regression weights for the polynomial fit.
        y, x = np.where(crop_mask > 0.0)
        weights = np.array(crop_mask[y, x]).flatten()
        # Set longest axis as independent variable and fit polynomial
        ind = (x, y)[portrait]
        dep = (y, x)[portrait]
        poly_fit = np.polynomial.Polynomial.fit(ind, dep, self.poly_order, w=weights)
        # Find endpoints
        ind_vals = np.arange(crop_mask.shape[not portrait])
        dep_vals = poly_fit(ind_vals)
        # Only keep fitted points that land inside the cropped mask.
        val_mask = np.logical_and(dep_vals < crop_mask.shape[portrait], dep_vals >= 0)
        y_vals = (dep_vals, ind_vals)[portrait][val_mask]
        x_vals = (ind_vals, dep_vals)[portrait][val_mask]
        fit_mask_vals = crop_mask[y_vals.astype("i4"), x_vals.astype("i4")]
        endpoints = ind_vals[
            list(self.endpoint_method(fit_mask_vals, *self.endpoint_extra_args))
        ]
        # Approximate polynomial segment with polyline; the number of vertices
        # grows with the angular change between the two endpoints so segments
        # stay within self.split_angle (radians) of each other.
        end_gradients = poly_fit.deriv()(endpoints)
        end_angles = np.arctan(end_gradients)
        angle_diff = abs(end_angles[1] - end_angles[0])
        all_points_ind, all_points_dep = poly_fit.linspace(
            n=int(np.ceil(angle_diff / self.split_angle) + 2), domain=endpoints
        )
        # Translate from crop-local back to full-image coordinates.
        all_points_x = list((all_points_ind, all_points_dep)[portrait] + box.x0)
        all_points_y = list((all_points_dep, all_points_ind)[portrait] + box.y0)
        shape_attributes: Union[PolylineShapeAttributes, CircleShapeAttributes, None]
        try:
            shape_attributes = PolylineShapeAttributes(
                all_points_x=all_points_x, all_points_y=all_points_y
            )
        except ValidationError:
            # Fall back to a circle, and finally to None if even that fails.
            try:
                cx, cy, r = smallest_enclosing_circle(zip(all_points_x, all_points_y))
                shape_attributes = CircleShapeAttributes(cx=cx, cy=cy, r=r)
            except ValidationError:
                shape_attributes = None
        return shape_attributes
    def convert_to_circle(
        self,
        polyline: PolylineShapeAttributes,
        img_shape: tuple[PositiveInt, PositiveInt],
    ) -> Union[PolylineShapeAttributes, CircleShapeAttributes]:
        """Checks if a polyline annotation is close to the edge of an image, and if so,
        converts it to a circle annotation by computing the smallest enclosing circle of
        all points in the polyline.
        Parameters
        ----------
        polyline : PolylineShapeAttributes
            Shape to convert if too close to edge.
        img_shape: tuple[int, int]
            Height and width of image.
        Returns
        -------
        shape_attributes : Union[PolylineShapeAttributes, CircleShapeAttributes]
            Geometry of annotation after (possible) conversion. If polyline does not
            go too close to the edge of the image, then polyline is returned unchanged.
            Else, a circle annotation is returned.
        """
        # Shrink the image bounds by edge_thresh on every side; polylines
        # entirely inside this region are kept as-is.
        polyline_accepted_region = BoundingBox.from_shape(
            img_shape, border=self.edge_thresh
        )
        if polyline.in_box(polyline_accepted_region):
            return polyline
        # as_circle() presumably computes the smallest enclosing circle of the
        # polyline's points (see docstring) — TODO confirm in shape module.
        return polyline.as_circle()
    def annotate_img(self, img_idx: int) -> list[ViaRegion]:
        """Calls self.get_prediction, self.filter_annotations, and self.fit_poly to
        produce annotations for an image specified with img_idx.
        Parameters
        ----------
        img_idx: int
            Index of image in via project.
        Returns
        -------
        regions : list[ViaRegion]
            list of annotations for image.
        """
        prediction = self.get_prediction(img_idx)
        prediction = self.filter_annotations(prediction)
        regions = []
        for i in range(len(prediction)):
            box = prediction.boxes[i]
            mask = prediction.masks[i]
            score = prediction.scores[i]
            shape_attributes = self.fit_poly(box, mask)
            if shape_attributes is None:
                # fit_poly could not produce a valid shape; skip this instance.
                continue
            if shape_attributes.name == "polyline":
                assert isinstance(shape_attributes, PolylineShapeAttributes)
                # Last two mask dimensions give (height, width) of the image.
                shape_attributes = self.convert_to_circle(
                    shape_attributes, (mask.shape[-2], mask.shape[-1])
                )
            region_attributes = ViaRegionAttributes(score=score)
            regions.append(
                ViaRegion(
                    region_attributes=region_attributes,
                    shape_attributes=shape_attributes,
                )
            )
        return regions
    def annotate(self, disable_progress_bar: Optional[bool] = True) -> ViaProject:
        """Calls self.annotate_img on all images and returns a ViaProject instance.
        Copies the `via_attributes` and `via_settings` fields from
        `self.dataset.via_project`, and just replaces the `via_img_metadata` field.
        Parameters
        ----------
        disable_progress_bar : Optional[bool]
            If True (default), progress bar is disabled.
            If set to None, disable on non-TTY.
        Returns
        -------
        project : ViaProject
            With automatic annotations made.
        """
        via_img_metadata: dict[str, ViaMetadata] = {}
        # Running totals shown in the progress bar postfix.
        postfix = {"tot_annotations": 0}
        if self.backup_device:
            postfix["backup_device_used"] = self.backup_model_used
        pb = trange(
            len(self.dataset),
            disable=disable_progress_bar,
            desc="Annotating images",
            unit="img",
            dynamic_ncols=True,
            ascii=True,
            postfix=postfix,
        )
        for img_idx in pb:
            img_key = self.dataset.keys[img_idx]
            regions = self.annotate_img(img_idx)
            in_metadata = self.dataset.metadata(img_idx)
            # .construct() skips pydantic validation; the fields are copied
            # from already-validated metadata so this is safe and faster.
            out_metadata = ViaMetadata.construct(
                file_attributes=in_metadata.file_attributes.copy(),
                filename=in_metadata.filename,
                regions=regions,
                size=in_metadata.size,
            )
            via_img_metadata[img_key] = out_metadata
            postfix["tot_annotations"] += len(regions)
            if self.backup_device:
                postfix["backup_device_used"] = self.backup_model_used
            pb.set_postfix(postfix, refresh=False)
        print(f"Annotation complete.", file=stderr)
        return ViaProject.construct(
            via_attributes=self.dataset.via_project.via_attributes,
            via_img_metadata=via_img_metadata,
            via_settings=self.dataset.via_project.via_settings,
        )
class AnnotationValidationResult(BaseModel):
    """Contains various metrics for assessing the quality of a set of automatically
    obtained annotations of flying insects.
    Parameters
    ----------
    ious : list[tuple[NonNegativeFloat, NonNegativeFloat]]
        list of (iou, score) pairs.
        iou is the Intersection over Union of the bounding boxes of true positives
        to their matched ground truth annotation. All matched annotations are
        included.
    polyline_hausdorff_distances : list[tuple[NonNegativeFloat, NonNegativeFloat]]
        list of (h_dist, score) pairs.
        h_dist is the hausdorff distance of a true positive polyline annotation,
        where the annotation is matched to a polyline ground truth annotation. Only
        polyline annotations which matched to a polyline ground truth annotation are
        included.
    length_differences : list[tuple[float, NonNegativeFloat]]
        list of (l_diff, score) pairs.
        l_diff is calculated as the length of a true positive polyline annotation
        minus the length of it's matched ground truth annotation. Only polyline
        annotations which matched to a polyline ground truth annotation are
        included.
    true_positives : list[NonNegativeFloat]
        list of scores.
    false_positives : list[NonNegativeFloat]
        list of scores. Score is the prediction score of the automatic annotation.
    false_negatives : int
        Number of false negative annotations.
    """
    # NOTE: pydantic copies mutable field defaults per instance, so the
    # empty-list defaults below are not shared between model instances.
    ious: list[tuple[NonNegativeFloat, NonNegativeFloat]] = []
    polyline_hausdorff_distances: list[tuple[NonNegativeFloat, NonNegativeFloat]] = []
    length_differences: list[tuple[float, NonNegativeFloat]] = []
    true_positives: list[NonNegativeFloat] = []
    false_positives: list[NonNegativeFloat] = []
    false_negatives: NonNegativeInt = 0
def validate_annotations(
    auto_annotations: ViaProject,
    ground_truth: ViaProject,
    iou_thresh: float = 0.5,
    subset_functions: Optional[dict[str, Callable[[ViaMetadata], bool]]] = None,
    disable_progress_bar: Optional[bool] = True,
) -> list[AnnotationValidationResult]:
    """Compares automatic annotations against a ground-truth annotations for validation
    puposes. Validation data is stored in an AnnotationValidationResult object.
    Parameters
    ----------
    auto_annotations : ViaProject
        Automatically obtained annotations to assess.
    ground_truth : ViaProject
        Manually created ground-truth annotations.
    iou_thresh : float
        Threshold of intersection-over-union of bounding boxes to be considered a
        match. Typically, this is 0.5.
    subset_functions : Optional[dict[str, Callable[[ViaMetadata], bool]]]
        Mapping from subset name to subset function. If set, validation will be repeated
        multiple times with different subsets, once for each element.
    disable_progress_bar : Optional[bool]
        If True (default), progress bar is disabled.
        If set to None, disable on non-TTY.
    Returns
    -------
    validation_results : list[AnnotationValidationResult]
        list containing instances of AnnotationValidationResult. If subset_functions is
        set, then validation_results will have len(subset_functions) elements. By
        default it will just contain one element.
    """
    if subset_functions is None:
        # Default: a single pass over all images.
        subset_functions = {"all": lambda x: True}
    results: list[AnnotationValidationResult] = []
    for name, subset_function in subset_functions.items():
        gt_annotations = ground_truth.filtered_copy(subset_function)
        result = AnnotationValidationResult()
        # Only images present in BOTH projects can be compared.
        for img_key in tqdm(
            gt_annotations.via_img_metadata.keys()
            & auto_annotations.via_img_metadata.keys(),
            disable=disable_progress_bar,
            desc=f"Validating {name} annotations",
            unit="img",
            dynamic_ncols=True,
            ascii=True,
        ):
            gt_metadata = gt_annotations.via_img_metadata[img_key]
            metadata = auto_annotations.via_img_metadata[img_key]
            # Sparse IOU matrix: rows are auto regions, columns are gt regions;
            # only above-threshold pairs are stored (zeros mean "no match").
            ious = sparse.dok_matrix(
                (len(metadata.regions), len(gt_metadata.regions)), dtype="f8"
            )
            for i, j in itertools.product(
                range(len(metadata.regions)), range(len(gt_metadata.regions))
            ):
                iou = metadata.regions[i].shape_attributes.intersection_over_union(
                    gt_metadata.regions[j].shape_attributes
                )
                if iou >= iou_thresh:
                    ious[i, j] = iou
            ious = ious.tocsr()
            # With perm_type "column", matches[i] is the gt column matched to
            # auto-region row i, or -1 if row i is unmatched.
            matches = sparse.csgraph.maximum_bipartite_matching(ious, "column")
            # Ground-truth regions without a matched auto region are misses.
            result.false_negatives += len(gt_metadata.regions) - np.count_nonzero(
                matches >= 0
            )
            for i, match in enumerate(matches):
                score = metadata.regions[i].region_attributes.score
                if score is None:
                    raise ValueError(
                        "Invalid automatically obtained annotation. "
                        "Ensure that auto_annotations were obtained automatically "
                        f"(region {i} of {img_key} missing 'score' region_attribute)."
                    )
                elif match >= 0:
                    # Matched pair: record score, IOU and (for polyline-polyline
                    # matches only) geometric similarity metrics.
                    result.true_positives.append(score)
                    result.ious.append((ious[i, match], score))
                    shape = metadata.regions[i].shape_attributes
                    gt_shape = gt_metadata.regions[match].shape_attributes
                    if shape.name == gt_shape.name == "polyline":
                        assert isinstance(shape, PolylineShapeAttributes)
                        h_dist = shape.hausdorff_distance(gt_shape)
                        result.polyline_hausdorff_distances.append((h_dist, score))
                        l_diff = shape.length() - gt_shape.length()
                        result.length_differences.append((l_diff, score))
                else:
                    # Auto region with no gt match is a false positive.
                    result.false_positives.append(score)
        results.append(result)
    return results
| 38.221902 | 93 | 0.64695 | 16,317 | 0.615132 | 0 | 0 | 964 | 0.036342 | 0 | 0 | 11,523 | 0.434404 |
50609a1ab04ea45a8835acdab90437bf93e94353 | 1,644 | py | Python | SLpackage/private/pacbio/pythonpkgs/pbreports/lib/python2.7/site-packages/pbreports/report/laagc_input.py | fanglab/6mASCOPE | 3f1fdcb7693ff152f17623ce549526ec272698b1 | [
"BSD-3-Clause"
] | 5 | 2022-02-20T07:10:02.000Z | 2022-03-18T17:47:53.000Z | SLpackage/private/pacbio/pythonpkgs/pbreports/lib/python2.7/site-packages/pbreports/report/laagc_input.py | fanglab/6mASCOPE | 3f1fdcb7693ff152f17623ce549526ec272698b1 | [
"BSD-3-Clause"
] | null | null | null | SLpackage/private/pacbio/pythonpkgs/pbreports/lib/python2.7/site-packages/pbreports/report/laagc_input.py | fanglab/6mASCOPE | 3f1fdcb7693ff152f17623ce549526ec272698b1 | [
"BSD-3-Clause"
] | null | null | null | """
Wrapper for running amplicon_analysis_inputs report for LAAgc (with an extra
input file).
"""
import logging
import sys
from pbcommand.models import FileTypes
from pbcommand.cli import pbparser_runner
from pbcommand.utils import setup_log
from pbreports.report.amplicon_analysis_input import _get_parser, make_report
log = logging.getLogger(__name__)
class Constants(object):
    """Tool-contract constants for the LAAgc input report task."""
    # pbcommand tool identifier under which this task is registered.
    TOOL_ID = "pbreports.tasks.laagc_input"
    # Command line used to execute this task from a resolved tool contract.
    DRIVER_EXE = "python -m pbreports.report.laagc_input --resolved-tool-contract"
def _args_runner(args):
    """Entry point when invoked with plain parsed command-line arguments."""
    return make_report(
        args.report_csv, args.report_json, args.locus_csv, args.barcoded_subreads
    )
def _rtc_runner(rtc):
    """Entry point when invoked with a resolved tool contract."""
    task = rtc.task
    return make_report(
        task.input_files[0], task.output_files[0],
        task.input_files[1], task.input_files[2],
    )
def _get_laagc_parser():
    """Build the LAAgc argument parser.

    Extends the base amplicon_analysis_input parser with the two extra
    inputs LAAgc needs: the per-locus subread CSV and the barcoded
    SubreadSet XML.
    """
    parser = _get_parser(Constants.TOOL_ID, Constants.DRIVER_EXE)
    parser.add_input_file_type(
        FileTypes.CSV,
        file_id="locus_csv",
        name="Mapped Subreads CSV",
        description="CSV of mapped subreads per sample per locus")
    parser.add_input_file_type(
        FileTypes.DS_SUBREADS,
        file_id="barcoded_subreads",
        name="Barcoded Subreads",
        description="Barcoded SubreadSet XML")
    return parser
def main(argv=sys.argv):
    """CLI entry point; dispatches to the args or RTC runner as appropriate."""
    parser = _get_laagc_parser()
    return pbparser_runner(argv[1:],
                           parser,
                           _args_runner,
                           _rtc_runner,
                           log,
                           setup_log)
# for 'python -m pbreports.report.laagc_input ...'
if __name__ == "__main__":
    sys.exit(main())
| 27.4 | 97 | 0.670316 | 151 | 0.091849 | 0 | 0 | 0 | 0 | 0 | 0 | 403 | 0.245134 |
5060bae394ef43f65427b7889ebd8a488a199475 | 1,014 | py | Python | toughradius/common/event_common.py | geosson/GSRadius | 5870e3d055e8366f98b8e65220a1520b5da22f6d | [
"Apache-2.0"
] | 1 | 2019-05-12T15:06:58.000Z | 2019-05-12T15:06:58.000Z | toughradius/common/event_common.py | geosson/GSRadius | 5870e3d055e8366f98b8e65220a1520b5da22f6d | [
"Apache-2.0"
] | null | null | null | toughradius/common/event_common.py | geosson/GSRadius | 5870e3d055e8366f98b8e65220a1520b5da22f6d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding:utf-8
from toughlib import dispatch
"""触发邮件,短信发送公共方法"""
def trigger_notify(obj, user_info, **kwargs):
    """Common helper for firing webhook, email and SMS notification events.

    For each notification channel that is enabled in the configuration held
    by *obj* (looked up via ``obj.get_param_value``) and for which a matching
    event name was passed in *kwargs*, publish the event synchronously via
    ``dispatch.pub``.

    :param obj: configuration provider exposing ``get_param_value(name, default)``.
    :param user_info: user/account record forwarded to every published event.
    :param kwargs: optional event names: ``webhook_notify``, ``toughcloud_mail``,
        ``smtp_mail``, ``toughcloud_sms``.
    """
    # 'async' became a reserved keyword in Python 3.7, so the keyword argument
    # is passed via dict unpacking; this is identical to async=False and keeps
    # the code importable on both Python 2 and Python 3.
    sync_kwargs = {'async': False}
    if int(obj.get_param_value("webhook_notify_enable", 0)) > 0 and kwargs.get('webhook_notify'):
        dispatch.pub(kwargs['webhook_notify'], user_info, **sync_kwargs)
    if int(obj.get_param_value("mail_notify_enable", 0)) > 0:
        # toughcloud mail additionally requires a valid license.
        if obj.get_param_value("mail_mode", 'smtp') == 'toughcloud' and \
                obj.get_param_value("toughcloud_license", None) and kwargs.get('toughcloud_mail'):
            dispatch.pub(kwargs['toughcloud_mail'], user_info, **sync_kwargs)
        if obj.get_param_value("mail_mode", 'smtp') == 'smtp' and kwargs.get('smtp_mail'):
            dispatch.pub(kwargs['smtp_mail'], user_info, **sync_kwargs)
    if int(obj.get_param_value("sms_notify_enable", 0)) > 0 and \
            obj.get_param_value("toughcloud_license", None) and kwargs.get('toughcloud_sms'):
        dispatch.pub(kwargs['toughcloud_sms'], user_info, **sync_kwargs)
| 39 | 98 | 0.68146 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 352 | 0.339114 |
5060ee963497faf44238f2e6162528f64a4e0e16 | 2,017 | py | Python | sahara/utils/openstack/nova.py | citrix-openstack-build/sahara | 17e4f4dac5bb321ef4d5a55664cca0857127d7e6 | [
"Apache-2.0"
] | 1 | 2022-02-25T19:14:33.000Z | 2022-02-25T19:14:33.000Z | sahara/utils/openstack/nova.py | citrix-openstack-build/sahara | 17e4f4dac5bb321ef4d5a55664cca0857127d7e6 | [
"Apache-2.0"
] | null | null | null | sahara/utils/openstack/nova.py | citrix-openstack-build/sahara | 17e4f4dac5bb321ef4d5a55664cca0857127d7e6 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from novaclient import exceptions as nova_ex
from novaclient.v1_1 import client as nova_client
from sahara import context
import sahara.utils.openstack.base as base
from sahara.utils.openstack import images
def client():
    """Return a novaclient ``Client`` authenticated for the current
    request context, with Sahara's image manager plugged in.

    The client reuses the caller's keystone token rather than
    re-authenticating, and its image manager is replaced by
    ``SaharaImageManager``.
    """
    current_ctx = context.current()
    compute_url = base.url_for(current_ctx.service_catalog, 'compute')
    auth_url = base.retrieve_auth_url()
    compute = nova_client.Client(username=current_ctx.username,
                                 api_key=None,
                                 project_id=current_ctx.tenant_id,
                                 auth_url=auth_url)
    # Re-use the existing token/endpoint instead of authenticating again.
    compute.client.auth_token = current_ctx.token
    compute.client.management_url = compute_url
    compute.images = images.SaharaImageManager(compute)
    return compute
def get_flavors():
    """Return the names of all nova flavors visible to the current tenant."""
    return [item.name for item in client().flavors.list()]
def get_flavor(**kwargs):
    # Look up a single flavor matching the given criteria (e.g. name=...).
    # NOTE: unlike get_network(), a failed lookup is not caught here and
    # will propagate to the caller.
    return client().flavors.find(**kwargs)
def get_images():
    """Return the ids of all images known to nova."""
    return [img.id for img in client().images.list()]
def get_limits():
    """Return a mapping of absolute nova limit names to their values."""
    absolute_limits = client().limits.get().absolute
    return {limit.name: limit.value for limit in absolute_limits}
def get_user_keypair(cluster):
    """Return the keypair named by ``cluster.user_keypair_id``,
    or ``None`` when nova no longer knows about it."""
    keypair_id = cluster.user_keypair_id
    try:
        return client().keypairs.get(keypair_id)
    except nova_ex.NotFound:
        return None
def get_instance_info(instance):
    # Fetch the live nova server record for the given instance's id.
    return client().servers.get(instance.instance_id)
def get_network(**kwargs):
    """Find the nova network matching ``kwargs``; ``None`` when no match."""
    try:
        match = client().networks.find(**kwargs)
    except nova_ex.NotFound:
        return None
    return match
| 27.630137 | 69 | 0.699554 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 577 | 0.286068 |
50632d26f7af85cd942cc29b1b5c01dae2c5c47e | 2,978 | py | Python | swim_backend/events.py | eurocontrol-swim/swim-backend | bdeba82d43b833f2fc9ef81e806d8ce0aafdb5b9 | [
"BSD-3-Clause"
] | null | null | null | swim_backend/events.py | eurocontrol-swim/swim-backend | bdeba82d43b833f2fc9ef81e806d8ce0aafdb5b9 | [
"BSD-3-Clause"
] | null | null | null | swim_backend/events.py | eurocontrol-swim/swim-backend | bdeba82d43b833f2fc9ef81e806d8ce0aafdb5b9 | [
"BSD-3-Clause"
] | null | null | null | """
Copyright 2019 EUROCONTROL
==========================================
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==========================================
Editorial note: this license is an instance of the BSD license template as provided by the Open Source Initiative:
http://opensource.org/licenses/BSD-3-Clause
Details on EUROCONTROL: http://www.eurocontrol.int
"""
import abc
__author__ = "EUROCONTROL (SWIM)"
class Event(list):
    """A minimal publish/subscribe primitive.

    An ``Event`` is simply a list of callables.  Invoking the event
    instance calls every registered subscriber, in list order, with the
    arguments given to the call.
    """
    _type = 'Generic'

    def __call__(self, *args, **kwargs):
        for subscriber in self:
            subscriber(*args, **kwargs)

    def __repr__(self):
        return f"{self._type} Event({list.__repr__(self)})"
class EventSafe(list):
    """A transactional variant of :class:`Event`.

    ``EventSafe`` holds handler *classes* (see :class:`EventHandler`).
    Calling the event instantiates each class with the call arguments and
    invokes its ``do`` method in order.  If any ``do`` raises, the failing
    handler and every previously completed handler are rolled back via
    ``undo`` (in reverse order of execution) and the exception is
    re-raised.
    """
    def __call__(self, *args, **kwargs):
        handlers = [handler_class(*args, **kwargs) for handler_class in self]
        completed = []
        for handler in handlers:
            try:
                handler.do()
            # BaseException (not a bare except:) so the intent is explicit;
            # rollback must run even for KeyboardInterrupt/SystemExit, and
            # the exception is always re-raised.
            except BaseException:
                # The failing handler is also rolled back — its do() may
                # have had partial effects before raising.
                handler.undo()
                for done in reversed(completed):
                    done.undo()
                raise
            completed.append(handler)
class EventHandler(abc.ABC):
    """Abstract base class for transactional event handlers.

    Concrete subclasses implement :meth:`do` to apply an effect and
    :meth:`undo` to reverse it (used by :class:`EventSafe` for rollback).
    """
    @abc.abstractmethod
    def do(self, *args, **kwargs):
        """Apply the handler's effect."""

    @abc.abstractmethod
    def undo(self, *args, **kwargs):
        """Reverse the effect of a previous :meth:`do`."""
| 36.765432 | 121 | 0.693083 | 1,142 | 0.383479 | 0 | 0 | 136 | 0.045668 | 0 | 0 | 2,050 | 0.688381 |
ac8cc54ceb3e6f1445ff8cd5bb3540d66783ed73 | 22,063 | py | Python | src/ama/validator.py | sffjunkie/ama | e05e98fb771ae783901516f882990fcc54ca0026 | [
"Apache-2.0"
] | 1 | 2019-03-21T09:50:20.000Z | 2019-03-21T09:50:20.000Z | src/ama/validator.py | sffjunkie/ama | e05e98fb771ae783901516f882990fcc54ca0026 | [
"Apache-2.0"
] | null | null | null | src/ama/validator.py | sffjunkie/ama | e05e98fb771ae783901516f882990fcc54ca0026 | [
"Apache-2.0"
] | null | null | null | # Copyright 2013-2014, Simon Kennedy, sffjunkie+code@gmail.com
# pylint: disable=unused-argument
"""Provides access to a registry of validation functions.
Functions are returned via the :func:`get_validator` function and can be refined
by passing a specification which alters what passes the validation.
All validators throw :class:`TypeError` if the value's type cannot be validated
and :class:`ValueError` if the value fails validation.
======================== ======================================================
Validator Name Tests that the value...
======================== ======================================================
``nonempty`` is not None or an empty string
``constant`` always returns the same value
``str`` can be converted to a string
``int`` can be converted to an integer value
``float`` can be converted to a floating point value
``bool`` can be converted to a boolean value
``yesno`` matches one of ``yes``, ``y``, ``no``, ``n`` with any
case plus 1, 0, True and False
``re`` matches the regular expression.
``path`` is a valid path
``date`` is a valid date
``time`` is a valid time
``color`` is a valid RGB or RGB hex color
``email`` is a valid email address
======================== ======================================================
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sys
import csv
import gettext
import glob
import shutil
import string
import tempfile
from io import StringIO
from datetime import datetime, date, time
from functools import partial
try:
from pkg_resources import load_entry_point
except:
load_entry_point = lambda x: None
import re
try:
import pyisemail
PYISEMAIL = True
except ImportError:
PYISEMAIL = False
if sys.version_info < (3, 0):
gettext.install('ama', unicode=True) #pylint: disable=unexpected-keyword-arg
else:
gettext.install('ama')
# strptime/strftime formats used as defaults by the date and time validators.
DEFAULT_TIME_FORMAT = '%H:%M'
DEFAULT_DATE_FORMAT = '%Y-%m-%d'
# Cross-version setup: 'str_type' is the base type used for "string-like"
# isinstance checks, and the 'ama' csv dialect splits specification strings
# on '|'.  On Python 2 the csv module requires a byte-string delimiter.
if sys.version_info >= (3, 0):
    str_type = str
    csv.register_dialect('ama', delimiter='|')
else:
    str_type = basestring
    csv.register_dialect('ama', delimiter=b'|')
def str_to_elems(string):
    """Yield each '|'-separated element of *string*.

    Parsing uses the module-level 'ama' csv dialect, so csv quoting rules
    apply.  On Python 2, byte strings are decoded as UTF-8 first.
    """
    # NOTE(review): the parameter name shadows the imported 'string' module
    # (harmless here, but easy to trip over when editing).
    if sys.version_info < (3, 0) and isinstance(string, basestring):
        string = string.decode('UTF-8')
    ds = StringIO(string)
    reader = csv.reader(ds, dialect=csv.get_dialect('ama'))
    for row in reader:
        for elem in row:
            yield elem
def str_to_kwargs(string, allowed=None):
    """Parse a '|'-separated list of ``option=value`` pairs into a dict.

    :param string: Specification string such as ``"min=3|max=6"``.
    :param allowed: Optional collection of option names to keep; any other
                    options are silently dropped.  ``None`` keeps everything.
    :return: dict mapping each option name to its (string) value.
    """
    kwargs = {}
    for elem in str_to_elems(string):
        # Split on the first '=' only, so values may themselves contain
        # '=' characters (e.g. "pattern=a=b").
        option, value = elem.split('=', 1)
        if not allowed or option in allowed:
            kwargs[option] = value
    return kwargs
def NonEmpty(*args, **kwargs):
    """Create a validator that accepts any truthy value.

    :param message: Optional keyword argument overriding the error text.
    """
    message = kwargs.get('message', _('Please enter anything.'))

    def validate(value):
        if value:
            return value
        raise ValueError(message)

    return validate
def Constant(*args, **kwargs):
    """Create a validator that ignores its input and always returns the
    first positional argument supplied at construction time."""
    def validate(value):
        # args is captured by the closure and indexed lazily on each call,
        # preserving the original late-failure behaviour when no argument
        # was supplied.
        return args[0]
    return validate
def OneOf(*args, **kwargs):
    """Create a validator accepting only values contained in *args*."""
    def validate(value):
        if value not in args:
            raise ValueError(_('Value must be one of %s') % ', '.join(args))
        return value
    return validate
def Str(*args, **kwargs):
    """Create a validator for string values.

    With no spec, anything convertible to ``str`` passes.  The positional
    spec ``'nonempty'`` returns a :func:`NonEmpty` validator instead.
    Keyword arguments ``min`` and ``max`` bound the accepted length,
    e.g. ``min=3, max=6`` accepts strings of 3 to 6 characters.
    ``None`` and the empty string are normalised to ``''`` without the
    length constraints being applied.
    """
    if args and args[0] == 'nonempty':
        return NonEmpty()

    def validate(value, **opts):
        # Empty input is normalised to '' and bypasses the length checks.
        if value is None or value == '':
            return ''
        try:
            value = str(value)
        except:
            raise ValueError(_('Unable to convert value to string'))
        length = len(value)
        if 'min' in opts:
            minimum = int(opts['min'])
            if length < minimum:
                raise ValueError(_('String must be at least %d characters') % minimum)
        if 'max' in opts:
            maximum = int(opts['max'])
            if length > maximum:
                raise ValueError(_('String must be a maximum of %d characters') % maximum)
        return value

    return partial(validate, **kwargs)
def Int(*args, **kwargs):
    """Create a validator for integer values.

    Floats are rejected outright, as are strings containing the decimal
    separator.  Keyword arguments:

    ``min`` / ``max``
        Inclusive bounds on the accepted value.
    ``decimal``
        Character treated as the decimal separator when rejecting string
        input (default ``'.'``).
    """
    def validate(value, **opts):
        msg = _('Invalid integer value')
        # An actual float instance is a type error, not a value error.
        if isinstance(value, float):
            raise TypeError(msg)
        if isinstance(value, str_type):
            separator = opts.get('decimal', '.')
            if separator in value:
                raise ValueError(msg)
        try:
            value = int(value)
        except:
            raise ValueError(msg)
        if 'min' in opts:
            lower = int(opts['min'])
            if value < lower:
                raise ValueError('Integer value less than minimum %d' % lower)
        if 'max' in opts:
            upper = int(opts['max'])
            if value > upper:
                raise ValueError('Integer value greater than maximum %d' % upper)
        return value
    return partial(validate, **kwargs)
def Float(*args, **kwargs):
    """Create a validator that checks that the value is a valid float
    according to the `spec`

    :param spec: The specification to check the float against.
        Can be either

        None
            Anything that is a float passes. e.g. 1.2 and "1.2" are
            valid floats but 1, "1" or "dave" are not.

        A string of `argument=value` pairs separated by commas.
            Alters how the float is validated. The following arguments
            can be specified.

            | ``min`` - The minimum value
            | ``max`` - The maximum value
            | ``decimal`` - The character to consider as the decimal separator
            | ``nocoerce`` - Disable coercing int to float

            e.g. "min=3.1,max=6.0" means the value must be between
            3.1 and 6.0; "decimal=\\\\," means that "33,234" is a valid float.
    :type spec: str
    """
    def validate(value, **kwargs):
        msg = _('Invalid floating point value')
        # With 'nocoerce' set, an int is not silently promoted to float.
        if 'nocoerce' in kwargs and isinstance(value, int):
            raise TypeError(msg)
        if isinstance(value, str_type):
            decimal = kwargs.get('decimal', '.')
            if 'nocoerce' in kwargs and decimal not in value:
                # Without a decimal separator the string would be an int.
                raise ValueError(msg)
            elif decimal != '.':
                # Normalise the custom decimal separator so float() accepts it.
                value = value.replace(decimal, '.')
        try:
            value = float(value)
        except (TypeError, ValueError):
            # Narrowed from a bare except: float() only legitimately raises
            # these two.
            raise ValueError(msg)
        if 'min' in kwargs:
            min_ = float(kwargs['min'])
            if value < min_:
                # Wrapped in _() for consistency with the other validator
                # messages in this module.
                raise ValueError(_('Float value less than minimum %f') % min_)
        if 'max' in kwargs:
            max_ = float(kwargs['max'])
            if value > max_:
                raise ValueError(_('Float value greater than maximum %f') % max_)
        return value
    return partial(validate, **kwargs)
def Number(*args, **kwargs):
    """Create a validator that checks that the value is a valid number
    according to the `spec`

    :param spec: The specification to check the number against.
        Can be either

        None
            Anything that is a number passes.

        A string of `argument=value` pairs separated by commas.
            Checks the number matches based on the arguments specified.
            The following arguments can be specified.

            | ``min`` - The minimum value
            | ``max`` - The maximum value
            | ``decimal`` - The character to consider as the decimal separator

            e.g. "min=3,max=6" means the value must be between 3 and 6.
    :type spec: str
    """
    def validate(value, **kwargs):
        msg = _('Invalid number')
        if isinstance(value, str_type):
            # Normalise a custom decimal separator so float() accepts it.
            decimal = kwargs.get('decimal', '.')
            if decimal != '.':
                value = value.replace(decimal, '.')
        try:
            value = float(value)
        except (TypeError, ValueError):
            # float(None) raises TypeError, which previously escaped as an
            # unhandled exception instead of a clean validation error.
            raise ValueError(msg)
        if 'min' in kwargs:
            min_ = float(kwargs['min'])
            if value < min_:
                # %f (not %d) so fractional bounds are reported accurately,
                # matching the Float validator's messages.
                raise ValueError(_('Float value less than minimum %f') % min_)
        if 'max' in kwargs:
            max_ = float(kwargs['max'])
            if value > max_:
                raise ValueError(_('Float value greater than maximum %f') % max_)
        return value
    return partial(validate, **kwargs)
def Bool(*args, **kwargs):
    """Create a validator that checks that the value is a valid bool."""
    def validate(value):
        # Genuine bools pass through untouched; other ints collapse to
        # their truthiness.
        if isinstance(value, bool):
            return value
        if isinstance(value, int):
            return bool(value)
        # Everything else is matched textually (case-insensitively)
        # against the accepted spellings.
        text = str(value).lower()
        if text in ('true', '1', 'yes', 'y'):
            return True
        if text in ('false', '0', 'no', 'n'):
            return False
        raise ValueError(_('Invalid boolean value'))
    return validate
def Regex(*args, **kwargs):
    """Create a validator that checks that the value matches a regular
    expression.
    """
    # With no pattern supplied the validator merely coerces to str.
    if not args:
        return lambda value: str(value)
    def validate(value, **kwargs):
        pattern = kwargs.pop('regex', None)
        if not pattern:
            return value
        if re.match(pattern, value) is None:
            raise ValueError('%s %s' % (
                _('Please enter a string which matches the regex'), pattern))
        return value
    kwargs['regex'] = args[0]
    return partial(validate, **kwargs)
def Path(*args, **kwargs):
    """Create a validator that checks that the value is a valid path.

    The meaning of valid is determined by the `spec` argument

    :param spec: Determines what is a valid path.

        ``existing``
            is a path that exists (the default)

        ``empty``
            is a path that is empty

        ``nonempty``
            is a path that is not empty

        ``new``
            is a path that does not exist and is a valid name for a path

        :samp:`{pathspec}`
            is a valid path name that contains files that conform to `pathspec`

            `pathspec` is of the form :samp:`[+-]{glob}` where the
            leading ``+`` indicates that the path must include a
            file that matches the glob and ``-`` indicates that it
            must not include files that match the glob. Multiple
            pathspecs can be specified separated by commas.
    :type spec: str
    """
    def validate_path_existing(value):
        """Validate that path exists"""
        msg1 = _('Path does not exist.')
        # Must exist AND be a directory; an existing plain file fails too.
        is_dir = os.path.exists(value) and os.path.isdir(value)
        if not is_dir:
            raise ValueError(msg1)
        return value
    def validate_path_new(value):
        """Validate that the path could be created."""
        msg1 = _('Path already exists.')
        msg2 = _('Invalid path name.')
        # Empty input is accepted and passed through unchanged.
        if value == '':
            return ''
        if os.path.isdir(value):
            raise ValueError(msg1)
        # Strip the drive (on Windows) and any leading separator so the
        # remainder can be used as a mkdtemp prefix below.
        if os.path.isabs(value):
            dummy, p = os.path.splitdrive(value)
        else:
            p = value
        if p[0] == '\\' or p[0] == '/':
            p = p[1:]
        # Probe validity by letting the OS try to create a temp directory
        # with the candidate name as prefix, then remove it again.
        # NOTE(review): a value containing path separators will make
        # mkdtemp fail with OSError even for otherwise creatable paths -
        # confirm this is the intended behaviour.
        try:
            tf = tempfile.mkdtemp(prefix=p)
            shutil.rmtree(tf)
            return value
        except OSError:
            raise ValueError(msg2)
    def validate_path_empty(value):
        # Directory must exist and contain no entries at all.
        msg1 = _('Path does not exist.')
        msg2 = _('Path should be empty.')
        is_dir = os.path.exists(value) and os.path.isdir(value)
        if not is_dir:
            raise ValueError(msg1)
        if len(os.listdir(value)) != 0:
            raise ValueError(msg2)
        return value
    def validate_path_nonempty(value):
        # Directory must exist and contain at least one entry.
        msg1 = _('Path does not exist.')
        msg2 = _('Path should contain files.')
        is_dir = os.path.exists(value) and os.path.isdir(value)
        if not is_dir:
            raise ValueError(msg1)
        if len(os.listdir(value)) == 0:
            raise ValueError(msg2)
        return value
    def validate_path_with_spec(*args):
        # Partition the pathspec into globs that must match at least one
        # file ('+') and globs that must match none ('-').
        included = []
        not_included = []
        for elem in str_to_elems(args[0]):
            if elem.startswith('+'):
                included.append(elem[1:].strip('"'))
            elif elem.startswith('-'):
                not_included.append(elem[1:].strip('"'))
        def validate(value):
            msg_start = _('Path %s')
            msg_should_contain = _('should contain files matching %s')
            msg_should_not_contain = _('should not contain files matching %s')
            # Collect every '+' glob with no match in the directory ...
            not_found = []
            for spec in included:
                if len(glob.glob(os.path.join(value, spec))) == 0:
                    not_found.append(spec)
            # ... and every '-' glob that unexpectedly matched something.
            found = []
            for spec in not_included:
                if len(glob.glob(os.path.join(value, spec))) != 0:
                    found.append(spec)
            found_count = len(found)
            not_found_count = len(not_found)
            if found_count != 0 or not_found_count != 0:
                # Assemble a single human-readable message covering both
                # kinds of failure.
                msg_elem = [msg_start % value]
                if not_found_count > 0:
                    msg_elem.append(msg_should_contain % ','.join(included))
                if found_count > 0:
                    if not_found_count > 0:
                        msg_elem.append(_('and'))
                    msg_elem.append(msg_should_not_contain % ','.join(not_included))
                msg = ' '.join(msg_elem)
                raise ValueError(msg)
            return value
        return validate
    # Dispatch on the spec keyword; anything unrecognised is treated as a
    # pathspec of +/- globs.
    if not args or args[0] == 'existing':
        return validate_path_existing
    elif args[0] == 'new':
        return validate_path_new
    elif args[0] == 'empty':
        return validate_path_empty
    elif args[0] == 'nonempty':
        return validate_path_nonempty
    else:
        return validate_path_with_spec(*args)
def Date(*args, **kwargs):
    """Create a validator that checks that the value is a valid date.

    :param spec: The date format to accept if a string value is used.
                 ``spec`` follows the standard Python
                 :ref:`strftime <python:strftime-strptime-behavior>`
                 format string.
    :type spec: str
    """
    if not args:
        spec = DEFAULT_DATE_FORMAT
    else:
        spec = args[0]
    def validate(value):
        msg = _('Invalid date for format %s')
        # Empty values pass straight through; date/datetime instances are
        # normalised to a plain date.
        if value is None or value == '':
            return ''
        if isinstance(value, datetime):
            return value.date()
        if isinstance(value, date):
            return value
        try:
            d = datetime.strptime(value, spec)
            return d.date()
        except (TypeError, ValueError):
            # Translate strftime directives into a human readable form for
            # the error message.  Bug fix: the original computed this string
            # and then discarded it, raising ``msg % spec`` instead of
            # ``msg % f``.
            f = spec
            format_conv = {'%Y': 'YYYY', '%y': 'YY', '%m': 'MM', '%d': 'DD'}
            for k, v in format_conv.items():
                f = f.replace(k, v)
            raise ValueError(msg % f)
    return validate
def Time(*args, **kwargs):
    """Create a validator that checks that the value is a valid time.

    :param spec: The time format to accept if a string value is used.
                 ``spec`` follows the standard Python
                 :ref:`strftime <python:strftime-strptime-behavior>`
                 format string.
    :type spec: str
    """
    if not args:
        spec = DEFAULT_TIME_FORMAT
    else:
        spec = args[0]
    def validate(value):
        msg = _('Invalid time for format %s')
        # Empty values pass straight through; time instances are accepted
        # as-is.
        if value is None or value == '':
            return ''
        if isinstance(value, time):
            return value
        try:
            d = datetime.strptime(value, spec)
            return d.time()
        except (TypeError, ValueError):
            # Translate strftime directives into a human readable form for
            # the error message.  Bug fix: the original computed this string
            # and then discarded it, raising ``msg % spec`` instead of
            # ``msg % f``.
            f = spec
            format_conv = {'%H': 'hh', '%M': 'mm', '%S': 'ss'}
            for k, v in format_conv.items():
                f = f.replace(k, v)
            raise ValueError(msg % f)
    return validate
def Color(*args, **kwargs):
    """Create a validator that checks that the value is a valid color

    The color format, which is determined by the `spec` argument, can be one
    of the following

    * An RGB hex representation i.e. `#` followed by either 3 or 6 hex digits.
    * A string of the form 'rgb(R, G, B)' with three comma separated
      components.

    :param spec: The color type to accept either 'rgbhex' or 'rgb'
    :type spec: str
    """
    def validate_rgb(value):
        # Sequences are passed through, truncated to three components.
        if isinstance(value, (tuple, list)):
            return tuple(value[:3])
        if value.startswith('rgb('):
            parts = value[4:-1].split(',')[:3]
        else:
            parts = value.split(',')[:3]
        return tuple(int(p) for p in parts)
    def validate_hex(value):
        # '#' plus 3 or 6 hex digits.
        ok = (value[0] == '#'
              and len(value) in (4, 7)
              and all(c in string.hexdigits for c in value[1:]))
        if ok:
            return value
        raise ValueError(_('Invalid RGB hex value'))
    if args and args[0] == 'rgb':
        return validate_rgb
    if not args or args[0] == 'rgbhex':
        return validate_hex
def Email(*args, **kwargs):
    """Create a validator that checks that the value is a valid email address.

    If the :mod:`pyisemail` module is available then that is used to validate
    the email address otherwise a regular expression is used (which may produce
    false positives.)
    """
    def validate(value):
        msg = _('Invalid email address')
        if PYISEMAIL and 're' not in args:
            # Bug fix: guard against Email() being called with no spec -
            # the original unconditionally read args[0] and raised
            # IndexError here whenever pyisemail was installed.
            kwargs = str_to_kwargs(args[0]) if args else {}
            match = pyisemail.is_email(value, **kwargs)
        else:
            # Fallback regex check; intentionally permissive.
            match = re.match(r'^[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}$',
                             value,
                             flags=re.IGNORECASE)
        if not match:
            raise ValueError(msg)
        else:
            return value
    return validate
# Matches entry-point style validator names of the form
# 'package.module:attribute'.
# NOTE(review): `(\.\w)?` only allows a single character after the dot -
# presumably `(\.\w+)*` was intended; confirm against real entry-point names.
entry_point_re = re.compile(r'\w+(\.\w)?\:\w+(\.\w)?')
# Registry mapping validator names (as used in specs) to the factory that
# builds the validation callable.  Entry-point style names are added lazily
# by get_validator().
validators = {
    'nonempty': NonEmpty,
    'constant': Constant,
    'str': Str,
    'bool': Bool,
    'yesno': Bool,
    'int': Int,
    'float': Float,
    'number': Number,
    'path': Path,
    'date': Date,
    'time': Time,
    'color': Color,
    're': Regex,
    'password': Str,
    'email': Email,
}
def spec_to_args(spec):
    """Split a comma separated ``spec`` string into positional arguments and
    ``key=value`` keyword arguments.

    An ``=`` preceded by a backslash is treated as escaped and skipped when
    searching for the key/value separator.
    """
    args = []
    kwargs = {}
    if spec:
        for elem in str_to_elems(spec):
            pos = elem.find('=')
            if pos == -1:
                # No '=' at all: a plain positional argument.
                args.append(elem)
            else:
                # Skip over escaped '=' characters to find the real
                # key/value separator.
                # NOTE(review): when '=' is the first character,
                # elem[pos - 1] inspects the *last* character of the
                # string - confirm this edge case is intended.
                while True:
                    if elem[pos - 1] != '\\':
                        break
                    pos = elem.find('=', pos + 1)
                    if pos == -1:
                        break
                if pos != -1:
                    key = elem[:pos]
                    value = elem[pos + 1:]
                    kwargs[key] = value
                else:
                    # Every '=' was escaped: treat as positional.
                    args.append(elem)
    return args, kwargs
def get_validator(validator, spec=None):
    """Get a validation function

    :param validator: The name of the validator to create
    :type validator: str
    :param spec: A specification to modify how the validator works
    :type spec: str
    """
    if entry_point_re.match(validator):
        # Entry-point style names are resolved once and cached in the
        # registry so subsequent lookups are cheap.
        if validator not in validators:
            validators[validator] = load_entry_point(validator)
        return validators[validator]
    # Built-in validators are factories: build the callable from the spec.
    factory = validators[validator]
    args, kwargs = spec_to_args(spec)
    return factory(*args, **kwargs)
| 29.10686 | 91 | 0.537098 | 0 | 0 | 292 | 0.013235 | 0 | 0 | 0 | 0 | 9,583 | 0.434347 |
ac8dbd9e323eeca0d685d4a4900b4bd4b422d5be | 11,396 | py | Python | Thirdparty/libcurl/build.py | reven86/dava.engine | ca47540c8694668f79774669b67d874a30188c20 | [
"BSD-3-Clause"
] | 5 | 2020-02-11T12:04:17.000Z | 2022-01-30T10:18:29.000Z | Thirdparty/libcurl/build.py | reven86/dava.engine | ca47540c8694668f79774669b67d874a30188c20 | [
"BSD-3-Clause"
] | null | null | null | Thirdparty/libcurl/build.py | reven86/dava.engine | ca47540c8694668f79774669b67d874a30188c20 | [
"BSD-3-Clause"
] | 4 | 2019-11-28T19:24:34.000Z | 2021-08-24T19:12:50.000Z | import os
import shutil
import build_utils
def get_supported_targets(platform):
    """Return the list of build targets available on the given host platform."""
    targets_by_platform = {
        'win32': ['win32', 'win10'],
        'darwin': ['macos', 'ios', 'android'],
        'linux': ['android', 'linux'],
    }
    return targets_by_platform.get(platform, [])
def get_dependencies_for_target(target):
    """Return the libraries that must be built before the given target."""
    # Only the Android build links against our own openssl build.
    return ['openssl'] if target == 'android' else []
def build_for_target(target, working_directory_path, root_project_path):
    """Run the platform-specific build routine for ``target``."""
    # The lambdas defer name resolution, so an unknown target remains a
    # silent no-op, exactly like the original if/elif chain.
    dispatch = {
        'win32': lambda: _build_win32(working_directory_path, root_project_path),
        'win10': lambda: _build_win10(working_directory_path, root_project_path),
        'macos': lambda: _build_macos(working_directory_path, root_project_path),
        'ios': lambda: _build_ios(working_directory_path, root_project_path),
        'android': lambda: _build_android(working_directory_path, root_project_path),
        'linux': lambda: _build_linux(working_directory_path, root_project_path),
    }
    action = dispatch.get(target)
    if action is not None:
        action()
def get_download_info():
    """Describe where the curl sources for each target family come from."""
    # macOS/iOS builds use the bundled curl-ios-build-scripts; every other
    # target downloads the official release tarball.
    sources = {}
    sources['macos_and_ios'] = 'maintained by curl-ios-build-scripts (bundled)'
    sources['others'] = 'https://curl.haxx.se/download/curl-7.50.3.tar.gz'
    return sources
def _download_and_extract(working_directory_path):
    """Download the curl release tarball and unpack it.

    Returns the directory the sources were extracted into.
    """
    destination = os.path.join(working_directory_path, 'libcurl_source')
    url = get_download_info()['others']
    archive_name = build_utils.get_url_file_name_no_ext(url)
    build_utils.download_and_extract(
        url, working_directory_path, destination, archive_name)
    return destination
@build_utils.run_once
def _patch_sources(source_folder_path, working_directory_path):
    """Apply the local patch.diff to the extracted curl sources (once)."""
    # Apply fixes
    build_utils.apply_patch(
        os.path.abspath('patch.diff'), working_directory_path)
def _build_win32(working_directory_path, root_project_path):
    """Build libcurl static libraries for Win32/x64 with VS2013 (VC12)
    and copy the results into the project's lib tree."""
    source_folder_path = _download_and_extract(working_directory_path)
    vc12_solution_file_path = os.path.join(
        source_folder_path, 'projects/Windows/VC12/curl-all.sln')
    # Build Debug/Release for both Win32 and x64 using the upstream solution.
    build_utils.build_vs(
        vc12_solution_file_path,
        'LIB Debug - DLL Windows SSPI', 'Win32', 'libcurl')
    build_utils.build_vs(
        vc12_solution_file_path,
        'LIB Release - DLL Windows SSPI', 'Win32', 'libcurl')
    build_utils.build_vs(
        vc12_solution_file_path,
        'LIB Debug - DLL Windows SSPI', 'x64', 'libcurl')
    build_utils.build_vs(
        vc12_solution_file_path,
        'LIB Release - DLL Windows SSPI', 'x64', 'libcurl')
    # Copy the produced .lib files into the project tree.
    # NOTE(review): the x64 outputs are installed as libcurl_a_debug.lib /
    # libcurl_a.lib while x86 uses libcurl.lib - confirm the CMake side
    # really expects these inconsistent names.
    libs_win_root = os.path.join(root_project_path, 'Libs/lib_CMake/win')
    shutil.copyfile(
        os.path.join(
            source_folder_path,
            'build/Win32/VC12/LIB Debug - DLL Windows SSPI/libcurld.lib'),
        os.path.join(libs_win_root, 'x86/Debug/libcurl.lib'))
    shutil.copyfile(
        os.path.join(
            source_folder_path,
            'build/Win32/VC12/LIB Release - DLL Windows SSPI/libcurl.lib'),
        os.path.join(libs_win_root, 'x86/Release/libcurl.lib'))
    shutil.copyfile(
        os.path.join(
            source_folder_path,
            'build/Win64/VC12/LIB Debug - DLL Windows SSPI/libcurld.lib'),
        os.path.join(libs_win_root, 'x64/Debug/libcurl_a_debug.lib'))
    shutil.copyfile(
        os.path.join(
            source_folder_path,
            'build/Win64/VC12/LIB Release - DLL Windows SSPI/libcurl.lib'),
        os.path.join(libs_win_root, 'x64/Release/libcurl_a.lib'))
    _copy_headers(source_folder_path, root_project_path, 'Others')
def _build_win10(working_directory_path, root_project_path):
    """Build libcurl static libraries for Win32/x64/ARM with VS2015 (VC14)
    and copy the results into the project's win10 lib tree."""
    source_folder_path = _download_and_extract(working_directory_path)
    # The VC14 projects need the local patch before they build.
    _patch_sources(source_folder_path, working_directory_path)
    vc14_solution_folder_path = os.path.join(
        source_folder_path, 'projects/Windows/VC14')
    vc14_solution_file_path = os.path.join(
        vc14_solution_folder_path, 'curl-all.sln')
    # Build Debug/Release for Win32, x64 and ARM.
    build_utils.build_vs(
        vc14_solution_file_path,
        'LIB Debug - DLL Windows SSPI', 'Win32', 'libcurl')
    build_utils.build_vs(
        vc14_solution_file_path,
        'LIB Release - DLL Windows SSPI', 'Win32', 'libcurl')
    build_utils.build_vs(
        vc14_solution_file_path,
        'LIB Debug - DLL Windows SSPI', 'x64', 'libcurl')
    build_utils.build_vs(
        vc14_solution_file_path,
        'LIB Release - DLL Windows SSPI', 'x64', 'libcurl')
    build_utils.build_vs(
        vc14_solution_file_path,
        'LIB Debug - DLL Windows SSPI', 'ARM', 'libcurl')
    build_utils.build_vs(
        vc14_solution_file_path,
        'LIB Release - DLL Windows SSPI', 'ARM', 'libcurl')
    # Install the Win32/x64 outputs into the project tree.
    shutil.copyfile(
        os.path.join(
            source_folder_path,
            'build/Win32/VC14/LIB Debug - DLL Windows SSPI/libcurld.lib'),
        os.path.join(
            root_project_path,
            'Libs/lib_CMake/win10/Win32/Debug/libcurl.lib'))
    shutil.copyfile(
        os.path.join(
            source_folder_path,
            'build/Win32/VC14/LIB Release - DLL Windows SSPI/libcurl.lib'),
        os.path.join(
            root_project_path,
            'Libs/lib_CMake/win10/Win32/Release/libcurl.lib'))
    shutil.copyfile(
        os.path.join(
            source_folder_path,
            'build/Win64/VC14/LIB Debug - DLL Windows SSPI/libcurld.lib'),
        os.path.join(
            root_project_path,
            'Libs/lib_CMake/win10/x64/Debug/libcurl.lib'))
    shutil.copyfile(
        os.path.join(
            source_folder_path,
            'build/Win64/VC14/LIB Release - DLL Windows SSPI/libcurl.lib'),
        os.path.join(
            root_project_path, 'Libs/lib_CMake/win10/x64/Release/libcurl.lib'))
    # ARM output folder isn't specifically set by the solution, so it is the
    # default one (relative to the solution folder rather than 'build/').
    shutil.copyfile(
        os.path.join(
            vc14_solution_folder_path,
            'ARM/LIB Debug - DLL Windows SSPI/libcurld.lib'),
        os.path.join(
            root_project_path,
            'Libs/lib_CMake/win10/arm/Debug/libcurl.lib'))
    shutil.copyfile(
        os.path.join(
            vc14_solution_folder_path,
            'ARM/LIB Release - DLL Windows SSPI/libcurl.lib'),
        os.path.join(
            root_project_path,
            'Libs/lib_CMake/win10/arm/Release/libcurl.lib'))
    _copy_headers(source_folder_path, root_project_path, 'Others')
def _build_macos(working_directory_path, root_project_path):
    """Build libcurl for macOS (x86_64) via the bundled
    curl-ios-build-scripts and install the library and headers."""
    build_curl_run_dir = os.path.join(working_directory_path, 'gen/build_osx')
    if not os.path.exists(build_curl_run_dir):
        os.makedirs(build_curl_run_dir)
    build_curl_args = [
        './build_curl', '--arch', 'x86_64', '--run-dir', build_curl_run_dir]
    if (build_utils.verbose):
        build_curl_args.append('--verbose')
    # The script is executed from its own checkout directory.
    build_utils.run_process(
        build_curl_args,
        process_cwd='curl-ios-build-scripts-master')
    output_path = os.path.join(build_curl_run_dir, 'curl/osx/lib/libcurl.a')
    shutil.copyfile(
        output_path,
        os.path.join(
            root_project_path,
            os.path.join('Libs/lib_CMake/mac/libcurl_macos.a')))
    # macOS and iOS share a single headers directory.
    include_path = os.path.join(
        root_project_path,
        os.path.join('Libs/include/curl/iOS_MacOS'))
    build_utils.copy_files(
        os.path.join(build_curl_run_dir, 'curl/osx/include'),
        include_path,
        '*.h')
def _build_ios(working_directory_path, root_project_path):
    """Build a fat libcurl for iOS (armv7/armv7s/arm64) via the bundled
    curl-ios-build-scripts and install the library and headers."""
    build_curl_run_dir = os.path.join(working_directory_path, 'gen/build_ios')
    if not os.path.exists(build_curl_run_dir):
        os.makedirs(build_curl_run_dir)
    build_curl_args = [
        './build_curl',
        '--arch',
        'armv7,armv7s,arm64',
        '--run-dir',
        build_curl_run_dir]
    if (build_utils.verbose):
        build_curl_args.append('--verbose')
    # The script is executed from its own checkout directory.
    build_utils.run_process(
        build_curl_args, process_cwd='curl-ios-build-scripts-master')
    output_path = os.path.join(
        build_curl_run_dir, 'curl/ios-appstore/lib/libcurl.a')
    shutil.copyfile(
        output_path,
        os.path.join(
            root_project_path,
            os.path.join('Libs/lib_CMake/ios/libcurl_ios.a')))
    # macOS and iOS share a single headers directory.
    include_path = os.path.join(
        root_project_path, os.path.join('Libs/include/curl/iOS_MacOS'))
    build_utils.copy_files(
        os.path.join(build_curl_run_dir, 'curl/ios-appstore/include'),
        include_path,
        '*.h')
def _build_android(working_directory_path, root_project_path):
    """Cross-compile libcurl for Android (ARM and x86) with standalone NDK
    toolchains, linking against the previously built openssl."""
    source_folder_path = _download_and_extract(working_directory_path)
    # Each architecture prepends its toolchain bin dir to a clean PATH copy.
    env = os.environ.copy()
    original_path_var = env["PATH"]
    # ARM
    toolchain_path_arm = os.path.join(
        working_directory_path, 'gen/ndk_toolchain_arm')
    # NOTE(review): the NDK host tag is hard-coded to 'darwin-x86_64' even
    # though this target is also offered on linux hosts - confirm.
    build_utils.android_ndk_make_toolchain(
        root_project_path,
        'arm',
        'android-14',
        'darwin-x86_64',
        toolchain_path_arm)
    env['PATH'] = '{}:{}'.format(
        os.path.join(toolchain_path_arm, 'bin'), original_path_var)
    install_dir_arm = os.path.join(working_directory_path, 'gen/install_arm')
    configure_args = [
        '--host=arm-linux-androideabi',
        '--disable-shared',
        '--with-ssl=' + os.path.abspath(
            os.path.join(
                working_directory_path, '../openssl/gen/install_arm/'))]
    build_utils.build_with_autotools(
        source_folder_path,
        configure_args,
        install_dir_arm,
        env)
    # x86
    toolchain_path_x86 = os.path.join(
        working_directory_path, 'gen/ndk_toolchain_x86')
    build_utils.android_ndk_make_toolchain(
        root_project_path,
        'x86',
        'android-14',
        'darwin-x86_64',
        toolchain_path_x86)
    env['PATH'] = '{}:{}'.format(
        os.path.join(toolchain_path_x86, 'bin'), original_path_var)
    # NOTE(review): this variable is reused for the x86 install dir despite
    # its '_arm' name - harmless but confusing.
    install_dir_arm = os.path.join(working_directory_path, 'gen/install_x86')
    configure_args = [
        '--host=i686-linux-android',
        '--disable-shared',
        '--with-ssl=' + os.path.abspath(
            os.path.join(
                working_directory_path,
                '../openssl/gen/install_x86/'))]
    build_utils.build_with_autotools(
        source_folder_path,
        configure_args,
        install_dir_arm, env)
    _copy_headers(source_folder_path, root_project_path, 'Others')
def _build_linux(working_directory_path, root_project_path):
    """Build a static libcurl for Linux against the local openssl build and
    install the library and headers into the project tree."""
    source_folder_path = _download_and_extract(working_directory_path)
    env = build_utils.get_autotools_linux_env()
    install_dir = os.path.join(working_directory_path, 'gen/install_linux')
    openssl_install_dir = os.path.abspath(os.path.join(working_directory_path, '../openssl/gen/install_linux/'))
    configure_args = [
        '--disable-shared',
        '--with-ssl=' + openssl_install_dir]
    build_utils.build_with_autotools(
        source_folder_path,
        configure_args,
        install_dir,
        env)
    shutil.copyfile(os.path.join(install_dir, 'lib/libcurl.a'),
                    os.path.join(root_project_path, 'Libs/lib_CMake/linux/libcurl.a'))
    _copy_headers(source_folder_path, root_project_path, 'Others')
def _copy_headers(source_folder_path, root_project_path, target_folder):
    """Copy the public curl headers into the project's include tree."""
    source = os.path.join(source_folder_path, 'include/curl')
    destination = os.path.join(
        root_project_path, 'Libs/include/curl', target_folder)
    build_utils.copy_files(source, destination, '*.h')
| 33.616519 | 112 | 0.660495 | 0 | 0 | 0 | 0 | 195 | 0.017111 | 0 | 0 | 2,909 | 0.255265 |
ac8dff59d2ee5472682b61a81f77760f6305472b | 1,871 | py | Python | tests/test_schnell.py | damonge/SNELL | 4bb276225fce8f535619d0f2133a19f3c42aa44f | [
"BSD-3-Clause"
] | 2 | 2020-05-07T03:22:37.000Z | 2021-02-19T14:34:42.000Z | tests/test_schnell.py | damonge/SNELL | 4bb276225fce8f535619d0f2133a19f3c42aa44f | [
"BSD-3-Clause"
] | 2 | 2020-04-28T11:13:10.000Z | 2021-06-08T12:20:25.000Z | tests/test_schnell.py | damonge/GWSN | 4bb276225fce8f535619d0f2133a19f3c42aa44f | [
"BSD-3-Clause"
] | 2 | 2020-05-07T03:22:43.000Z | 2021-12-05T15:41:05.000Z | import healpy as hp
import numpy as np
from schnell import MapCalculator, GroundDetector, LISADetector
# Detectors
det1 = GroundDetector('Hanford', 46.4, -119.4, 90-171.8,
'plots/data/aLIGO_design.txt')
det2 = GroundDetector('Livingstone', 30.7, -90.8, 90-243.0,
'plots/data/aLIGO_design.txt')
detl = LISADetector(0, is_L5Gm=False)
# Calculators
mc11_n = MapCalculator([det1])
mc12_n = MapCalculator([det1, det2])
mcLL_n = MapCalculator([detl], f_pivot=1E-2)
# Angles
nside = 64
theta, phi = hp.pix2ang(nside, np.arange(hp.nside2npix(nside)))
def test_antenna_new():
    """Regression-test the antenna patterns against stored reference maps."""
    # Antenna patterns (without the baseline phase factor) for the
    # Hanford auto-, Hanford x Livingstone cross-, and LISA
    # auto-correlations.
    a11 = np.real(mc11_n.get_antenna(0, 0, 0, 0, theta, phi,
                                     inc_baseline=False))
    a12 = np.real(mc12_n.get_antenna(0, 1, 0, 0, theta, phi,
                                     inc_baseline=False))
    aLL = np.abs(mcLL_n.get_antenna(0, 0, 0, 1E-2, theta, phi,
                                    inc_baseline=False))
    # Reference maps shipped with the test suite.
    a11_test, a12_test, aLL_test = hp.read_map(
        "tests/test_data/antenna_test.fits",
        field=None)
    assert np.all(np.fabs(a11-a11_test) < 1E-5)
    assert np.all(np.fabs(a12-a12_test) < 1E-5)
    assert np.all(np.fabs(aLL-aLL_test) < 1E-5)
def test_Gell_new():
    """Regression-test the G_ell spectra against stored reference values."""
    # The factor 2 here corrects for a pevious missing factor
    # for auto-correlations.
    gl11 = mc11_n.get_G_ell(0, 100., nside) * 2 * 4
    gl12 = mc12_n.get_G_ell(0, 100., nside, no_autos=True) * 4
    glLL = mcLL_n.get_G_ell(0, 1E-2, nside) * 2 * 4
    # Reference spectra shipped with the test suite.
    ls, gl11_test, gl12_test, glLL_test = np.loadtxt(
        "tests/test_data/gls_test.txt",
        unpack=True)
    # Only even multipoles ([::2]) are compared.
    assert np.all(np.fabs(gl11/gl11_test-1)[::2] < 1E-8)
    assert np.all(np.fabs(gl12/gl12_test-1)[::2] < 1E-8)
    # We changed the noise model, so this doesn't agree anymore
    assert np.all(np.fabs(16*glLL/glLL_test-1)[::2] < 0.05)
| 36.686275 | 63 | 0.622662 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 317 | 0.169428 |
ac8ff801eb4c39b0e7abc25a4c80f6925127a04d | 7,281 | py | Python | bittensor/wallet.py | il-dar/bittensor | 12ea83a76abc3556f42e619ed04a223c0407b9b3 | [
"MIT"
] | null | null | null | bittensor/wallet.py | il-dar/bittensor | 12ea83a76abc3556f42e619ed04a223c0407b9b3 | [
"MIT"
] | null | null | null | bittensor/wallet.py | il-dar/bittensor | 12ea83a76abc3556f42e619ed04a223c0407b9b3 | [
"MIT"
] | null | null | null |
# The MIT License (MIT)
# Copyright © 2021 Opentensor.ai
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import argparse
import json
import os
import re
import stat
from munch import Munch
from loguru import logger
import bittensor
from bittensor.crypto import is_encrypted, decrypt_data
from bittensor.crypto import decrypt_keypair
from bittensor.crypto.keyfiles import KeyFileError, load_keypair_from_data
class Wallet():
    """
    Bittensor wallet maintenance class. Each wallet contains a coldkey and a hotkey.
    The coldkey is the user's primary key for holding their stake in their wallet
    and is the only way that users can access their Tao. Coldkeys can hold tokens and should be encrypted on your device.
    The coldkey must be used to stake and unstake funds from a running node. The hotkey, on the other hand, is only used
    for suscribing and setting weights from running code. Hotkeys are linked to coldkeys through the metagraph.
    """
    def __init__(self, config: Munch = None):
        """Load both keys from the paths given by ``config`` (or the
        default/CLI config when ``config`` is None)."""
        if config == None:
            config = Wallet.build_config()
        self.config = config
        # NOTE(review): quit() from a library constructor terminates the
        # whole process on a bad password or corrupt keyfile - confirm
        # callers rely on this rather than an exception.
        try:
            self.load_hotkeypair()
            self.load_cold_key()
        except (KeyError):
            logger.error("Invalid password")
            quit()
        except KeyFileError:
            logger.error("Keyfile corrupt")
            quit()
    def load_cold_key(self):
        """Read the cold public key (first line of the coldkey file)
        into ``self.coldkey``."""
        path = self.config.wallet.coldkeyfile
        path = os.path.expanduser(path)
        with open(path, "r") as file:
            self.coldkey = file.readline().strip()
        logger.info("Loaded coldkey: {}", self.coldkey)
    def load_hotkeypair(self):
        """Read the hotkey file, decrypting it (prompting for a password)
        if necessary, and store the keypair in ``self.keypair``."""
        keyfile = os.path.expanduser(self.config.wallet.hotkeyfile)
        with open(keyfile, 'rb') as file:
            data = file.read()
            if is_encrypted(data):
                password = bittensor.utils.Cli.ask_password()
                data = decrypt_data(password, data)
            hotkey = load_keypair_from_data(data)
            self.keypair = hotkey
            logger.info("Loaded hotkey: {}", self.keypair.public_key)
    @staticmethod
    def build_config() -> Munch:
        # Parses and returns a config Munch for this object.
        parser = argparse.ArgumentParser();
        Wallet.add_args(parser)
        config = bittensor.config.Config.to_config(parser);
        Wallet.check_config(config)
        return config
    @staticmethod
    def add_args(parser: argparse.ArgumentParser):
        """Register the wallet command line arguments on ``parser``."""
        # The bare except tolerates the arguments already being registered
        # (argparse raises on duplicates).
        try:
            parser.add_argument('--wallet.hotkeyfile', required=False, default='~/.bittensor/wallets/default/hotkeys/default',
                                    help='''The path to your bittensor hot key file,
                                            Hotkeys should not hold tokens and are only used
                                            for suscribing and setting weights from running code.
                                            Hotkeys are linked to coldkeys through the metagraph''')
            parser.add_argument('--wallet.coldkeyfile', required=False, default='~/.bittensor/wallets/default/coldkeypub.txt',
                                    help='''The path to your bittensor cold publickey text file.
                                            Coldkeys can hold tokens and should be encrypted on your device.
                                            The coldkey must be used to stake and unstake funds from a running node.
                                            On subscribe this coldkey account is linked to the associated hotkey on the subtensor chain.
                                            Only this key is capable of making staking and unstaking requests for this neuron.''')
        except:
            pass
    @staticmethod
    def check_config(config: Munch):
        """Validate that both key files exist and are usable."""
        Wallet.__check_hot_key_path(config.wallet.hotkeyfile)
        Wallet.__check_cold_key_path(config.wallet.coldkeyfile)
    @staticmethod
    def __check_hot_key_path(path):
        # The hotkey file must exist, be readable by us, and not be
        # world-readable (it may contain secret material).
        path = os.path.expanduser(path)
        if not os.path.isfile(path):
            logger.error("--wallet.hotkeyfile {} is not a file", path)
            logger.error("You can create keys with: bittensor-cli new_wallet")
            raise KeyFileError
        if not os.access(path, os.R_OK):
            logger.error("--wallet.hotkeyfile {} is not readable", path)
            logger.error("Ensure you have proper privileges to read the file {}", path)
            raise KeyFileError
        if Wallet.__is_world_readable(path):
            logger.error("--wallet.hotkeyfile {} is world readable.", path)
            logger.error("Ensure you have proper privileges to read the file {}", path)
            raise KeyFileError
    @staticmethod
    def __is_world_readable(path):
        # True when the 'other' read permission bit is set.
        st = os.stat(path)
        return st.st_mode & stat.S_IROTH
    @staticmethod
    def __check_cold_key_path(path):
        # The cold key file must exist, be readable, and contain a
        # 0x-prefixed 64 hex digit public key on its first line.
        # NOTE(review): the second isfile() check is unreachable (the first
        # one already raised), and the regex rejects uppercase hex -
        # confirm whether either is intentional.
        path = os.path.expanduser(path)
        if not os.path.isfile(path):
            logger.error("--wallet.coldkeyfile {} does not exist", path)
            raise KeyFileError
        if not os.path.isfile(path):
            logger.error("--wallet.coldkeyfile {} is not a file", path)
            raise KeyFileError
        if not os.access(path, os.R_OK):
            logger.error("--wallet.coldkeyfile {} is not readable", path)
            raise KeyFileError
        with open(path, "r") as file:
            key = file.readline().strip()
            if not re.match("^0x[a-z0-9]{64}$", key):
                logger.error("Cold key file corrupt")
                raise KeyFileError
    @staticmethod
    def __create_keypair() -> bittensor.subtensor.interface.Keypair:
        """Generate a fresh keypair from a new mnemonic."""
        return bittensor.subtensor.interface.Keypair.create_from_mnemonic(bittensor.subtensor.interface.Keypair.generate_mnemonic())
    @staticmethod
    def __save_keypair(keypair : bittensor.subtensor.interface.Keypair, path : str):
        """Write ``keypair`` to ``path`` as JSON, readable only by the owner."""
        path = os.path.expanduser(path)
        with open(path, 'w') as file:
            json.dump(keypair.toDict(), file)
            file.close()
        os.chmod(path, stat.S_IWUSR | stat.S_IRUSR)
    @staticmethod
    def __has_keypair(path):
        """Return True when a key file already exists at ``path``."""
        path = os.path.expanduser(path)
        return os.path.exists(path)
ac921c70997b850dff4ce1806e2a8f895193810b | 1,924 | py | Python | build_data/data_partitions.py | zkurtz/kaggle_malware_2019 | 72465b2f5d5f49d1acefa9b4f6b06df2aa53e4a8 | [
"MIT"
] | null | null | null | build_data/data_partitions.py | zkurtz/kaggle_malware_2019 | 72465b2f5d5f49d1acefa9b4f6b06df2aa53e4a8 | [
"MIT"
] | null | null | null | build_data/data_partitions.py | zkurtz/kaggle_malware_2019 | 72465b2f5d5f49d1acefa9b4f6b06df2aa53e4a8 | [
"MIT"
] | null | null | null | '''
Split the training data into subsets for cross validation, sample from the testing data, etc.
'''
from feather import read_dataframe as read_feather
import numpy as np
import pdb
from zpylib import data_path as dp
from zpylib import datatools
from zpylib import N_TEST
from zpylib import N_TRAIN
# Config
# Number of cross-validation folds to split the training data into.
NGROUPS=3
# Feature-type registry; augment the predictors with the ten pydens_*
# density features appended during the densification step.
types = datatools.FeaturesByType()
types.predictors += ['pydens_' + str(k) for k in range(10)]
def split_train():
    """Split the densified training data into NGROUPS random, disjoint
    feather files for cross validation, keeping the original row index."""
    filepath = dp("refactored/densified_train.feather")
    print("loading " + filepath)
    df = read_feather(filepath, columns = types.predictors + [types.response])
    # Append the raw data index
    assert 'raw_data_index' not in df.columns
    assert df.shape[0] == N_TRAIN
    df['raw_data_index'] = range(N_TRAIN)
    print("Splitting into subgroups ... ")
    # Fixed seed so the fold assignment is reproducible across runs.
    np.random.seed(0)
    idx_random = np.random.permutation(N_TRAIN)
    idx_groups = np.array_split(idx_random, NGROUPS)
    for k, idxs in enumerate(idx_groups):
        sk = str(k)
        print("... featherizing group " + sk + " of " + str(NGROUPS))
        fname = dp("refactored/train_split_" + sk + ".feather")
        df.iloc[idxs].reset_index(drop=True).to_feather(fname)
def test_sample():
''' Generate a 500k random sample of the testing data '''
filepath = dp("refactored/densified_test.feather")
print("loading " + filepath)
df = read_feather(filepath, columns = types.predictors)
# Append the raw data index
assert 'raw_data_index' not in df.columns
assert df.shape[0] == N_TEST
df['raw_data_index'] = N_TRAIN + np.array(range(N_TEST))
print("Uniformly sampling 500k rows from the testing data ... ")
np.random.seed(0)
idx_random = np.random.permutation(500000)
print("Featherizing sample ... ")
df.iloc[idx_random].reset_index(drop=True).to_feather(dp("refactored/test_sample.feather"))
if __name__=='__main__':
split_train()
test_sample() | 32.610169 | 95 | 0.696985 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 606 | 0.314969 |
ac92df5c1931094331c3056c256d7646c3351e9b | 1,732 | py | Python | tests/integration/application/test_attribute_server_and_client.py | microprediction/predictionserver | 817243a4c85aac63995ab3a18cfbdcb01bb07bd0 | [
"MIT"
] | 1 | 2020-12-04T17:59:19.000Z | 2020-12-04T17:59:19.000Z | tests/integration/application/test_attribute_server_and_client.py | microprediction/predictionserver | 817243a4c85aac63995ab3a18cfbdcb01bb07bd0 | [
"MIT"
] | null | null | null | tests/integration/application/test_attribute_server_and_client.py | microprediction/predictionserver | 817243a4c85aac63995ab3a18cfbdcb01bb07bd0 | [
"MIT"
] | 1 | 2021-03-05T05:03:36.000Z | 2021-03-05T05:03:36.000Z | from predictionserver.clientmixins.attributereader import (
AttributeReader, AttributeType, AttributeGranularity
)
from predictionserver.set_config import MICRO_TEST_CONFIG
from predictionserver.servermixins.attributeserver import AttributeServer
BABLOH_CATTLE = MICRO_TEST_CONFIG['BABLOH_CATTLE']
PUBLIC_PROFILE = {
AttributeType.homepage: 'https://www.savetrumble.com.au',
AttributeType.repository: 'https://pypi.org/project/microfilter/',
AttributeType.paper: 'https://arxiv.org/pdf/1512.01389.pdf',
AttributeType.topic: 'AutoMl',
AttributeType.description: 'Herding cattle using AutoMl'
}
PRIVATE_PROFILE = {
AttributeType.email: 'info@savetrundle.nsw.com.au',
AttributeType.description: 'private description'
}
# def test_attribute_server(localhost_process):
# """ Test using local flask app (see testconf.py """
# print('Running localhost_process test')
# ar = AttributeReader()
# ar.base_url = 'http://127.0.0.1:5000'
# server = AttributeServer()
# server.connect(**MICRO_TEST_CONFIG)
# email = 'babloh@cattle.com'
# server.set_attribute(
# attribute_type=AttributeType.email,
# granularity=AttributeGranularity.write_key,
# write_key=BABLOH_CATTLE,
# value=email
# )
# email_back = server.get_attribute(
# attribute_type=AttributeType.email,
# granularity=AttributeGranularity.write_key,
# write_key=BABLOH_CATTLE
# )
# assert email == email_back
# email_back_from_client = ar.get_attribute(
# attribute_type=AttributeType.email,
# granularity=AttributeGranularity.write_key,
# write_key=BABLOH_CATTLE
# )
# assert email == email_back_from_client
| 35.346939 | 73 | 0.718822 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,164 | 0.672055 |
ac9387393ec1679069c88ceafbe610de436150e4 | 20,187 | py | Python | aiokraken/utils/tests/test_timeindexeddataframe.py | asmodehn/aiokraken | b260bd41d5aa091e6a4f1818328426fbe6f625c0 | [
"MIT"
] | null | null | null | aiokraken/utils/tests/test_timeindexeddataframe.py | asmodehn/aiokraken | b260bd41d5aa091e6a4f1818328426fbe6f625c0 | [
"MIT"
] | 82 | 2019-08-30T09:37:49.000Z | 2022-03-29T14:53:22.000Z | aiokraken/utils/tests/test_timeindexeddataframe.py | asmodehn/aiokraken | b260bd41d5aa091e6a4f1818328426fbe6f625c0 | [
"MIT"
] | null | null | null | import unittest
from datetime import datetime, timezone
from pandas import DatetimeTZDtype
from parameterized import parameterized
import pandas as pd
from aiokraken.utils.timeindexeddataframe import TimeindexedDataframe
"""
Test module.
This is intended for extensive testing, using parameterized, hypothesis or similar generation methods
For simple usecase examples, we should rely on doctests.
"""
class TestTimeindexedDataframe(unittest.TestCase):
@parameterized.expand(
[
[
pd.DataFrame( # One with "datetime" column (like internal model)
# TODO: proper currencies...
[
[
datetime.fromtimestamp(1567039620, tz=timezone.utc),
8746.4,
8751.5,
8745.7,
8745.7,
8749.3,
0.09663298,
8,
],
[
datetime.fromtimestamp(1567039680, tz=timezone.utc),
8745.7,
8747.3,
8745.7,
8747.3,
8747.3,
0.00929540,
1,
],
],
# grab that from kraken documentation
columns=[
"datetime",
"open",
"high",
"low",
"close",
"vwap",
"volume",
"count",
],
) # there is no datetime index a priori.
], [
pd.DataFrame( # One with "datetime" column (like internal model)
# TODO: proper currencies...
[
[
datetime.fromtimestamp(1567039620, tz=timezone.utc),
8746.4,
8751.5,
8745.7,
8745.7,
8749.3,
0.09663298,
8,
],
[
datetime.fromtimestamp(1567039680, tz=timezone.utc),
8745.7,
8747.3,
8745.7,
8747.3,
8747.3,
0.00929540,
1,
],
],
# grab that from kraken documentation
columns=[
"datetime",
"open",
"high",
"low",
"close",
"vwap",
"volume",
"count",
],
).set_index("datetime") # we already have an index
],
]
)
def test_load_ok(self, df):
""" Verifying that expected data parses properly """
tidf = TimeindexedDataframe(data=df, index="datetime")
import pandas.api.types as ptypes
num_cols = ["open", "high", "low", "close", "vwap", "volume", "count"]
assert all(ptypes.is_numeric_dtype(tidf.dataframe[col]) for col in num_cols)
assert tidf.dataframe.index.name == "datetime"
# Verify we have a timezone aware, ns precision datetime.
assert ptypes.is_datetime64tz_dtype(tidf.dataframe.index.dtype)
assert ptypes.is_datetime64_ns_dtype(tidf.dataframe.index.dtype)
# TODO : property test instead (move this example test to doc...)
@parameterized.expand(
[
[
pd.DataFrame(
# TODO: proper Time, proper currencies...
[
[
datetime.fromtimestamp(1567039620, tz=timezone.utc),
8746.4,
8751.5,
8745.7,
8745.7,
8749.3,
0.09663298,
8,
],
[
datetime.fromtimestamp(1567039680, tz=timezone.utc),
8745.7,
8747.3,
8745.7,
8747.3,
8747.3,
0.00929540,
1,
],
],
# grab that from kraken documentation
columns=[
"datetime",
"open",
"high",
"low",
"close",
"vwap",
"volume",
"count",
],
),
pd.DataFrame(
# TODO: proper Time, proper currencies...
[
[
datetime.fromtimestamp(1567039680, tz=timezone.utc),
8745.8,
8747.3,
8745.7,
8747.3,
8747.3,
0.00929540,
1,
], # Not the value is a bit modified to trigger stitching...
[
datetime.fromtimestamp(1567039720, tz=timezone.utc),
8746.6,
8751.4,
8745.3,
8745.4,
8748.1,
0.09663297,
3,
],
],
# grab that from kraken documentation
columns=[
"datetime",
"open",
"high",
"low",
"close",
"vwap",
"volume",
"count",
],
),
],
]
)
def test_stitch_ok(
self, df1, df2
): # TODO : there are MANY cases to test for stitch
""" Verifying that expected data parses properly """
tidf1 = TimeindexedDataframe(data=df1)
tidf2 = TimeindexedDataframe(data=df2)
stitched1 = tidf1.merge(tidf2)
import pandas.api.types as ptypes
num_cols = ["open", "high", "low", "close", "vwap", "volume", "count"]
assert all(
ptypes.is_numeric_dtype(stitched1.dataframe[col]) for col in num_cols
)
assert stitched1.dataframe.index.name == "datetime"
# Verify we have a timezone aware, ns precision datetime.
assert ptypes.is_datetime64tz_dtype(stitched1.dataframe.index.dtype)
assert ptypes.is_datetime64_ns_dtype(stitched1.dataframe.index.dtype)
# verifying stitches
assert (stitched1.dataframe.iloc[0] == tidf1.dataframe.iloc[0]).all()
assert (stitched1.dataframe.iloc[-1] == tidf2.dataframe.iloc[-1]).all()
assert len(stitched1) == 3
# Note : careful with default merging strategy, ORDER MATTERS !
# To make it not matter, we need mode semantics...
@parameterized.expand(
[
[
pd.DataFrame( # One with "datetime" column (like internal model)
# TODO: proper Time, proper currencies...
[
[
datetime.fromtimestamp(1567039620, tz=timezone.utc),
8746.4,
8751.5,
8745.7,
8745.7,
8749.3,
0.09663298,
8,
],
[
datetime.fromtimestamp(1567039680, tz=timezone.utc),
8745.7,
8747.3,
8745.7,
8747.3,
8747.3,
0.00929540,
1,
],
],
# grab that from kraken documentation
columns=[
"datetime",
"open",
"high",
"low",
"close",
"vwap",
"volume",
"count",
],
).set_index("datetime")
],
]
)
def test_getitem_ok(self, df):
""" Verifying that expected data parses properly """
tidf = TimeindexedDataframe(data=df)
import pandas.api.types as ptypes
num_cols = ["open", "high", "low", "close", "vwap", "volume", "count"]
assert all(ptypes.is_numeric_dtype(tidf.dataframe[col]) for col in num_cols)
assert ptypes.is_datetime64_any_dtype(tidf.dataframe.index)
assert tidf.dataframe.index.name == "datetime"
assert tidf.dataframe.index.dtype == DatetimeTZDtype(tz=timezone.utc)
# verifying all ways to access data
# get the first element
assert isinstance(tidf.iloc[0], pd.Series)
assert tidf.iloc[0]["open"] == 8746.4
assert tidf.iloc[0]["high"] == 8751.5
assert tidf.iloc[0]["low"] == 8745.7
assert tidf.iloc[0]["close"] == 8745.7
assert tidf.iloc[0]["vwap"] == 8749.3
assert tidf.iloc[0]["volume"] == 0.09663298
assert tidf.iloc[0]["count"] == 8
# NOT WORKING
# get based on timeindex
# assert isinstance(tidf.tloc[1567039620], pd.Series)
# assert tidf.tloc[1567039620]["open"] == 8746.4
# assert tidf.tloc[1567039620]["high"] == 8751.5
# assert tidf.tloc[1567039620]["low"] == 8745.7
# assert tidf.tloc[1567039620]["close"] == 8745.7
# assert tidf.tloc[1567039620]["vwap"] == 8749.3
# assert tidf.tloc[1567039620]["volume"] == 0.09663298
# assert tidf.tloc[1567039620]["count"] == 8
# get from datetime
firstdatetime = datetime(
year=2019, month=8, day=29, hour=0, minute=47, second=0, tzinfo=timezone.utc
)
assert isinstance(tidf[firstdatetime], pd.Series)
assert tidf[firstdatetime]["open"] == 8746.4
assert tidf[firstdatetime]["high"] == 8751.5
assert tidf[firstdatetime]["low"] == 8745.7
assert tidf[firstdatetime]["close"] == 8745.7
assert tidf[firstdatetime]["vwap"] == 8749.3
assert tidf[firstdatetime]["volume"] == 0.09663298
assert tidf[firstdatetime]["count"] == 8
scnddatetime = datetime(
year=2019, month=8, day=29, hour=0, minute=48, second=0, tzinfo=timezone.utc
)
# get slice and verify equality
assert isinstance(tidf[firstdatetime:scnddatetime], TimeindexedDataframe)
assert tidf[firstdatetime:scnddatetime] == tidf
# get list of columns only
assert isinstance(tidf[["open", "high", "low", "close"]], TimeindexedDataframe)
assert tidf[["open", "high", "low", "close"]][firstdatetime]["open"] == tidf[firstdatetime]["open"]
assert tidf[["open", "high", "low", "close"]][firstdatetime]["high"] == tidf[firstdatetime]["high"]
assert tidf[["open", "high", "low", "close"]][firstdatetime]["low"] == tidf[firstdatetime]["low"]
assert tidf[["open", "high", "low", "close"]][firstdatetime]["close"] == tidf[firstdatetime]["close"]
@parameterized.expand(
[
[
pd.DataFrame( # One with "datetime" column (like internal model)
# TODO: proper Time, proper currencies...
[
[
datetime.fromtimestamp(1567039620, tz=timezone.utc),
8746.4,
8751.5,
8745.7,
8745.7,
8749.3,
0.09663298,
8,
],
[
datetime.fromtimestamp(1567039680, tz=timezone.utc),
8745.7,
8747.3,
8745.7,
8747.3,
8747.3,
0.00929540,
1,
],
],
# grab that from kraken documentation
columns=[
"datetime",
"open",
"high",
"low",
"close",
"vwap",
"volume",
"count",
],
).set_index("datetime")
],
]
)
def test_iter_ok(self, df):
""" Verifying that expected data iterates properly """
tidf = TimeindexedDataframe(data=df)
import pandas.api.types as ptypes
num_cols = ["open", "high", "low", "close", "vwap", "volume", "count"]
assert all(ptypes.is_numeric_dtype(tidf.dataframe[col]) for col in num_cols)
assert ptypes.is_datetime64_any_dtype(tidf.dataframe.index)
assert tidf.dataframe.index.name == "datetime"
assert tidf.dataframe.index.dtype == DatetimeTZDtype(tz=timezone.utc)
it = iter(tidf)
ts, s = next(it)
assert ts == datetime(
year=2019, month=8, day=29, hour=0, minute=48, second=0, tzinfo=timezone.utc
)
assert (s == pd.Series(data={
"open":8745.7,
"high":8747.3,
"low":8745.7,
"close":8747.3,
"vwap":8747.3,
"volume":0.00929540,
"count":1,
})).all()
ts2, s2 = next(it)
assert ts2 == datetime(
year=2019, month=8, day=29, hour=0, minute=47, second=0, tzinfo=timezone.utc
)
assert (s2 == pd.Series(data={
"open": 8746.4,
"high": 8751.5,
"low": 8745.7,
"close": 8745.7,
"vwap": 8749.3,
"volume": 0.09663298,
"count": 8,
})).all()
# @parameterized.expand(
# [
# [
# pd.DataFrame( # One with "datetime" column (like internal model)
# # TODO: proper Time, proper currencies...
# [
# [
# datetime.fromtimestamp(1567039620, tz=timezone.utc),
# 8746.4,
# 8751.5,
# 8745.7,
# 8745.7,
# 8749.3,
# 0.09663298,
# 8,
# ],
# [
# datetime.fromtimestamp(1567039680, tz=timezone.utc),
# 8745.7,
# 8747.3,
# 8745.7,
# 8747.3,
# 8747.3,
# 0.00929540,
# 1,
# ],
# ],
# # grab that from kraken documentation
# columns=[
# "datetime",
# "open",
# "high",
# "low",
# "close",
# "vwap",
# "volume",
# "count",
# ],
# ).set_index("datetime")
# ],
# ]
# )
# def test_aiter_ok(self, df):
# import asyncio
#
# clock = [1567039690,1567039750,1567039810,1567039870]
# countcall = iter(clock)
# def timer():
# return datetime.fromtimestamp(next(countcall), tz=timezone.utc)
#
# slept = 0
# async def sleeper(secs):
# slept = secs
#
# """ Verifying that expected data iterates properly asynchronously """
# tidf = TimeindexedDataframe(data=df, timer=timer, sleeper=sleeper)
#
# import pandas.api.types as ptypes
#
# num_cols = ["open", "high", "low", "close", "vwap", "volume", "count"]
# assert all(ptypes.is_numeric_dtype(tidf.dataframe[col]) for col in num_cols)
#
# assert ptypes.is_datetime64_any_dtype(tidf.dataframe.index)
# assert tidf.dataframe.index.name == "datetime"
# assert tidf.dataframe.index.dtype == DatetimeTZDtype(tz=timezone.utc)
#
# sync=asyncio.Lock()
#
# async def testrunner():
# idx = 0
#
# asyncio.get_running_loop().create_task(provider())
#
# async for m in tidf:
# async with sync:
# if idx == 0:
# assert m[0] == datetime.fromtimestamp(1567039740, tz=timezone.utc)
# assert slept == 50
# elif idx == 1:
# assert m[0]== datetime.fromtimestamp(1567039800, tz=timezone.utc)
# assert slept == 50
# elif idx == 2:
# assert m[0]== datetime.fromtimestamp(1567039860, tz=timezone.utc)
# assert slept == 50
# idx += 1
# if idx >= 3:
# break
#
# async def provider():
# idx=len(df)
# async with sync:
# # TODO : better way to append data (using __call__ ??)
# tidf.dataframe[idx] = [
# datetime.fromtimestamp(1567039740, tz=timezone.utc),
# 8745.7,
# 8747.2,
# 8745.8,
# 8747.3,
# 8747.3,
# 0.00929540,
# 1,
# ]
#
# idx = idx + 1
# async with sync:
# tidf.dataframe[idx] = [
# datetime.fromtimestamp(1567039800, tz=timezone.utc),
# 8745.7,
# 8747.3,
# 8745.7,
# 8747.3,
# 8747.3,
# 0.00929540,
# 1,
# ]
#
# idx = idx + 1
# async with sync:
# tidf.dataframe[idx] = [
# datetime.fromtimestamp(1567039860, tz=timezone.utc),
# 8745.7,
# 8747.3,
# 8745.7,
# 8747.3,
# 8747.3,
# 0.00929540,
# 1,
# ]
#
# # Note : even if we use asyncio here for apparent "parallelism" of control flow,
# # the timer and sleeper are test stubs to control syncronicity...
# asyncio.run(testrunner())
if __name__ == "__main__":
unittest.main()
| 36.904936 | 109 | 0.395849 | 19,731 | 0.977411 | 0 | 0 | 14,682 | 0.7273 | 0 | 0 | 7,274 | 0.360331 |
ac94d7600e1dceeb8d2024e62d34864ff7ca1d58 | 2,041 | py | Python | app/models/admin.py | ShuaiGao/mini-shop-server | 8a72b2d457bba8778e97637027ffa82bfa11e8a9 | [
"MIT"
] | null | null | null | app/models/admin.py | ShuaiGao/mini-shop-server | 8a72b2d457bba8778e97637027ffa82bfa11e8a9 | [
"MIT"
] | 1 | 2019-07-08T12:32:29.000Z | 2019-07-08T12:32:29.000Z | app/models/admin.py | ShuaiGao/mini-shop-server | 8a72b2d457bba8778e97637027ffa82bfa11e8a9 | [
"MIT"
] | null | null | null | # _*_ coding: utf-8 _*_
"""
Created by Allen7D on 2018/6/16.
"""
import os.path as op
from flask_admin import Admin, BaseView, expose
from flask import render_template, redirect, url_for
from flask_admin.contrib.sqla import ModelView
from flask_admin.contrib.fileadmin import FileAdmin
from flask_admin import form
from app.models.base import db
from app.models.user import User
from app.models.banner import BannerView
from app.models.user_address import UserAddressView
from app.models.product import ProductView
from app.models.category import CategoryView
__author__ = 'Allen7D'
from wtforms.fields import SelectField
class HomeView(BaseView):
@expose('/')
def index(self):
return self.render("admin.html")
class MyView(ModelView):
# Disable model creation
# can_create = False
can_delete = False
# Override displayed fields
column_exclude_list = ['delete_time', 'update_time', 'create_time', 'status']
column_list = ('email', 'nickname', 'auth')
column_labels = {
'email': u"邮件",
'nickname':u"头像",
'auth':u"权限"
}
form_extra_fields = {
'auth':form.Select2Field('权限',choices=[('1','权限1'),('2','权限2')])
}
# form_overrides = dict(auth=SelectField)
# form_args = dict(
# # Pass the choices to the `SelectField`
# auth=dict(
# choices=[(1, '超级管理员'), (10, '普通管理员'), (100, '普通用户')]
# ))
def __init__(self, session, **kwargs):
# You can pass name add other parameters if you want to
super(MyView, self).__init__(User, session, **kwargs)
# @expose("/new/", methods=("GET", "POST"))
# def create_view(self):
# return self.render("create_user.html")
def CreateAdminView(admin):
path = op.join(op.dirname(__file__), u'../static')
admin.add_view(FileAdmin(path, u'/static', name = '文件管理'))
admin.add_view(BannerView(db.session, name=u'轮播图'))
admin.add_view(MyView(db.session, name=u'用户管理'))
admin.add_view(ProductView(db.session, name=u'商品管理'))
admin.add_view(CategoryView(db.session, name=u'商品分类'))
admin.add_view(UserAddressView(db.session, name=u'地址管理'))
| 27.958904 | 78 | 0.709456 | 1,043 | 0.487611 | 0 | 0 | 65 | 0.030388 | 0 | 0 | 820 | 0.383357 |
ac9516fa184eda7179ab54abc6a3a63da22f79c3 | 9,334 | py | Python | MKCommand.py | the-snowwhite/Machinekit-Workbench | 3e0c3ae55e67553bd599a3010ccf3a0392212333 | [
"MIT"
] | 8 | 2019-09-27T18:45:51.000Z | 2020-02-27T09:58:10.000Z | MKCommand.py | the-snowwhite/Machinekit-Workbench | 3e0c3ae55e67553bd599a3010ccf3a0392212333 | [
"MIT"
] | null | null | null | MKCommand.py | the-snowwhite/Machinekit-Workbench | 3e0c3ae55e67553bd599a3010ccf3a0392212333 | [
"MIT"
] | 3 | 2019-10-19T00:18:41.000Z | 2019-11-17T19:58:44.000Z | # Classes implementing the different commands that can be sent to MK
# The implemented classes do not cover the complete functional set of MK
# but are what is required to implement a basic UI.
import enum
import machinetalk.protobuf.message_pb2 as MESSAGE
import machinetalk.protobuf.status_pb2 as STATUS
import machinetalk.protobuf.types_pb2 as TYPES
class MKCommandStatus(enum.Enum):
'''An enumeration used to track a command through its entire lifetime.'''
Created = 0
Sent = 1
Executed = 2
Completed = 3
Obsolete = 4
class MKCommand(object):
'''Base class for all commands implementing the general framework.'''
def __init__(self, command):
self.msg = MESSAGE.Container()
self.msg.type = command
self.state = MKCommandStatus.Created
def __str__(self):
return self.__class__.__name__
def expectsResponses(self):
'''Overwrite and return False if the specific command does not get a response message.
Most commands do get a response so the default is to return True'''
return True
def serializeToString(self):
return self.msg.SerializeToString()
def msgSent(self):
'''Called by the framework when the command was sent to MK'''
self.state = MKCommandStatus.Sent
def msgExecuted(self):
'''Called by the framework when the command was executed by MK'''
self.state = MKCommandStatus.Executed
def msgCompleted(self):
'''Called by the framework when the command has completed'''
self.state = MKCommandStatus.Completed
def msgObsolete(self):
'''Called by the framework when the command has become obsolete'''
self.state = MKCommandStatus.Obsolete
def isExecuted(self):
'''Returns True if the command has been executed by MK'''
return self.state in [MKCommandStatus.Executed, MKCommandStatus.Completed]
def isCompleted(self):
'''Returns True if the command has completed'''
return self.state == MKCommandStatus.Completed
def isObsolete(self):
'''Returns True if the command is obsolete and can be removed'''
return self.state == MKCommandStatus.Obsolete
def statusString(self):
'''Return command's status as string.'''
return self.state.name
class MKCommandExecute(MKCommand):
'''Base class for all commands sent to the 'execute' interpreter.'''
def __init__(self, command):
MKCommand.__init__(self, command)
self.msg.interp_name = 'execute'
class MKCommandPreview(MKCommand):
'''Base class for all commands sent to the 'preview' interpreter.'''
def __init__(self, command):
MKCommand.__init__(self, command)
self.msg.interp_name = 'preview'
class MKCommandTaskSetState(MKCommandExecute):
'''Base class for setting the state of task variables.'''
def __init__(self, state):
MKCommandExecute.__init__(self, TYPES.MT_EMC_TASK_SET_STATE)
self.msg.emc_command_params.task_state = state
class MKCommandEstop(MKCommandTaskSetState):
'''Command to engage or disengage the E-Stop.
on=True means the E-Stop is pressed and MK will ignore all other commands.'''
def __init__(self, on):
MKCommandTaskSetState.__init__(self, STATUS.EMC_TASK_STATE_ESTOP if on else STATUS.EMC_TASK_STATE_ESTOP_RESET)
class MKCommandPower(MKCommandTaskSetState):
'''Command to power MK on or off.'''
def __init__(self, on):
MKCommandTaskSetState.__init__(self, STATUS.EMC_TASK_STATE_ON if on else STATUS.EMC_TASK_STATE_OFF)
class MKCommandOpenFile(MKCommand):
'''Command to open a file, either for 'executing' it or for 'previewing' it.'''
def __init__(self, filename, preview):
if preview:
MKCommandPreview.__init__(self, TYPES.MT_EMC_TASK_PLAN_OPEN)
else:
MKCommandExecute.__init__(self, TYPES.MT_EMC_TASK_PLAN_OPEN)
self.msg.emc_command_params.path = filename
class MKCommandTaskRun(MKCommand):
'''Command to start execution of the currently opened file - or to display its preview.'''
def __init__(self, preview, line=0):
if preview:
MKCommandPreview.__init__(self, TYPES.MT_EMC_TASK_PLAN_RUN)
else:
MKCommandExecute.__init__(self, TYPES.MT_EMC_TASK_PLAN_RUN)
self.msg.emc_command_params.line_number = line
self.preview = preview
def expectsResponses(self):
return not self.preview
class MKCommandTaskStep(MKCommandExecute):
'''Command to execute a single step of the current task (from its current line).'''
def __init__(self):
MKCommandExecute.__init__(self, TYPES.MT_EMC_TASK_PLAN_STEP)
class MKCommandTaskPause(MKCommandExecute):
'''Command to pause execution of the current task.'''
def __init__(self):
MKCommandExecute.__init__(self, TYPES.MT_EMC_TASK_PLAN_PAUSE)
class MKCommandTaskResume(MKCommandExecute):
'''Command to resume a currently paused task.'''
def __init__(self):
MKCommandExecute.__init__(self, TYPES.MT_EMC_TASK_PLAN_RESUME)
class MKCommandTaskReset(MKCommandExecute):
'''Command to reset task execution. This clears any paused state and resets progress to line 0.'''
def __init__(self, preview):
if preview:
MKCommandPreview.__init__(self, TYPES.MT_EMC_TASK_PLAN_INIT)
else:
MKCommandExecute.__init__(self, TYPES.MT_EMC_TASK_PLAN_INIT)
class MKCommandAxisHome(MKCommand):
'''Command to initiate homing (or unhoming) of a gifen axis.
The homing itself is done by MK without any need of interaction. The staging and sequencing
of homing multiple axes has to be orchestrated by the UI though.'''
def __init__(self, index, home=True):
MKCommand.__init__(self, TYPES.MT_EMC_AXIS_HOME if home else TYPES.MT_EMC_AXIS_UNHOME)
self.msg.emc_command_params.index = index
def __str__(self):
return "MKCommandAxisHome[%d]" % (self.msg.emc_command_params.index)
class MKCommandTaskExecute(MKCommandExecute):
'''Command for executing arbitrary commands and command sequences.'''
def __init__(self, cmd):
MKCommandExecute.__init__(self, TYPES.MT_EMC_TASK_PLAN_EXECUTE)
self.msg.emc_command_params.command = cmd
class MKCommandTaskSetMode(MKCommandExecute):
'''Command to set a specific task mode. Valid modes are:
* STATUS.EmcTaskModeType.EMC_TASK_MODE_AUTO ... required for the execute interpreter to take control
* STATUS.EmcTaskModeType.EMC_TASK_MODE_MDI ... required to issue individual g-code commands
* STATUS.EmcTaskModeType.EMC_TASK_MODE_MANUAL ... required for jogging
'''
def __init__(self, mode):
MKCommandExecute.__init__(self, TYPES.MT_EMC_TASK_SET_MODE)
self.msg.emc_command_params.task_mode = mode
class MKCommandTaskAbort(MKCommandExecute):
'''Command to abort the current task.'''
def __init__(self):
MKCommandExecute.__init__(self, TYPES.MT_EMC_TASK_ABORT)
class MKCommandAxisAbort(MKCommandExecute):
'''Command to abort the current axis command - mostly used to stop the active jogging command.'''
def __init__(self, index):
MKCommandExecute.__init__(self, TYPES.MT_EMC_AXIS_ABORT)
self.msg.emc_command_params.index = index
class MKCommandAxisJog(MKCommandExecute):
'''Command to initiate jogging.
There are two different types of jog, distance and incremental.
Incremental jogging initiates the jog which will continue until either
a new jog command is sent or a MKCommandAxisAbort command is sent. This puts some
requirements on the UI's reliability and capability of sending that termination
command.
Distance jogging is marginally safer because MK will silently ignore
a distance jog if it exceeds the axis' limit. There is no indication that the
command was not executed and the tool is still at the same position as it was
before, making the next jog a risky manouver. This is important for scripted jog
sequences like a contour around the tasks boundaries.
It is the UI's responsibility to extract proper values for velocity and distance.
'''
def __init__(self, index, velocity, distance = None):
self.index = index
self.velocity = velocity
self.distance = distance
if distance is None:
MKCommandExecute.__init__(self, TYPES.MT_EMC_AXIS_JOG)
else:
MKCommandExecute.__init__(self, TYPES.MT_EMC_AXIS_INCR_JOG)
self.msg.emc_command_params.distance = distance
self.msg.emc_command_params.index = index
self.msg.emc_command_params.velocity = velocity
def __str__(self):
if self.distance:
return "AxisJog(%d, %.2f, %.2f)" % (self.index, self.velocity, self.distance)
return "AxisJog(%d, %.2f, -)" % (self.index, self.velocity)
class MKCommandTrajSetScale(MKCommand):
'''Command to overwrite the feed rate or rapid speed of the tool bit. scale is a multiplier of the configured speed.'''
def __init__(self, scale, rapid=False):
if rapid:
MKCommand.__init__(self, TYPES.MT_EMC_TRAJ_SET_RAPID_SCALE)
else:
MKCommand.__init__(self, TYPES.MT_EMC_TRAJ_SET_SCALE)
self.msg.emc_command_params.scale = scale
| 42.235294 | 123 | 0.714378 | 8,936 | 0.95736 | 0 | 0 | 0 | 0 | 0 | 0 | 3,582 | 0.383758 |
ac95238747ed70d5ab97026ac30760503ed60208 | 24,199 | py | Python | src/testing/TestON/bin/nec.py | securedataplane/preacher | 2f76581de47036e79cd6e1183948c88b35ce4950 | [
"MIT"
] | 1 | 2020-07-23T08:06:44.000Z | 2020-07-23T08:06:44.000Z | src/testing/TestON/bin/nec.py | securedataplane/preacher | 2f76581de47036e79cd6e1183948c88b35ce4950 | [
"MIT"
] | null | null | null | src/testing/TestON/bin/nec.py | securedataplane/preacher | 2f76581de47036e79cd6e1183948c88b35ce4950 | [
"MIT"
] | null | null | null | class NEC:
def __init__( self ):
self.prompt = '(.*)'
self.timeout = 60
def show(self, *options, **def_args ):
'''Possible Options :[' access-filter ', ' accounting ', ' acknowledgments ', ' auto-config ', ' axrp ', ' cfm ', ' channel-group ', ' clock ', ' config-lock-status ', ' cpu ', ' dhcp ', ' dot1x ', ' dumpfile ', ' efmoam ', ' environment ', ' file ', ' flash ', ' gsrp ', ' history ', ' igmp-snooping ', ' interfaces ', ' ip ', ' ip-dual ', ' ipv6-dhcp ', ' license ', ' lldp ', ' logging ', ' loop-detection ', ' mac-address-table ', ' mc ', ' memory ', ' mld-snooping ', ' netconf ', ' netstat ', ' ntp ', ' oadp ', ' openflow ', ' port ', ' power ', ' processes ', ' qos ', ' qos-flow ', ' sessions ', ' sflow ', ' spanning-tree ', ' ssh ', ' system ', ' tcpdump ', ' tech-support ', ' track ', ' version ', ' vlan ', ' vrrpstatus ', ' whoami ']'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_ip(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show ip "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_mc(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show mc "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_cfm(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show cfm "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_ntp(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show ntp "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_ssh(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show ssh "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_qos(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show qos "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_cpu(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show cpu "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_vlan(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show vlan "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_lldp(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show lldp "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_dhcp(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show dhcp "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_axrp(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show axrp "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_oadp(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show oadp "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_gsrp(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show gsrp "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_port(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show port "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_file(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show file "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_power(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show power "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_clock(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show clock "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_dot1x(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show dot1x "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_sflow(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show sflow "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_track(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show track "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_flash(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show flash "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_system(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show system "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_whoami(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show whoami "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_efmoam(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show efmoam "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_memory(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show memory "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_tcpdump(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show tcpdump "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_history(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show history "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_logging(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show logging "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_license(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show license "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_netstat(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show netstat "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_version(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show version "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_netconf(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show netconf "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_ipdual(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show ip-dual "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_sessions(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show sessions "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_qosflow(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show qos-flow "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_openflow(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show openflow "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_dumpfile(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show dumpfile "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_ipv6dhcp(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show ipv6-dhcp "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_processes(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show processes "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_vrrpstatus(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show vrrpstatus "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_interfaces(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show interfaces "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_environment(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show environment "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_autoconfig(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show auto-config "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_techsupport(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show tech-support "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_mldsnooping(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show mld-snooping "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_igmpsnooping(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show igmp-snooping "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_channelgroup(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show channel-group "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_spanningtree(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show spanning-tree "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_loopdetection(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show loop-detection "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_acknowledgments(self, *options, **def_args ):
'''Possible Options :[' interface ']'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show acknowledgments "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_macaddresstable(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show mac-address-table "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_configlockstatus(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show config-lock-status "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
def show_acknowledgments_interface(self, *options, **def_args ):
'''Possible Options :[]'''
arguments= ''
for option in options:
arguments = arguments + option +' '
prompt = def_args.setdefault('prompt',self.prompt)
timeout = def_args.setdefault('timeout',self.timeout)
self.execute( cmd= "show acknowledgments interface "+ arguments, prompt = prompt, timeout = timeout )
return main.TRUE
| 44.320513 | 867 | 0.592793 | 24,197 | 0.999917 | 0 | 0 | 0 | 0 | 0 | 0 | 4,498 | 0.185875 |
ac9572d83ed99fc10622f825cbd2ad1d0bacc722 | 1,094 | py | Python | main.py | StewartNZ/OpenCVPython_ObjectDectection_Tut_1 | bce18fded0da0e918d6e63c9fc0c469f280c09fe | [
"MIT"
] | null | null | null | main.py | StewartNZ/OpenCVPython_ObjectDectection_Tut_1 | bce18fded0da0e918d6e63c9fc0c469f280c09fe | [
"MIT"
] | null | null | null | main.py | StewartNZ/OpenCVPython_ObjectDectection_Tut_1 | bce18fded0da0e918d6e63c9fc0c469f280c09fe | [
"MIT"
] | null | null | null | import cv2
# Real-time object detection demo: grab webcam frames and overlay
# SSD-MobileNet v3 (COCO) detections using OpenCV's dnn module.
threshold = 0.5 # Threshold to detect objects
cap = cv2.VideoCapture(0)  # device 0 = default webcam
cap.set(3, 640)  # property ids 3/4 are frame width/height (CAP_PROP_FRAME_WIDTH/HEIGHT) -- confirm
cap.set(4, 480)
classNames = []
classFile = "coco.names"  # assumes one COCO class label per line -- TODO confirm file format
with open(classFile, "rt") as f:
    classNames = f.read().rstrip("\n").split("\n")
configPath = "ssd_mobilenet_v3_large_coco_2020_01_14.pbtxt"
weightsPath = "frozen_inference_graph.pb"
net = cv2.dnn_DetectionModel(weightsPath, configPath)
# Input preprocessing: 320x320 input; scale 1/127.5 with mean 127.5 maps
# pixel values to roughly [-1, 1]; swap BGR (OpenCV) to RGB for the model.
net.setInputSize(320, 320)
net.setInputScale(1.0 / 127.5)
net.setInputMean((127.5, 127.5, 127.5))
net.setInputSwapRB(True)
while True:
    success, img = cap.read()
    classIds, confs, bbox = net.detect(img, confThreshold=threshold)
    print(classIds, bbox)
    if len(classIds) != 0:
        for classId, confidence, box in zip(classIds.flatten(), confs.flatten(), bbox):
            cv2.rectangle(img, box, color=(0, 255, 0), thickness=2)
            # classId is offset by 1 relative to the classNames list index.
            cv2.putText(img, classNames[classId - 1].upper() + " " + str(round(confidence*100, 2)),
                        (box[0] + 10, box[1] + 30), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)
    cv2.imshow("Output", img)
    cv2.waitKey(1)
| 29.567568 | 99 | 0.657221 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.125229 |
ac9628c7fcfcaba0ff2db9009e0f62d4acc1ad33 | 4,302 | py | Python | custom_functions.py | JacopoMalatesta/letterboxd_30000 | 90df0d19cd4b5a24259a76a01780615cd6d51356 | [
"MIT"
] | null | null | null | custom_functions.py | JacopoMalatesta/letterboxd_30000 | 90df0d19cd4b5a24259a76a01780615cd6d51356 | [
"MIT"
] | null | null | null | custom_functions.py | JacopoMalatesta/letterboxd_30000 | 90df0d19cd4b5a24259a76a01780615cd6d51356 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from bs4 import BeautifulSoup
import re
import json
import numpy as np
import sys
sys.setrecursionlimit(10000)
# Write a function that parses HTML pages into BeautifulSoup objects
# In[2]:
def soupify(html):
    """Parse raw HTML text into a BeautifulSoup tree using the lxml parser."""
    return BeautifulSoup(html, 'lxml')
# Film id (we need to download this again to perform a join later on)
# In[3]:
def scrape_id(soup):
    """Return the numeric film id stored on the lazy-load container div."""
    container = soup.find("div", class_="really-lazy-load")
    return int(container.get("data-film-id"))
# Film title
# In[4]:
def scrape_title(soup):
    """Return the film title from the page's embedded JSON-LD metadata."""
    raw = soup.find("script", {"type": "application/ld+json"}).string
    cleaned = raw.replace('\n/* <![CDATA[ */\n', '').replace('\n/* ]]> */\n', '')
    return json.loads(cleaned)['name']
# Year
# In[5]:
def scrape_year(soup):
    """Return the film's release year as an int, or ``np.nan`` on failure.

    Reads the page's JSON-LD <script> block, strips the CDATA guard
    comments it is wrapped in, and takes the first releasedEvent date.
    """
    try:
        s = soup.find("script", {"type": "application/ld+json"}).string
        s = s.replace('\n/* <![CDATA[ */\n', '').replace('\n/* ]]> */\n', '')
        d = json.loads(s)
        year = int(d['releasedEvent'][0]['startDate'])
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed during long scraping runs.
        return np.nan
    else:
        return year
# Director
# In[6]:
def scrape_director(soup):
    """Return the director name(s) joined by ';', or ``np.nan`` on failure."""
    try:
        s = soup.find("script", {"type": "application/ld+json"}).string
        s = s.replace('\n/* <![CDATA[ */\n', '').replace('\n/* ]]> */\n', '')
        d = json.loads(s)
        names = [director['name'] for director in d['director']]
        names = ';'.join(names)
    except Exception:  # narrowed from a bare ``except:`` (kept ^C working)
        return np.nan
    else:
        return names
# Cast
# In[7]:
def scrape_cast(soup):
    """Return the actor name(s) joined by ';', or ``np.nan`` on failure."""
    try:
        s = soup.find("script", {"type": "application/ld+json"}).string
        s = s.replace('\n/* <![CDATA[ */\n', '').replace('\n/* ]]> */\n', '')
        d = json.loads(s)
        actors = [actor['name'] for actor in d['actors']]
        actors = ';'.join(actors)
    except Exception:  # narrowed from a bare ``except:`` (kept ^C working)
        return np.nan
    else:
        return actors
# Country
# In[8]:
def scrape_country(soup):
    """Return the countries of origin joined by ';', or ``np.nan`` on failure."""
    try:
        s = soup.find("script", {"type": "application/ld+json"}).string
        s = s.replace('\n/* <![CDATA[ */\n', '').replace('\n/* ]]> */\n', '')
        d = json.loads(s)
        countries_of_origin = [country['name'] for country in d['countryOfOrigin']]
        countries_of_origin = ';'.join(countries_of_origin)
    except Exception:  # narrowed from a bare ``except:`` (kept ^C working)
        return np.nan
    else:
        return countries_of_origin
# Genres
# In[9]:
def scrape_genre(soup):
    """Return the genres joined by ';', or ``np.nan`` on failure."""
    try:
        s = soup.find("script", {"type": "application/ld+json"}).string
        s = s.replace('\n/* <![CDATA[ */\n', '').replace('\n/* ]]> */\n', '')
        d = json.loads(s)
        genre_names = ';'.join(d['genre'])
    except Exception:  # narrowed from a bare ``except:`` (kept ^C working)
        return np.nan
    else:
        return genre_names
# Production company
# In[10]:
def scrape_production_company(soup):
    """Return the production company name(s) joined by ';', or ``np.nan`` on failure."""
    try:
        s = soup.find("script", {"type": "application/ld+json"}).string
        s = s.replace('\n/* <![CDATA[ */\n', '').replace('\n/* ]]> */\n', '')
        d = json.loads(s)
        company_names = [company['name'] for company in d['productionCompany']]
        company_names = ';'.join(company_names)
    except Exception:  # narrowed from a bare ``except:`` (kept ^C working)
        return np.nan
    else:
        return company_names
# Runtime
# In[11]:
def scrape_runtime(soup):
    """Return the runtime in minutes (first integer in the footer text),
    or ``np.nan`` on failure."""
    try:
        string = soup.find("p", class_="text-link text-footer").text
        pattern = r"\d+"
        runtime = int(re.findall(pattern, string)[0])
    except Exception:  # narrowed from a bare ``except:`` (kept ^C working)
        return np.nan
    else:
        return runtime
# Languages
# In[12]:
def scrape_languages(soup):
    """Return the spoken languages joined by ';', or ``np.nan`` on failure."""
    try:
        languages = [language.text for language in soup.find_all("a", href = re.compile("language"))]
        languages = ';'.join(languages)
    except Exception:  # narrowed from a bare ``except:`` (kept ^C working)
        return np.nan
    else:
        return languages
# Alternative titles
# In[13]:
def scrape_alt_titles(soup):
    """Return the alternative titles text with newlines/tabs stripped,
    or ``np.nan`` on failure."""
    try:
        alt_titles = soup.find("div", class_ = "text-indentedlist").find("p").text
        alt_titles = alt_titles.replace("\n", "").replace("\t", "")
    except Exception:  # narrowed from a bare ``except:`` (kept ^C working)
        return np.nan
    else:
        return alt_titles
# People
# In[14]:
def scrape_people(soup, role):
    """Return a ';'-joined list of people whose link href matches ``role``
    (e.g. 'actor', 'writer'), or ``np.nan`` on failure."""
    try:
        people = [person.text for person in soup.find_all("a", class_="text-slug", href = re.compile(role))]
        people = ';'.join(people)
    except Exception:  # narrowed from a bare ``except:`` (kept ^C working)
        return np.nan
    else:
        return people
| 19.824885 | 108 | 0.552069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,213 | 0.281962 |
ac966bdc85252514fe041fc3a7a1ee98e28774c4 | 2,651 | py | Python | girder/molecules/molecules/models/geometry.py | bnmajor/mongochemserver | aa76ab6e7f749c3e893f27e208984b6ed2d4b2b5 | [
"BSD-3-Clause"
] | 14 | 2015-05-04T16:40:48.000Z | 2021-07-13T08:00:30.000Z | girder/molecules/molecules/models/geometry.py | bnmajor/mongochemserver | aa76ab6e7f749c3e893f27e208984b6ed2d4b2b5 | [
"BSD-3-Clause"
] | 88 | 2015-07-24T07:58:43.000Z | 2021-02-23T19:37:13.000Z | girder/molecules/molecules/models/geometry.py | bnmajor/mongochemserver | aa76ab6e7f749c3e893f27e208984b6ed2d4b2b5 | [
"BSD-3-Clause"
] | 8 | 2015-06-12T20:54:39.000Z | 2021-04-09T01:07:15.000Z | from bson.objectid import ObjectId
from girder.models.model_base import AccessControlledModel
from girder.constants import AccessType
from molecules.models.molecule import Molecule as MoleculeModel
from molecules.utilities.get_cjson_energy import get_cjson_energy
from molecules.utilities.pagination import parse_pagination_params
from molecules.utilities.pagination import search_results_dict
from molecules.utilities.whitelist_cjson import whitelist_cjson
class Geometry(AccessControlledModel):
    """Girder model for a molecule geometry: a whitelisted CJSON structure
    tied to a molecule, with optional provenance and an optional energy."""
    def __init__(self):
        super(Geometry, self).__init__()
    def initialize(self):
        # Collection name, index, and read-exposed fields for the model.
        # NOTE(review): called by the Girder model framework on registration -- confirm.
        self.name = 'geometry'
        self.ensureIndex('moleculeId')
        self.exposeFields(level=AccessType.READ, fields=(
            '_id', 'moleculeId', 'cjson', 'provenanceType', 'provenanceId'))
    def validate(self, doc):
        """Normalize and validate a geometry document before it is saved.

        Loading the molecule verifies it exists; its '_id' replaces whatever
        form of id the caller supplied.
        """
        # If we have a moleculeId ensure it is valid.
        if 'moleculeId' in doc:
            mol = MoleculeModel().load(doc['moleculeId'], force=True)
            doc['moleculeId'] = mol['_id']
        return doc
    def create(self, user, moleculeId, cjson, provenanceType=None,
               provenanceId=None, public=True):
        """Create and save a geometry document owned by ``user``.

        Only the geometry-related parts of ``cjson`` are stored; provenance
        fields and energy are attached when available. Returns the saved doc.
        """
        # We will whitelist the cjson to only include the geometry parts
        geometry = {
            'moleculeId': moleculeId,
            'cjson': whitelist_cjson(cjson),
            'creatorId': user['_id']
        }
        if provenanceType is not None:
            geometry['provenanceType'] = provenanceType
        if provenanceId is not None:
            geometry['provenanceId'] = provenanceId
        # If the cjson has an energy, set it
        energy = get_cjson_energy(cjson)
        if energy is not None:
            geometry['energy'] = energy
        self.setUserAccess(geometry, user=user, level=AccessType.ADMIN)
        if public:
            self.setPublic(geometry, True)
        return self.save(geometry)
    def find_geometries(self, moleculeId, user, paging_params):
        """Return a paginated search-results dict of geometries for a molecule,
        filtered by ``user``'s read permissions."""
        limit, offset, sort = parse_pagination_params(paging_params)
        query = {
            'moleculeId': ObjectId(moleculeId)
        }
        fields = [
            'creatorId',
            'moleculeId',
            'provenanceId',
            'provenanceType',
            'energy'
        ]
        cursor = self.findWithPermissions(query, user=user, fields=fields,
                                          limit=limit, offset=offset,
                                          sort=sort)
        # count_documents ignores the permission filter above, so the total
        # reflects all matches for the molecule -- NOTE(review): confirm intended.
        num_matches = cursor.collection.count_documents(query)
        geometries = [x for x in cursor]
        return search_results_dict(geometries, num_matches, limit, offset, sort)
| 31.939759 | 80 | 0.634478 | 2,189 | 0.825726 | 0 | 0 | 0 | 0 | 0 | 0 | 408 | 0.153904 |
ac97d43733100708fa2b0d168201fd6612736104 | 2,185 | py | Python | GearC/material.py | cfernandesFEUP/Gear-Calculation | c15249c23f97e1168e3316ad5e27ed747758353a | [
"Unlicense"
] | 3 | 2020-09-01T13:19:10.000Z | 2021-12-13T13:59:00.000Z | GearC/material.py | cfernandesFEUP/Gear-Calculation | c15249c23f97e1168e3316ad5e27ed747758353a | [
"Unlicense"
] | null | null | null | GearC/material.py | cfernandesFEUP/Gear-Calculation | c15249c23f97e1168e3316ad5e27ed747758353a | [
"Unlicense"
] | null | null | null | ## LIBRARY OF MATERIALS #######################################################
def matp(mat, Tbulk, NL):
    """Return material properties for each gear material name in ``mat``.

    Parameters
    ----------
    mat : sequence of str
        Material names; supported: 'POM', 'PEEK', 'PA66', 'ADI', 'STEEL'.
    Tbulk : float
        Bulk temperature used in the polymer fatigue-limit correlations.
    NL : float
        Number of load cycles used in the fatigue-limit correlations.

    Returns
    -------
    tuple of numpy.ndarray
        (E, v, cpg, kg, rohg, sigmaHlim, sigmaFlim): Young's modulus [Pa],
        Poisson ratio, specific heat [J/kg.K], thermal conductivity [W/m.K],
        density [kg/m^3], contact and bending fatigue limits, one entry per
        material. Unrecognised names leave their entries at 0.0 (unchanged
        behaviour from before).
    """
    import numpy as np
    # Size the outputs from the input instead of the hard-coded 2: the
    # original raised IndexError for a single material and silently ignored
    # any material beyond the second.
    n = len(mat)
    E, v, cpg, kg, rohg, sigmaHlim, sigmaFlim = [np.zeros(n) for _ in range(7)]
    for i in range(n):
        if mat[i] == 'POM':
            E[i] = 3.2e9  # 2900 MPa (min) - 3500 MPa (max)
            v[i] = 0.35
            cpg[i] = 1465  # J/kg.K
            kg[i] = 0.3  # W/m.K (0.23 (min) 0.37 (max))
            rohg[i] = 1415  # 1410 (min) - 1420 (max)
            sigmaHlim[i] = 36 - 0.0012*Tbulk**2 + (1000 - 0.025*Tbulk**2)*NL**(-0.21)
            sigmaFlim[i] = 26 - 0.0025*Tbulk**2 + 400*NL**(-0.2)
        elif mat[i] == 'PEEK':
            E[i] = 3.65e9
            v[i] = 0.38
            cpg[i] = 1472  # 1443 - 1501
            kg[i] = 0.25  # W/m.K
            rohg[i] = 1320
            sigmaHlim[i] = 36 - 0.0012*Tbulk**2 + (1000 - 0.025*Tbulk**2)*NL**(-0.21)  # Nylon (PA66)
            sigmaFlim[i] = 30 - 0.22*Tbulk + (4600 - 900*Tbulk**0.3)*NL**(-1/3)  # Nylon (PA66)
        elif mat[i] == 'PA66':
            E[i] = 1.85e9  # 1700 MPa (min) - 2000 MPa (max)
            v[i] = 0.3  # 0.25 - 0.35
            cpg[i] = 1670  # J/kg.K
            kg[i] = 0.26  # W/m.K (0.25 (min) 0.27 (max))
            rohg[i] = 1140  # 1130 (min) - 1150 (max))
            sigmaHlim[i] = 36 - 0.0012*Tbulk**2 + (1000 - 0.025*Tbulk**2)*NL**(-0.21)
            sigmaFlim[i] = 30 - 0.22*Tbulk + (4600 - 900*Tbulk**0.3)*NL**(-1/3)
        elif mat[i] == 'ADI':
            E[i] = 210e9
            v[i] = 0.26  # 0.22 (min) 0.30 (max)
            cpg[i] = 460.548
            kg[i] = 55  # W/m.K
            rohg[i] = 7850
            sigmaHlim[i] = 1500
            sigmaFlim[i] = 430
        elif mat[i] == 'STEEL':
            E[i] = 206e9
            v[i] = 0.3  # 0.22 (min) 0.30 (max)
            cpg[i] = 465
            kg[i] = 46  # W/m.K
            rohg[i] = 7830
            sigmaHlim[i] = 1500
            sigmaFlim[i] = 430
    return E, v, cpg, kg, rohg, sigmaHlim, sigmaFlim
| 46.489362 | 101 | 0.381236 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 450 | 0.20595 |
ac9916de49ddb625f43df3193b1ae53742dad7ca | 802 | py | Python | crawler/browser/javascript.py | CvvT/crawler_sqlmap | 8cca8e8386e4b2160e2064a510b47c1202acdf39 | [
"Apache-2.0"
] | 9 | 2017-06-23T08:57:45.000Z | 2022-03-16T16:10:10.000Z | crawler/browser/javascript.py | CvvT/crawler_sqlmap | 8cca8e8386e4b2160e2064a510b47c1202acdf39 | [
"Apache-2.0"
] | 4 | 2017-06-27T10:16:52.000Z | 2018-08-24T00:47:07.000Z | crawler/browser/javascript.py | CvvT/crawler_sqlmap | 8cca8e8386e4b2160e2064a510b47c1202acdf39 | [
"Apache-2.0"
] | 3 | 2017-06-23T08:57:50.000Z | 2021-02-02T05:58:09.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = 'CwT'
# document.body.innerHTML += '<form id="dynForm" action="http://example.com/" method="post">
# <input type="hidden" name="q" value="a"></form>';
# document.getElementById("dynForm").submit();
# JavaScript templates for injecting a hidden POST form and submitting it.
POST_JS = '<form id=\\"dynamicform\\" action=\\"%s\\" method=\\"post\\">%s</form>'
INPUT_JS = '<input type=\\"hidden\\" name=\\"%s\\" value=%s>'
EXECUTE_JS = 'document.body.innerHTML = "%s"; document.getElementById("dynamicform").submit();'


def post_js(url, data):
    """Build a JS snippet that writes a hidden POST form for ``url`` with the
    ``data`` fields into the page body and immediately submits it."""
    fields = []
    for key, value in data.items():
        # Integers are emitted bare; everything else is quoted (escaped).
        rendered = str(value) if isinstance(value, int) else "\\\"%s\\\"" % value
        fields.append(INPUT_JS % (key, rendered))
    form = POST_JS % (url, ''.join(fields))
    return EXECUTE_JS % form
ac9affe885fc3efd2cfe8bac23b5b5b2bbdd9cfc | 13,297 | py | Python | vtpl_api/models/engine_task.py | vtpl1/vtpl_api | d289c92254deb040de925205c583de69802a1c6b | [
"MIT"
] | null | null | null | vtpl_api/models/engine_task.py | vtpl1/vtpl_api | d289c92254deb040de925205c583de69802a1c6b | [
"MIT"
] | null | null | null | vtpl_api/models/engine_task.py | vtpl1/vtpl_api | d289c92254deb040de925205c583de69802a1c6b | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Engine api
Engine APIs # noqa: E501
The version of the OpenAPI document: 1.0.4
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class EngineTask(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'str',
'capbilities_type': 'Capability',
'event_type': 'EventType',
'engine_machine_id': 'str',
'is_expired': 'bool',
'time_to_live': 'int',
'source': 'SourceEndPoint',
'destination': 'DestinationEndPoint',
'zone_setting': 'EngineTaskZoneSetting',
'line_setting': 'EngineTaskLineSetting',
'config': 'list[Config]',
'updated': 'datetime',
'created': 'datetime',
'etag': 'str',
'links': 'Links'
}
attribute_map = {
'id': '_id',
'capbilities_type': 'capbilitiesType',
'event_type': 'eventType',
'engine_machine_id': 'engineMachineId',
'is_expired': 'isExpired',
'time_to_live': 'timeToLive',
'source': 'source',
'destination': 'destination',
'zone_setting': 'zoneSetting',
'line_setting': 'lineSetting',
'config': 'config',
'updated': 'updated',
'created': 'created',
'etag': 'etag',
'links': 'links'
}
def __init__(self, id=None, capbilities_type=None, event_type=None, engine_machine_id=None, is_expired=False, time_to_live=-1, source=None, destination=None, zone_setting=None, line_setting=None, config=None, updated=None, created=None, etag=None, links=None): # noqa: E501
"""EngineTask - a model defined in OpenAPI""" # noqa: E501
self._id = None
self._capbilities_type = None
self._event_type = None
self._engine_machine_id = None
self._is_expired = None
self._time_to_live = None
self._source = None
self._destination = None
self._zone_setting = None
self._line_setting = None
self._config = None
self._updated = None
self._created = None
self._etag = None
self._links = None
self.discriminator = None
if id is not None:
self.id = id
if capbilities_type is not None:
self.capbilities_type = capbilities_type
if event_type is not None:
self.event_type = event_type
if engine_machine_id is not None:
self.engine_machine_id = engine_machine_id
if is_expired is not None:
self.is_expired = is_expired
if time_to_live is not None:
self.time_to_live = time_to_live
if source is not None:
self.source = source
if destination is not None:
self.destination = destination
if zone_setting is not None:
self.zone_setting = zone_setting
if line_setting is not None:
self.line_setting = line_setting
if config is not None:
self.config = config
if updated is not None:
self.updated = updated
if created is not None:
self.created = created
if etag is not None:
self.etag = etag
if links is not None:
self.links = links
@property
def id(self):
"""Gets the id of this EngineTask. # noqa: E501
:return: The id of this EngineTask. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this EngineTask.
:param id: The id of this EngineTask. # noqa: E501
:type: str
"""
self._id = id
@property
def capbilities_type(self):
"""Gets the capbilities_type of this EngineTask. # noqa: E501
:return: The capbilities_type of this EngineTask. # noqa: E501
:rtype: Capability
"""
return self._capbilities_type
@capbilities_type.setter
def capbilities_type(self, capbilities_type):
"""Sets the capbilities_type of this EngineTask.
:param capbilities_type: The capbilities_type of this EngineTask. # noqa: E501
:type: Capability
"""
self._capbilities_type = capbilities_type
@property
def event_type(self):
"""Gets the event_type of this EngineTask. # noqa: E501
:return: The event_type of this EngineTask. # noqa: E501
:rtype: EventType
"""
return self._event_type
@event_type.setter
def event_type(self, event_type):
"""Sets the event_type of this EngineTask.
:param event_type: The event_type of this EngineTask. # noqa: E501
:type: EventType
"""
self._event_type = event_type
@property
def engine_machine_id(self):
"""Gets the engine_machine_id of this EngineTask. # noqa: E501
:return: The engine_machine_id of this EngineTask. # noqa: E501
:rtype: str
"""
return self._engine_machine_id
@engine_machine_id.setter
def engine_machine_id(self, engine_machine_id):
"""Sets the engine_machine_id of this EngineTask.
:param engine_machine_id: The engine_machine_id of this EngineTask. # noqa: E501
:type: str
"""
self._engine_machine_id = engine_machine_id
@property
def is_expired(self):
"""Gets the is_expired of this EngineTask. # noqa: E501
Explanations: * true = Engines will NEVER execute this task * false = Engines will execute this task # noqa: E501
:return: The is_expired of this EngineTask. # noqa: E501
:rtype: bool
"""
return self._is_expired
@is_expired.setter
def is_expired(self, is_expired):
"""Sets the is_expired of this EngineTask.
Explanations: * true = Engines will NEVER execute this task * false = Engines will execute this task # noqa: E501
:param is_expired: The is_expired of this EngineTask. # noqa: E501
:type: bool
"""
self._is_expired = is_expired
@property
def time_to_live(self):
"""Gets the time_to_live of this EngineTask. # noqa: E501
Time in milliseconds of expiry or the task. Engines will not execute an expired task. Explanations: * -1 = Never expires * -2 = Expired * 0 = Will expire in 0 milliseconds * >0 = milliseconds till expiry # noqa: E501
:return: The time_to_live of this EngineTask. # noqa: E501
:rtype: int
"""
return self._time_to_live
@time_to_live.setter
def time_to_live(self, time_to_live):
"""Sets the time_to_live of this EngineTask.
Time in milliseconds of expiry or the task. Engines will not execute an expired task. Explanations: * -1 = Never expires * -2 = Expired * 0 = Will expire in 0 milliseconds * >0 = milliseconds till expiry # noqa: E501
:param time_to_live: The time_to_live of this EngineTask. # noqa: E501
:type: int
"""
self._time_to_live = time_to_live
@property
def source(self):
"""Gets the source of this EngineTask. # noqa: E501
:return: The source of this EngineTask. # noqa: E501
:rtype: SourceEndPoint
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this EngineTask.
:param source: The source of this EngineTask. # noqa: E501
:type: SourceEndPoint
"""
self._source = source
@property
def destination(self):
"""Gets the destination of this EngineTask. # noqa: E501
:return: The destination of this EngineTask. # noqa: E501
:rtype: DestinationEndPoint
"""
return self._destination
@destination.setter
def destination(self, destination):
"""Sets the destination of this EngineTask.
:param destination: The destination of this EngineTask. # noqa: E501
:type: DestinationEndPoint
"""
self._destination = destination
@property
def zone_setting(self):
"""Gets the zone_setting of this EngineTask. # noqa: E501
:return: The zone_setting of this EngineTask. # noqa: E501
:rtype: EngineTaskZoneSetting
"""
return self._zone_setting
@zone_setting.setter
def zone_setting(self, zone_setting):
"""Sets the zone_setting of this EngineTask.
:param zone_setting: The zone_setting of this EngineTask. # noqa: E501
:type: EngineTaskZoneSetting
"""
self._zone_setting = zone_setting
@property
def line_setting(self):
"""Gets the line_setting of this EngineTask. # noqa: E501
:return: The line_setting of this EngineTask. # noqa: E501
:rtype: EngineTaskLineSetting
"""
return self._line_setting
@line_setting.setter
def line_setting(self, line_setting):
"""Sets the line_setting of this EngineTask.
:param line_setting: The line_setting of this EngineTask. # noqa: E501
:type: EngineTaskLineSetting
"""
self._line_setting = line_setting
@property
def config(self):
"""Gets the config of this EngineTask. # noqa: E501
:return: The config of this EngineTask. # noqa: E501
:rtype: list[Config]
"""
return self._config
@config.setter
def config(self, config):
"""Sets the config of this EngineTask.
:param config: The config of this EngineTask. # noqa: E501
:type: list[Config]
"""
self._config = config
@property
def updated(self):
"""Gets the updated of this EngineTask. # noqa: E501
:return: The updated of this EngineTask. # noqa: E501
:rtype: datetime
"""
return self._updated
@updated.setter
def updated(self, updated):
"""Sets the updated of this EngineTask.
:param updated: The updated of this EngineTask. # noqa: E501
:type: datetime
"""
self._updated = updated
@property
def created(self):
"""Gets the created of this EngineTask. # noqa: E501
:return: The created of this EngineTask. # noqa: E501
:rtype: datetime
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this EngineTask.
:param created: The created of this EngineTask. # noqa: E501
:type: datetime
"""
self._created = created
@property
def etag(self):
"""Gets the etag of this EngineTask. # noqa: E501
:return: The etag of this EngineTask. # noqa: E501
:rtype: str
"""
return self._etag
@etag.setter
def etag(self, etag):
"""Sets the etag of this EngineTask.
:param etag: The etag of this EngineTask. # noqa: E501
:type: str
"""
self._etag = etag
@property
def links(self):
"""Gets the links of this EngineTask. # noqa: E501
:return: The links of this EngineTask. # noqa: E501
:rtype: Links
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this EngineTask.
:param links: The links of this EngineTask. # noqa: E501
:type: Links
"""
self._links = links
def to_dict(self):
    """Return the model's properties as a plain ``dict``.

    Recursively converts nested models (anything exposing ``to_dict``)
    and containers of models, so the result is JSON-serializable.

    :return: dict mapping attribute names to plain values.
    """
    result = {}

    # `openapi_types` maps attribute name -> declared type for this model.
    # Plain `.items()` iterates fine on both Python 2 and 3, so the
    # `six.iteritems` compatibility shim is unnecessary here.
    for attr, _ in self.openapi_types.items():
        value = getattr(self, attr)
        if isinstance(value, list):
            # Convert each list element that is itself a model.
            result[attr] = list(map(
                lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                value
            ))
        elif hasattr(value, "to_dict"):
            result[attr] = value.to_dict()
        elif isinstance(value, dict):
            # Convert model values inside dicts, keeping keys as-is.
            result[attr] = dict(map(
                lambda item: (item[0], item[1].to_dict())
                if hasattr(item[1], "to_dict") else item,
                value.items()
            ))
        else:
            result[attr] = value

    return result
def to_str(self):
    """Return a pretty-printed string of the model's ``dict`` form."""
    model_dict = self.to_dict()
    return pprint.pformat(model_dict)
def __repr__(self):
    """Delegate `print` and `pprint` output to :meth:`to_str`."""
    return self.to_str()
def __eq__(self, other):
    """Two EngineTask instances are equal when all their attributes match."""
    if isinstance(other, EngineTask):
        return self.__dict__ == other.__dict__
    return False
def __ne__(self, other):
    """Inverse of :meth:`__eq__`."""
    return not (self == other)
| 27.644491 | 278 | 0.590885 | 13,074 | 0.983229 | 0 | 0 | 8,045 | 0.605024 | 0 | 0 | 7,002 | 0.526585 |
ac9bf1bc57608adb9efd17714313aca04098cd39 | 1,876 | py | Python | scripts/05_skew.py | fabiobaccarin/allstate-loss | 4c0fea6ea28847fc67b9e742dc3ac30e1ac4d10a | [
"MIT"
] | null | null | null | scripts/05_skew.py | fabiobaccarin/allstate-loss | 4c0fea6ea28847fc67b9e742dc3ac30e1ac4d10a | [
"MIT"
] | null | null | null | scripts/05_skew.py | fabiobaccarin/allstate-loss | 4c0fea6ea28847fc67b9e742dc3ac30e1ac4d10a | [
"MIT"
] | null | null | null | """
Analyses skewness for continuous features
Options:
A. Log
B. Yeo-Johnson
C. QuantileTransformer
"""
import json
import pandas as pd
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import power_transform, quantile_transform
from pathlib import Path
p = Path(__file__).parents[1]
# To load project modules
import sys; sys.path.append(str(p))
from src.logger import LOGGER
from src.utils import skewTest
LOGGER.info('Load data')
X = pd.read_pickle(p.joinpath('data', 'interim', 'research.pkl')).filter(like='cont')
LOGGER.info('Process data - Logarithm')
A = (
pd.DataFrame(X.apply(skewTest, args=(np.log1p,)).to_list())
.assign(Transformation='Logarithm')
.set_index('Transformation')
)
LOGGER.info('Process data - Yeo-Johnson')
B = (
pd.DataFrame(
X.apply(lambda s: skewTest(np.reshape(s.values, (-1, 1)), power_transform))
.to_list()
)
.apply(lambda s: s.explode().astype(float))
.assign(Transformation='Yeo-Johnson')
.set_index('Transformation')
)
LOGGER.info('Process data - Quantile Transform')
C = (
pd.DataFrame(
X.apply(lambda s: skewTest(
np.reshape(s.values, (-1, 1)),
quantile_transform,
output_distribution='normal',
random_state=0
))
.to_list()
)
.apply(lambda s: s.explode().astype(float))
.assign(Transformation='Quantile Transform')
.set_index('Transformation')
)
LOGGER.info('Computing result')
(
pd.concat([A, B, C]).reset_index().groupby('Transformation').mean()
.assign(CostEffectivenessRatio=lambda df: df['Time'].div(df['Insignificance']))
.sort_values('CostEffectivenessRatio')
.to_html(
buf=p.joinpath('reports', 'tables', '02ContTransformations.html'),
float_format='{:.2f}'.format,
bold_rows=False
)
) | 26.055556 | 85 | 0.666844 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 509 | 0.271322 |
ac9c868dbd3879029efdc40e10b101e58e412a82 | 7,486 | py | Python | trakt/mapper/sync.py | ruinernin/trakt.py | a579b0adf589e1fb8efe11d1e9b5c4f6fbb78157 | [
"MIT"
] | null | null | null | trakt/mapper/sync.py | ruinernin/trakt.py | a579b0adf589e1fb8efe11d1e9b5c4f6fbb78157 | [
"MIT"
] | null | null | null | trakt/mapper/sync.py | ruinernin/trakt.py | a579b0adf589e1fb8efe11d1e9b5c4f6fbb78157 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
from trakt.mapper.core.base import Mapper
import logging
log = logging.getLogger(__name__)
class SyncMapper(Mapper):
    """Maps items returned by Trakt's sync endpoints into model objects.

    NOTE(review): relies on helpers inherited from `Mapper`
    (`map_item`, `get_ids`, `construct`) that are defined elsewhere.
    """

    @classmethod
    def process(cls, client, store, items, media=None, flat=False, **kwargs):
        """Map `items` into `store`; with `flat=True`, return a flat iterator."""
        if flat:
            # Return flat item iterator
            return cls.iterate_items(
                client, store, items, cls.item,
                media=media,
                **kwargs
            )

        return cls.map_items(
            client, store, items, cls.item,
            media=media,
            **kwargs
        )

    @classmethod
    def item(cls, client, store, item, media=None, **kwargs):
        """Map one item, dispatching on its 'type' field (or `media` fallback)."""
        i_type = item.get('type') or media

        # Find item type function
        if i_type.startswith('movie'):
            func = cls.movie
        elif i_type.startswith('show'):
            func = cls.show
        elif i_type.startswith('season'):
            func = cls.season
        elif i_type.startswith('episode'):
            func = cls.episode
        else:
            raise ValueError('Unknown item type: %r' % i_type)

        # Map item
        return func(
            client, store, item,
            **kwargs
        )

    #
    # Movie
    #

    @classmethod
    def movies(cls, client, store, items, **kwargs):
        """Map a collection of movie items into `store`."""
        return cls.map_items(client, store, items, cls.movie, **kwargs)

    @classmethod
    def movie(cls, client, store, item, **kwargs):
        """Map one movie item; merge root-level info when present."""
        movie = cls.map_item(client, store, item, 'movie', **kwargs)

        # Update with root info
        if 'movie' in item:
            movie._update(item)

        return movie

    #
    # Show
    #

    @classmethod
    def shows(cls, client, store, items, **kwargs):
        """Map a collection of show items into `store`."""
        return cls.map_items(client, store, items, cls.show, **kwargs)

    @classmethod
    def show(cls, client, store, item, **kwargs):
        """Map one show item, including any nested seasons and episodes."""
        show = cls.map_item(client, store, item, 'show', **kwargs)

        # Update with root info
        if 'show' in item:
            show._update(item)

        # Process any episodes in the item
        for i_season in item.get('seasons', []):
            season_num = i_season.get('number')

            season = cls.show_season(client, show, season_num, **kwargs)

            for i_episode in i_season.get('episodes', []):
                episode_num = i_episode.get('number')

                cls.show_episode(client, season, episode_num, i_episode, **kwargs)

        return show

    @classmethod
    def show_season(cls, client, show, season_num, item=None, **kwargs):
        """Map (or create) season `season_num` inside `show.seasons`."""
        season = cls.map_item(client, show.seasons, item, 'season', key=season_num, parent=show, **kwargs)
        season.show = show

        # Update with root info
        if item and 'season' in item:
            season._update(item)

        return season

    @classmethod
    def show_episode(cls, client, season, episode_num, item=None, **kwargs):
        """Map (or create) episode `episode_num` inside `season.episodes`."""
        episode = cls.map_item(
            client, season.episodes, item, 'episode',
            key=episode_num,
            parent=season,
            **kwargs
        )
        # Link the episode back to its show and season.
        episode.show = season.show
        episode.season = season

        # Update with root info
        if item and 'episode' in item:
            episode._update(item)

        return episode

    #
    # Season
    #

    @classmethod
    def seasons(cls, client, store, items, **kwargs):
        """Map a collection of season items into `store`."""
        return cls.map_items(client, store, items, cls.season, **kwargs)

    @classmethod
    def season(cls, client, store, item, **kwargs):
        """Map one season item, building its parent show first."""
        i_season = item.get('season', {})

        season_num = i_season.get('number')

        # Build `show`
        show = cls.show(client, store, item['show'])

        if show is None:
            # Unable to create show
            return None

        # Build `season`
        season = cls.show_season(client, show, season_num, item, **kwargs)

        return season

    #
    # Episode
    #

    @classmethod
    def episodes(cls, client, store, items, **kwargs):
        """Map a collection of episode items into `store`."""
        return cls.map_items(client, store, items, cls.episode, **kwargs)

    @classmethod
    def episode(cls, client, store, item, append=False, **kwargs):
        """Map one episode item, building its show and season first."""
        i_episode = item.get('episode', {})

        season_num = i_episode.get('season')
        episode_num = i_episode.get('number')

        # Build `show`
        show = cls.show(client, store, item['show'])

        if show is None:
            # Unable to create show
            return None

        # Build `season`
        season = cls.show_season(
            client, show, season_num,
            **kwargs
        )

        # Build `episode`
        episode = cls.show_episode(
            client, season, episode_num, item,
            append=append,
            **kwargs
        )

        return episode

    #
    # Helpers
    #

    @classmethod
    def map_items(cls, client, store, items, func, **kwargs):
        """Map every entry of `items` into `store` with `func`; return `store`."""
        if store is None:
            store = {}

        for item in items:
            result = func(
                client, store, item,
                **kwargs
            )

            if result is None:
                log.warning('Unable to map item: %s', item)

        return store

    @classmethod
    def iterate_items(cls, client, store, items, func, **kwargs):
        """Generator: map each item into its per-type sub-store, yielding each."""
        if store is None:
            store = {}

        # Ensure one sub-store per media type.
        if 'movies' not in store:
            store['movies'] = {}

        if 'shows' not in store:
            store['shows'] = {}

        if 'seasons' not in store:
            store['seasons'] = {}

        if 'episodes' not in store:
            store['episodes'] = {}

        for item in items:
            i_type = item.get('type')

            if i_type == 'movie':
                i_store = store['movies']
            elif i_type == 'show':
                i_store = store['shows']
            elif i_type == 'season':
                i_store = store['seasons']
            elif i_type == 'episode':
                i_store = store['episodes']
            else:
                raise ValueError('Unknown item type: %r' % i_type)

            # Map item
            result = func(
                client, i_store, item,
                append=True,
                **kwargs
            )

            if result is None:
                log.warning('Unable to map item: %s', item)

            # Yield item in iterator
            yield result

    @classmethod
    def map_item(cls, client, store, item, media, key=None, parent=None, append=False, **kwargs):
        """Construct or update one model object in `store`, keyed by its pk.

        Returns the object, or None if no primary key could be determined.
        """
        if item and media in item:
            i_data = item[media]
        else:
            i_data = item

        # Retrieve item key
        pk, keys = cls.get_ids(media, i_data, parent=parent)

        if key is not None:
            pk = key

        if not keys:
            keys = [pk]

        if pk is None:
            # Item has no keys
            return None

        if store is None or pk not in store or append:
            # Construct item
            obj = cls.construct(client, media, i_data, keys, **kwargs)

            if store is None:
                return obj

            # Update store
            if append:
                if pk in store:
                    store[pk].append(obj)
                else:
                    store[pk] = [obj]
            else:
                store[pk] = obj

            return obj
        else:
            # Update existing item
            store[pk]._update(i_data, **kwargs)

        return store[pk]
| 25.462585 | 106 | 0.520438 | 7,324 | 0.97836 | 1,165 | 0.155624 | 7,081 | 0.945899 | 0 | 0 | 899 | 0.120091 |
ac9d0e2c93daef472402da87cdc770fca25ab226 | 669 | py | Python | src/murus_gallicus/constants.py | HicBoux/PyMurusGallicus | b1f8ee6f1f1100aa6e04b11db6522b86b14d1c72 | [
"MIT"
] | null | null | null | src/murus_gallicus/constants.py | HicBoux/PyMurusGallicus | b1f8ee6f1f1100aa6e04b11db6522b86b14d1c72 | [
"MIT"
] | null | null | null | src/murus_gallicus/constants.py | HicBoux/PyMurusGallicus | b1f8ee6f1f1100aa6e04b11db6522b86b14d1c72 | [
"MIT"
] | null | null | null | import pygame
pygame.init()

# Presumably the frame-rate cap for the main loop -- confirm at call site.
FPS = 60

# Window size in pixels; the board is a ROWS x COLS grid of squares.
WIDTH, HEIGHT = 800, 700
ROWS, COLS = 7, 8
SQUARE_SIZE = HEIGHT//ROWS

# GRAPHICAL USER INTERFACE
ICON_PATH = './src/murus_gallicus/assets/noun_checkers_1684698.png'
WINDOW = pygame.display.set_mode((WIDTH, HEIGHT))
PADDING = 20
OUTLINE = 2

# RGB COLORS
RED = (255, 0, 0)
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
CLEAR_BLUE = (102, 178, 255)
BLUE = (0, 0, 255)
GREY = (128, 128, 128)
SOFT_YELLOW = (246, 233, 195)
SOFT_RED = (244, 129, 134)
CELTIC_GREEN = (1, 135, 73)
DARK_GREEN = (14, 79, 0)
SPQR_RED = (213, 28, 31)
DARK_RED = (140, 8, 2)

# Game-mode labels (presumably shown in the UI) and minimax search depth.
P_2_Minimax = "Player VS MiniMax AI"
P_2_P = "Player vs Player"
AI_MINIMAX_DEPTH = 3
| 20.272727 | 67 | 0.672646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 133 | 0.198804 |
ac9de99ae05f6016bcc3df5250b508a3167f4660 | 5,855 | py | Python | streamer/output_stream.py | meryacine/shaka-streamer | 7b7e90143f531c52d96c162cc7393862db0830b7 | [
"Apache-2.0"
] | 154 | 2019-08-29T16:53:24.000Z | 2022-02-25T00:29:56.000Z | streamer/output_stream.py | meryacine/shaka-streamer | 7b7e90143f531c52d96c162cc7393862db0830b7 | [
"Apache-2.0"
] | 101 | 2019-08-30T17:34:51.000Z | 2022-03-02T18:46:22.000Z | streamer/output_stream.py | meryacine/shaka-streamer | 7b7e90143f531c52d96c162cc7393862db0830b7 | [
"Apache-2.0"
] | 56 | 2019-09-08T17:47:22.000Z | 2022-02-23T17:35:11.000Z | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains information about each output stream."""
from streamer.bitrate_configuration import AudioCodec, AudioChannelLayout, VideoCodec, VideoResolution
from streamer.input_configuration import Input, MediaType
from streamer.pipe import Pipe
from typing import Dict, Union
class OutputStream(object):
  """Base class for output streams.

  Ties together the input a stream is produced from, the codec used to
  produce it (None for text streams), and the pipe that carries the media
  from the transcoder to the packager.
  """

  def __init__(self,
               type: MediaType,
               input: Input,
               codec: Union[AudioCodec, VideoCodec, None],
               pipe_dir: str,
               skip_transcoding: bool = False,
               pipe_suffix: str = '') -> None:
    """
    Args:
      type: The media type of this stream.
      input: The input this stream is produced from.
      codec: The output codec, or None when there is no codec (text).
      pipe_dir: Directory in which to create the IPC pipe.
      skip_transcoding: When True, the packager reads the input file
        directly and no IPC pipe is created.
      pipe_suffix: Suffix for the IPC pipe name (e.g. '.vtt').
    """
    self.type: MediaType = type
    self.skip_transcoding = skip_transcoding
    self.input: Input = input
    # Filled in by subclasses; supplies the fields used in output filenames.
    self.features: Dict[str, str] = {}
    self.codec: Union[AudioCodec, VideoCodec, None] = codec

    if self.skip_transcoding:
      # If skip_transcoding is specified, let the Packager read from a plain
      # file instead of an IPC pipe.
      self.ipc_pipe = Pipe.create_file_pipe(self.input.name, mode='r')
    else:
      self.ipc_pipe = Pipe.create_ipc_pipe(pipe_dir, pipe_suffix)

  def is_hardware_accelerated(self) -> bool:
    """Returns True if this output stream uses hardware acceleration."""
    if self.codec:
      return self.codec.is_hardware_accelerated()
    return False

  def get_ffmpeg_codec_string(self, hwaccel_api: str) -> str:
    """Returns a codec string accepted by FFmpeg for this stream's codec."""
    assert self.codec is not None
    return self.codec.get_ffmpeg_codec_string(hwaccel_api)

  def is_dash_only(self) -> bool:
    """Returns True if the output format is restricted to DASH protocol"""
    if self.codec is not None:
      return self.codec.get_output_format() == 'webm'
    return False

  def _template_pipe(self, templates: Dict[MediaType, str]) -> Pipe:
    """Formats the template for this stream's type into a writable file Pipe.

    Shared by the three get_*_seg_file methods below, which previously
    duplicated this logic.  Placeholders are filled from self.features;
    '$Number$' is left for the packager to expand, not Python.
    """
    path_templ = templates[self.type].format(**self.features)
    return Pipe.create_file_pipe(path_templ, mode='w')

  def get_init_seg_file(self) -> Pipe:
    """Returns a writable Pipe for the init segment's output filename."""
    INIT_SEGMENT = {
        MediaType.AUDIO: 'audio_{language}_{channels}c_{bitrate}_{codec}_init.{format}',
        MediaType.VIDEO: 'video_{resolution_name}_{bitrate}_{codec}_init.{format}',
        MediaType.TEXT: 'text_{language}_init.{format}',
    }
    return self._template_pipe(INIT_SEGMENT)

  def get_media_seg_file(self) -> Pipe:
    """Returns a writable Pipe for the media segments' filename template."""
    MEDIA_SEGMENT = {
        MediaType.AUDIO: 'audio_{language}_{channels}c_{bitrate}_{codec}_$Number$.{format}',
        MediaType.VIDEO: 'video_{resolution_name}_{bitrate}_{codec}_$Number$.{format}',
        MediaType.TEXT: 'text_{language}_$Number$.{format}',
    }
    return self._template_pipe(MEDIA_SEGMENT)

  def get_single_seg_file(self) -> Pipe:
    """Returns a writable Pipe for a single-file (non-segmented) output."""
    SINGLE_SEGMENT = {
        MediaType.AUDIO: 'audio_{language}_{channels}c_{bitrate}_{codec}.{format}',
        MediaType.VIDEO: 'video_{resolution_name}_{bitrate}_{codec}.{format}',
        MediaType.TEXT: 'text_{language}.{format}',
    }
    return self._template_pipe(SINGLE_SEGMENT)
class AudioOutputStream(OutputStream):
  """An audio output stream: one codec at one channel layout."""

  def __init__(self,
               input: Input,
               pipe_dir: str,
               codec: AudioCodec,
               channel_layout: AudioChannelLayout) -> None:
    super().__init__(MediaType.AUDIO, input, codec, pipe_dir)
    # Re-declare codec with the narrower audio-only type.
    self.codec: AudioCodec = codec
    self.layout = channel_layout

    # Fields used to generate the output filename.
    features: Dict[str, str] = {}
    features['language'] = input.language
    features['channels'] = str(self.layout.max_channels)
    features['bitrate'] = self.get_bitrate()
    features['format'] = self.codec.get_output_format()
    features['codec'] = self.codec.value
    self.features = features

  def get_bitrate(self) -> str:
    """Returns the bitrate for this stream."""
    return self.layout.bitrates[self.codec]
class VideoOutputStream(OutputStream):
  """A video output stream: one codec at one resolution."""

  def __init__(self,
               input: Input,
               pipe_dir: str,
               codec: VideoCodec,
               resolution: VideoResolution) -> None:
    super().__init__(MediaType.VIDEO, input, codec, pipe_dir)
    # Re-declare codec with the narrower video-only type.
    self.codec: VideoCodec = codec
    self.resolution = resolution

    # Fields used to generate the output filename.
    features: Dict[str, str] = {}
    features['resolution_name'] = self.resolution.get_key()
    features['bitrate'] = self.get_bitrate()
    features['format'] = self.codec.get_output_format()
    features['codec'] = self.codec.value
    self.features = features

  def get_bitrate(self) -> str:
    """Returns the bitrate for this stream."""
    return self.resolution.bitrates[self.codec]
class TextOutputStream(OutputStream):
  """A text (subtitle/caption) output stream."""

  def __init__(self,
               input: Input,
               pipe_dir: str,
               skip_transcoding: bool):
    # Text streams have no codec per se; passing None keeps the generic
    # OutputStream handling (which expects the attribute) working.
    super().__init__(MediaType.TEXT, input, None, pipe_dir,
                     skip_transcoding, pipe_suffix='.vtt')

    # Fields used to generate the output filename.
    self.features = {
        'language': input.language,
        'format': 'mp4',
    }
| 35.920245 | 102 | 0.678907 | 4,990 | 0.852263 | 0 | 0 | 0 | 0 | 0 | 0 | 2,112 | 0.360717 |
ac9eba2ba9ce65d24ae5328dd6dc146fcdf58207 | 939 | py | Python | nghp/misc/utils.py | squoilin/NewGenerationHeatPumps | 6db23af4dbad2098043a74d1871dbcc6f8eee468 | [
"MIT"
] | 1 | 2021-04-12T08:17:59.000Z | 2021-04-12T08:17:59.000Z | nghp/misc/utils.py | squoilin/NewGenerationHeatPumps | 6db23af4dbad2098043a74d1871dbcc6f8eee468 | [
"MIT"
] | null | null | null | nghp/misc/utils.py | squoilin/NewGenerationHeatPumps | 6db23af4dbad2098043a74d1871dbcc6f8eee468 | [
"MIT"
] | 1 | 2020-08-06T16:56:34.000Z | 2020-08-06T16:56:34.000Z | # -*- coding: utf-8 -*-
"""
Useful functions for the Next Generation Heat Pump library
@author: Sylvain Quoilin
"""
import sys,os
class NoStdStreams(object):
    """Context manager that silences stdout/stderr inside a `with` block.

    Usage:
        with NoStdStreams():
            noisy_call()
    """

    def __init__(self, stdout=None, stderr=None):
        # Replacements default to os.devnull, discarding all output.
        self.devnull = open(os.devnull, 'w')
        self._stdout = stdout or self.devnull or sys.stdout
        self._stderr = stderr or self.devnull or sys.stderr

    def __enter__(self):
        # Remember the live streams, flush them, then swap in the replacements.
        self.old_stdout = sys.stdout
        self.old_stderr = sys.stderr
        self.old_stdout.flush()
        self.old_stderr.flush()
        sys.stdout = self._stdout
        sys.stderr = self._stderr

    def __exit__(self, exc_type, exc_value, traceback):
        # Flush the replacements, restore the originals, release devnull.
        self._stdout.flush()
        self._stderr.flush()
        sys.stdout = self.old_stdout
        sys.stderr = self.old_stderr
        self.devnull.close()
| 29.34375 | 65 | 0.642173 | 805 | 0.857295 | 0 | 0 | 0 | 0 | 0 | 0 | 246 | 0.261981 |
aca0934a713aae21b84794c1fbd0e3f6f9ca21ea | 829 | py | Python | regex_2.py | python-pro/Learn-Python-by-Doing-by-Jose-Espanol | 5a4a6a6843fec6bbe231900aa0053021c69649d9 | [
"BSD-3-Clause"
] | null | null | null | regex_2.py | python-pro/Learn-Python-by-Doing-by-Jose-Espanol | 5a4a6a6843fec6bbe231900aa0053021c69649d9 | [
"BSD-3-Clause"
] | null | null | null | regex_2.py | python-pro/Learn-Python-by-Doing-by-Jose-Espanol | 5a4a6a6843fec6bbe231900aa0053021c69649d9 | [
"BSD-3-Clause"
] | null | null | null | # B_R_R
# M_S_A_W
"""
Coding problem on regular expressions:

To work with regular expressions (regex) we import the `re` module.
`re.search` scans a string and returns a match object for the first
location where the pattern matches, or None if there is no match.
"""
import re

price = 'Price: $12,065.99'

# Raw string (r'...') so the backslashes reach the regex engine verbatim;
# '\$' in a plain string is an invalid escape sequence and raises a
# DeprecationWarning/SyntaxWarning on modern Python.  ',' needs no escape
# inside a character class.
expression = r'Price: \$([0-9,]*\.[0-9]*)'

matches = re.search(expression, price)
print(matches.group(0))  # entire match
print(matches.group(1))  # first thing in brackets

# The captured amount may contain thousands separators, so strip the
# commas before converting to float.
price_number_comma = matches.group(1).replace(",", "")
price_number = float(price_number_comma)
print(price_number_comma)
| 20.219512 | 103 | 0.746683 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 580 | 0.699638 |
aca120dbe457333e15ac5e2607f6815fc7e3bb5a | 4,465 | py | Python | deepchem/data/tests/test_shape.py | deloragaskins/deepchem | 234ab699cdb997e5963966a8b6926cb2cda7c064 | [
"MIT"
] | 3,782 | 2016-02-21T03:53:11.000Z | 2022-03-31T16:10:26.000Z | deepchem/data/tests/test_shape.py | deloragaskins/deepchem | 234ab699cdb997e5963966a8b6926cb2cda7c064 | [
"MIT"
] | 2,666 | 2016-02-11T01:54:54.000Z | 2022-03-31T11:14:33.000Z | deepchem/data/tests/test_shape.py | deloragaskins/deepchem | 234ab699cdb997e5963966a8b6926cb2cda7c064 | [
"MIT"
] | 1,597 | 2016-02-21T03:10:08.000Z | 2022-03-30T13:21:28.000Z | import deepchem as dc
import numpy as np
import os
def test_numpy_dataset_get_shape():
  """get_shape on a NumpyDataset must mirror the shapes of X, y, w, ids."""
  n_samples, n_features, n_tasks = 100, 10, 10

  # Random data of the expected shapes.
  X = np.random.rand(n_samples, n_features)
  y = np.random.randint(2, size=(n_samples, n_tasks))
  w = np.random.randint(2, size=(n_samples, n_tasks))
  ids = np.array(["id"] * n_samples)

  dataset = dc.data.NumpyDataset(X, y, w, ids)
  assert dataset.get_shape() == (X.shape, y.shape, w.shape, ids.shape)
def test_disk_dataset_get_shape_single_shard():
  """get_shape on a single-shard DiskDataset must mirror X, y, w, ids."""
  n_samples, n_features, n_tasks = 100, 10, 10

  # Random data of the expected shapes.
  X = np.random.rand(n_samples, n_features)
  y = np.random.randint(2, size=(n_samples, n_tasks))
  w = np.random.randint(2, size=(n_samples, n_tasks))
  ids = np.array(["id"] * n_samples)

  dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
  assert dataset.get_shape() == (X.shape, y.shape, w.shape, ids.shape)
def test_disk_dataset_get_shape_multishard():
  """get_shape must still be correct after resharding a DiskDataset."""
  n_samples, n_features, n_tasks = 100, 10, 10

  X = np.random.rand(n_samples, n_features)
  y = np.random.randint(2, size=(n_samples, n_tasks))
  w = np.random.randint(2, size=(n_samples, n_tasks))
  ids = np.array(["id"] * n_samples)

  dataset = dc.data.DiskDataset.from_numpy(X, y, w, ids)
  # Split the single shard into 10 shards of 10 rows each.
  dataset.reshard(shard_size=10)
  assert dataset.get_shape() == (X.shape, y.shape, w.shape, ids.shape)
def test_disk_dataset_get_legacy_shape_single_shard():
  """get_shape must work on a legacy-format DiskDataset fixture."""
  # Known shape of the checked-in legacy_dataset fixture, kept around
  # for testing purposes.
  n_samples, n_features, n_tasks = 100, 10, 10

  fixture_dir = os.path.join(
      os.path.dirname(os.path.abspath(__file__)), "legacy_dataset")
  dataset = dc.data.DiskDataset(fixture_dir)

  X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
  assert X_shape == (n_samples, n_features)
  assert y_shape == (n_samples, n_tasks)
  assert w_shape == (n_samples, n_tasks)
  assert ids_shape == (n_samples,)
def test_disk_dataset_get_legacy_shape_multishard():
  """get_shape must work on a multisharded legacy-format DiskDataset."""
  # Known shape of the checked-in legacy_dataset_reshard fixture, a
  # sharded dataset in the legacy format kept around for testing.
  n_samples, n_features, n_tasks = 100, 10, 10

  fixture_dir = os.path.join(
      os.path.dirname(os.path.abspath(__file__)), "legacy_dataset_reshard")
  dataset = dc.data.DiskDataset(fixture_dir)

  # The fixture is stored as 10 shards.
  assert dataset.get_number_shards() == 10
  X_shape, y_shape, w_shape, ids_shape = dataset.get_shape()
  assert X_shape == (n_samples, n_features)
  assert y_shape == (n_samples, n_tasks)
  assert w_shape == (n_samples, n_tasks)
  assert ids_shape == (n_samples,)
def test_get_shard_size():
  """get_shard_size must not rely on the dataset having a labels column.

  Datasets loaded without a labels column get y == None from
  DataLoader.create_dataset; get_shard_size (and everything built on it,
  e.g. the splitters) must still work in that case.

  Note
  ----
  DiskDatasets without labels cannot be resharded!
  """
  csv_path = os.path.join(
      os.path.dirname(os.path.abspath(__file__)), "reaction_smiles.csv")
  loader = dc.data.CSVLoader(
      tasks=[], feature_field="reactions", featurizer=dc.feat.DummyFeaturizer())
  dataset = loader.create_dataset(csv_path)
  assert dataset.get_shard_size() == 4
| 33.074074 | 80 | 0.738186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,280 | 0.286674 |
aca1d2adb961819b9cb0c1cf592fcc379fa33038 | 1,104 | py | Python | docs/hydrostatic-pressure/hydrostatic_pressure.py | TOKU-Systems/tutorials | 119329b8efa614d39cda342b6bb084476fbec551 | [
"MIT"
] | null | null | null | docs/hydrostatic-pressure/hydrostatic_pressure.py | TOKU-Systems/tutorials | 119329b8efa614d39cda342b6bb084476fbec551 | [
"MIT"
] | 1 | 2021-09-03T22:11:43.000Z | 2021-09-03T22:11:43.000Z | docs/hydrostatic-pressure/hydrostatic_pressure.py | TOKU-Systems/tutorials | 119329b8efa614d39cda342b6bb084476fbec551 | [
"MIT"
] | null | null | null | import pandas as pd
# Fetch the latest 'Pressure' reading per signal, along with its asset,
# hardpoint and timestamp.
# NOTE(review): credentials are hard-coded in the connection string; move
# them to an environment variable or config file before sharing this script.
df = pd.read_sql(
    '''
    SELECT a.name, h.name, s.name, sd.t, sd.y
    FROM assets a
    JOIN hardpoints h ON a.id = h.asset_id
    JOIN signals s ON h.id = s.hardpoint_id
    JOIN LATERAL (
        SELECT x.t, x.y
        FROM signal_data x
        WHERE x.signal_id = s.id
        ORDER BY x.t DESC
        LIMIT 1
    ) sd ON true where s.name='Pressure'
    ''',
    "postgresql://data_viewer:tokuapidemosystems@apidemo.tokusystems.com/new_mareland")

# Give the columns human-readable names; 'Height' starts out holding the
# raw pressure reading and is converted to a liquid height below.
df_new = df.set_axis([
    'Asset name',
    'Hardpoint',
    'Signal name',
    'Last time',
    'Height'], axis=1, inplace=False)

print('Enter the specific gravity of the liquid in kg/m3: ')
user_input_specific_gravity = float(input())

# Hydrostatic pressure: p = rho * g * h  =>  h = p / (rho * g).
g = 9.81
df_new['Height'] = df_new['Height']/(user_input_specific_gravity * g)

# Vectorized timestamp conversion replaces the old per-row iterrows loop,
# which also contained a no-op re-assignment of the Height column.
df_new['Last time'] = pd.to_datetime(df_new['Last time'])

print(df_new.to_string(
    formatters={
        'Last time': lambda x: f'{pd.to_datetime(x,unit="D"):%X}',
        'Height': lambda x: f'{x:.5g}'
    }))
| 28.307692 | 87 | 0.620471 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 640 | 0.57971 |
aca1e7eab7175173532878601e9e1f2965ea7d45 | 557 | py | Python | 3-data-hiding-1.py | Otumian-empire/My-PyClasses | 50483bc0463bfca9cf9d6815fbfcb5331f51ecea | [
"MIT"
] | null | null | null | 3-data-hiding-1.py | Otumian-empire/My-PyClasses | 50483bc0463bfca9cf9d6815fbfcb5331f51ecea | [
"MIT"
] | null | null | null | 3-data-hiding-1.py | Otumian-empire/My-PyClasses | 50483bc0463bfca9cf9d6815fbfcb5331f51ecea | [
"MIT"
] | null | null | null | # data hiding - encapsulation
# Encapsulation can be approximated by making attributes or methods private.
# Python has no `private` keyword; instead, prefix the identifier with one
# or two underscores. This is a convention (plus name mangling for double
# underscores), not true access control, so it matters mostly when the
# object is imported or used as a module-level API.
# Naming convention:
#   no underscore     = public
#   single underscore = "protected" (by convention only)
#   double underscore = "private" (name-mangled to _ClassName__attr)
class Person:
    # Class-level defaults; shadowed by the instance attributes set in __init__.
    __name = ''
    __age = 0

    def __init__(self, name, age):
        self.__name = name
        self.__age = age

# Name mangling rewrites __name to _Person__name, so it is still reachable:
me = Person("John Doe", 32)
print(me._Person__name)
| 24.217391 | 64 | 0.734291 | 112 | 0.201077 | 0 | 0 | 0 | 0 | 0 | 0 | 390 | 0.70018 |
aca208fb037c8c81bebdd200fbad9872059421b9 | 2,311 | py | Python | lib/googlecloudsdk/core/util/text.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/googlecloudsdk/core/util/text.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 11 | 2020-02-29T02:51:12.000Z | 2022-03-30T23:20:08.000Z | lib/googlecloudsdk/core/util/text.py | kustodian/google-cloud-sdk | b6bae4137d4b58030adb3dcb1271216dfb19f96d | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for manipulating text."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import collections
def Pluralize(num, word, plural=None):
  """Return the singular or plural form of word, depending on num.

  Args:
    num: int, the number of objects to count.
    word: str, the word to pluralize.
    plural: str, an irregular plural form to use instead of word + 's'.

  Returns:
    str: word when num == 1, otherwise the plural form.
  """
  if num == 1:
    return word
  if plural:
    return plural
  return word + 's'
# Unit name -> number of seconds per unit, ordered smallest to largest.
# The insertion order is load-bearing: PrettyTimeDelta walks this mapping in
# order and stops at the first unit larger than the delta. OrderedDict keeps
# that order guaranteed on pre-3.7 interpreters where plain dicts do not.
_SECONDS_PER = collections.OrderedDict([
    ('second', 1),
    ('minute', 60),
    ('hour', 60 * 60),
    ('day', 60 * 60 * 24)
])
def GetArticle(noun):
  """Gets the indefinite article (a or an) for the given noun.

  Args:
    noun: str, the noun to pick an article for.

  Returns:
    str: 'an' if noun starts with a vowel (case-insensitive), else 'a'.
    An empty noun yields 'a'.
  """
  # noun[:1] instead of noun[0]: no IndexError on '', and .lower() fixes
  # capitalized nouns ('Apple' previously got 'a' instead of 'an').
  return 'an' if noun[:1].lower() in ('a', 'e', 'i', 'o', 'u') else 'a'
def _TotalSeconds(delta):
"""Re-implementation of datetime.timedelta.total_seconds() for Python 2.6."""
return delta.days * 24 * 60 * 60 + delta.seconds
def PrettyTimeDelta(delta):
  """Format a datetime.timedelta as a short human-readable string.

  The largest unit that fits is chosen and the count is rounded down:
  timedelta(seconds=0) -> '0 seconds', timedelta(minutes=1) -> '1 minute',
  timedelta(hours=2) -> '2 hours', timedelta(days=3) -> '3 days'.

  Args:
    delta: a datetime.timedelta object

  Returns:
    str, a human-readable version of the time delta
  """
  total_seconds = int(_TotalSeconds(delta))
  count, unit = total_seconds, 'second'
  for unit_name, unit_seconds in _SECONDS_PER.items():
    if total_seconds < unit_seconds:
      # _SECONDS_PER is ordered smallest-to-largest, so no later unit fits.
      break
    unit = unit_name
    count = total_seconds // unit_seconds
  return '{0} {1}'.format(count, Pluralize(count, unit))
| 26.872093 | 79 | 0.687148 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,531 | 0.662484 |
aca30b0e051054cf8605e9d7df37ed0c840f2b51 | 383 | py | Python | for_fun/mul_and_div/mul_and_div.py | trisct/Software-Tutorials | 50d7851b861700fe256dfed97f84dc321a5286dc | [
"CC0-1.0"
] | 2 | 2021-08-22T05:19:26.000Z | 2021-12-21T12:03:57.000Z | for_fun/mul_and_div/mul_and_div.py | trisct/Software-Tutorials | 50d7851b861700fe256dfed97f84dc321a5286dc | [
"CC0-1.0"
] | null | null | null | for_fun/mul_and_div/mul_and_div.py | trisct/Software-Tutorials | 50d7851b861700fe256dfed97f84dc321a5286dc | [
"CC0-1.0"
] | null | null | null | import time
# Micro-benchmark: time 100M float divisions vs. 100M multiplications by the
# precomputed reciprocal.
a = 3215.35127
b = 3.

# time.perf_counter() is the right clock for measuring elapsed time: it is
# monotonic and high-resolution, whereas time.time() is wall-clock time that
# can jump (e.g. NTP adjustments) and may have coarse resolution.
start = time.perf_counter()
for i in range(100000000):
    c = a / b
end = time.perf_counter()
time_elapsed = end - start
print('Time elapsed (div ver) = %.5f' % time_elapsed)

a = 3215.35127
b = 1./3.  # reciprocal, so multiplication computes (approximately) the same quotient

start = time.perf_counter()
for i in range(100000000):
    c = a * b
end = time.perf_counter()
time_elapsed = end - start
print('Time elapsed (mul ver) = %.5f' % time_elapsed)
| 17.409091 | 53 | 0.629243 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 62 | 0.16188 |
aca3ab6f8519991fd8a0e554e1a624e3c39129f2 | 5,373 | py | Python | src/oci/apigateway/models/execution_log_policy.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/apigateway/models/execution_log_policy.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/apigateway/models/execution_log_policy.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ExecutionLogPolicy(object):
    """
    Logging configuration for the execution logs of an API Deployment.
    """

    #: A constant which can be used with the log_level property of a ExecutionLogPolicy.
    #: This constant has a value of "INFO"
    LOG_LEVEL_INFO = "INFO"

    #: A constant which can be used with the log_level property of a ExecutionLogPolicy.
    #: This constant has a value of "WARN"
    LOG_LEVEL_WARN = "WARN"

    #: A constant which can be used with the log_level property of a ExecutionLogPolicy.
    #: This constant has a value of "ERROR"
    LOG_LEVEL_ERROR = "ERROR"

    def __init__(self, **kwargs):
        """
        Initializes a new ExecutionLogPolicy object with values from keyword arguments.

        The following keyword arguments are supported (corresponding to the getters/setters of this class):

        :param is_enabled:
            The value to assign to the is_enabled property of this ExecutionLogPolicy.
        :type is_enabled: bool

        :param log_level:
            The value to assign to the log_level property of this ExecutionLogPolicy.
            Allowed values for this property are: "INFO", "WARN", "ERROR", 'UNKNOWN_ENUM_VALUE'.
            Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
        :type log_level: str
        """
        # Attribute name -> declared swagger type, used by the OCI (de)serializers.
        self.swagger_types = {'is_enabled': 'bool', 'log_level': 'str'}
        # Attribute name -> JSON field name on the wire.
        self.attribute_map = {'is_enabled': 'isEnabled', 'log_level': 'logLevel'}

        self._is_enabled = None
        self._log_level = None

    @property
    def is_enabled(self):
        """
        Gets the is_enabled of this ExecutionLogPolicy.
        Enables pushing of execution logs to the legacy OCI Object Storage log
        archival bucket. Oracle recommends using the OCI Logging service
        instead; if there is an active log object for the API Deployment with
        category 'execution' in OCI Logging, logs are not uploaded to the
        legacy bucket. Pushing to the legacy bucket is deprecated and will be
        removed in the future.

        :return: The is_enabled of this ExecutionLogPolicy.
        :rtype: bool
        """
        return self._is_enabled

    @is_enabled.setter
    def is_enabled(self, is_enabled):
        """
        Sets the is_enabled of this ExecutionLogPolicy.
        Enables pushing of execution logs to the legacy OCI Object Storage log
        archival bucket (deprecated; prefer the OCI Logging service — see the
        is_enabled getter for details).

        :param is_enabled: The is_enabled of this ExecutionLogPolicy.
        :type: bool
        """
        self._is_enabled = is_enabled

    @property
    def log_level(self):
        """
        Gets the log_level of this ExecutionLogPolicy.
        Specifies the log level used to control logging output of execution
        logs; enabling logging at a given level also enables logging at all
        higher levels.

        Allowed values for this property are: "INFO", "WARN", "ERROR", 'UNKNOWN_ENUM_VALUE'.
        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.

        :return: The log_level of this ExecutionLogPolicy.
        :rtype: str
        """
        return self._log_level

    @log_level.setter
    def log_level(self, log_level):
        """
        Sets the log_level of this ExecutionLogPolicy.
        Specifies the log level used to control logging output of execution
        logs; enabling logging at a given level also enables logging at all
        higher levels.

        :param log_level: The log_level of this ExecutionLogPolicy.
        :type: str
        """
        allowed_values = [self.LOG_LEVEL_INFO, self.LOG_LEVEL_WARN, self.LOG_LEVEL_ERROR]
        if value_allowed_none_or_none_sentinel(log_level, allowed_values):
            self._log_level = log_level
        else:
            # Unknown values coming back from the service are coerced, not rejected.
            self._log_level = 'UNKNOWN_ENUM_VALUE'

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        if other is None:
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| 38.378571 | 245 | 0.680439 | 4,833 | 0.899497 | 0 | 0 | 4,863 | 0.905081 | 0 | 0 | 4,024 | 0.74893 |
aca4304cd7dabb4c63ae50043d923687555339f1 | 1,107 | pyde | Python | Processing sketches/Line Stitching/star_mesh/star_mesh.pyde | nicoprocessor/Digital-Art-Collection | f2283d2dc7a140d8ae8783a77f78b4ed9fa06065 | [
"Apache-2.0"
] | null | null | null | Processing sketches/Line Stitching/star_mesh/star_mesh.pyde | nicoprocessor/Digital-Art-Collection | f2283d2dc7a140d8ae8783a77f78b4ed9fa06065 | [
"Apache-2.0"
] | null | null | null | Processing sketches/Line Stitching/star_mesh/star_mesh.pyde | nicoprocessor/Digital-Art-Collection | f2283d2dc7a140d8ae8783a77f78b4ed9fa06065 | [
"Apache-2.0"
] | null | null | null | import math
lines = 30      # chords stitched between each pair of adjacent spokes (amt step is 1.0/lines)
radius_x = 550  # horizontal radius the spoke endpoints reach
radius_y = 550  # vertical radius the spoke endpoints reach
axes = 20       # number of radial spokes: the full circle is split into 2*pi/axes wedges
def frange(start, stop, step):
    """Yield floats from start (inclusive) up to stop (exclusive), stepping by step."""
    current = start
    while current < stop:
        yield current
        current += step
def setup():
    """Draw a line-stitched star (Processing Python mode).

    Adjacent radial spokes are joined by straight chords whose endpoints
    slide toward the center in opposite directions, so the chord envelope
    traces a curve inside every wedge.
    """
    size(1080, 2160)
    background(51)
    ellipseMode(RADIUS)
    stroke(255)
    noFill()
    strokeWeight(1)

    # Center the coordinate system and flip y so it points upward.
    translate(width/2, height/2)
    scale(1, -1)

    wedge = 2*math.pi/axes
    for theta in frange(0, 2*math.pi, wedge):
        for amt in frange(0, 1.0, 1.0/lines):
            # Outer endpoints of the two spokes bounding this wedge.
            spoke_ax = radius_x * math.cos(theta)
            spoke_ay = radius_y * math.sin(theta)
            spoke_bx = radius_x * math.cos(theta + wedge)
            spoke_by = radius_y * math.sin(theta + wedge)
            # Slide one endpoint inward by amt, the other by (1 - amt),
            # interpolating toward the origin (0, 0).
            x1 = lerp(spoke_ax, 0, amt)
            y1 = lerp(spoke_ay, 0, amt)
            x2 = lerp(spoke_bx, 0, 1.0 - amt)
            y2 = lerp(spoke_by, 0, 1.0 - amt)
            line(x1, y1, x2, y2)


# Module-level statements: write the frame to disk with a randomized name.
save("star_parabolic" + str(random(1000)) + ".png")
print("Done")
| 25.159091 | 83 | 0.495935 | 0 | 0 | 96 | 0.086721 | 0 | 0 | 0 | 0 | 58 | 0.052394 |