hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
46f8f978d1dff2ed9209d787daec474e7c997240 | 746 | py | Python | xpdacq/devices.py | xpdAcq/xpdAcq | ad850df65c185c50b2bd7daf44148480a8ec42dd | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2017-12-11T17:28:03.000Z | 2020-05-05T20:51:15.000Z | xpdacq/devices.py | xpdAcq/xpdAcq | ad850df65c185c50b2bd7daf44148480a8ec42dd | [
"BSD-2-Clause-FreeBSD"
] | 521 | 2016-02-06T23:27:47.000Z | 2022-02-02T21:25:37.000Z | xpdacq/devices.py | xpdAcq/xpdAcq | ad850df65c185c50b2bd7daf44148480a8ec42dd | [
"BSD-2-Clause-FreeBSD"
] | 15 | 2016-02-08T03:49:03.000Z | 2022-03-30T00:25:40.000Z | """Wrappers for the ophyd devices."""
from ophyd import Device, Signal
from ophyd import Kind
from ophyd.device import Component as Cpt
class CalibrationData(Device):
    """A device to hold pyFAI calibration data.

    Each component is a soft Signal carrying one field of a pyFAI
    detector-geometry description; all are marked ``Kind.config`` so they
    are recorded as configuration rather than per-event data.
    """
    # geometry parameters (names follow the pyFAI PONI convention)
    dist = Cpt(Signal, value=1., kind=Kind.config)
    poni1 = Cpt(Signal, value=0., kind=Kind.config)
    poni2 = Cpt(Signal, value=0., kind=Kind.config)
    # detector rotations
    rot1 = Cpt(Signal, value=0., kind=Kind.config)
    rot2 = Cpt(Signal, value=0., kind=Kind.config)
    rot3 = Cpt(Signal, value=0., kind=Kind.config)
    # pixel sizes
    pixel1 = Cpt(Signal, value=0., kind=Kind.config)
    pixel2 = Cpt(Signal, value=0., kind=Kind.config)
    # detector name and beam wavelength
    detector = Cpt(Signal, value="", kind=Kind.config)
    wavelength = Cpt(Signal, value=0., kind=Kind.config)
| 39.263158 | 56 | 0.684987 | 607 | 0.813673 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.113941 |
46f90d6e35d87e99ffb0c6d077e3c447cadfc5e5 | 3,195 | py | Python | tests/unit/model_selection/test_model_selection.py | ambader/hcrystalball | 713636e698d9a260fab982764fce4a13699be1a8 | [
"MIT"
] | 139 | 2020-06-29T16:36:16.000Z | 2022-01-25T21:49:10.000Z | tests/unit/model_selection/test_model_selection.py | ambader/hcrystalball | 713636e698d9a260fab982764fce4a13699be1a8 | [
"MIT"
] | 34 | 2020-06-29T12:31:26.000Z | 2022-03-18T13:56:21.000Z | tests/unit/model_selection/test_model_selection.py | ambader/hcrystalball | 713636e698d9a260fab982764fce4a13699be1a8 | [
"MIT"
] | 28 | 2020-06-30T06:00:39.000Z | 2022-03-18T13:27:58.000Z | import numpy as np
import pytest
from sklearn.dummy import DummyRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from hcrystalball.metrics import get_scorer
from hcrystalball.model_selection import FinerTimeSplit
from hcrystalball.model_selection import get_best_not_failing_model
from hcrystalball.model_selection import select_model
from hcrystalball.wrappers import ExponentialSmoothingWrapper
from hcrystalball.wrappers import get_sklearn_wrapper
@pytest.mark.parametrize(
    "train_data, grid_search, parallel_over_dict",
    [("two_regions", "", {"Region": "region_0"}), ("two_regions", "", None)],
    indirect=["train_data", "grid_search"],
)
def test_select_model(train_data, grid_search, parallel_over_dict):
    """select_model yields one result per partition, each choosing 'good_dummy'."""
    data = train_data
    if parallel_over_dict:
        # restrict the input to the single partition value we parallelize over
        col, value = next(iter(parallel_over_dict.items()))
        data = train_data[train_data[col] == value].drop(columns="Region")
    partition_columns = ["Region", "Product"]
    results = select_model(
        data,
        target_col_name="Quantity",
        partition_columns=partition_columns,
        parallel_over_dict=parallel_over_dict,
        grid_search=grid_search,
        country_code_column="Holidays_code",
    )
    # expected partitions are derived from the unfiltered training data
    if parallel_over_dict:
        expected_partitions = (
            train_data.loc[train_data[col] == value, partition_columns]
            .drop_duplicates()
            .to_dict(orient="records")
        )
    else:
        expected_partitions = (
            train_data[partition_columns].drop_duplicates().to_dict(orient="records")
        )
    assert len(results) == len(expected_partitions)
    for result in results:
        assert result.best_model_name == "good_dummy"
        assert result.partition in expected_partitions
@pytest.mark.parametrize(
    "X_y_optional, negative_data, best_model_name, rank, expected_error",
    [
        ("", False, "ExponentialSmoothingWrapper", 1, None),
        ("", True, "SklearnWrapper", 2, None),
        ("", True, "", 2, ValueError),
    ],
    indirect=["X_y_optional"],
)
def test_get_best_not_failing_model(X_y_optional, negative_data, best_model_name, rank, expected_error):
    """get_best_not_failing_model skips models that fail on the data.

    With negative targets the multiplicative ExponentialSmoothing model
    fails, so the next-ranked model is returned; if every candidate fails
    (only the failing model is in the grid) a ValueError is expected.
    """
    X, y = X_y_optional
    # data contains 0
    y[y < 1] = 1
    if negative_data:
        # a single negative value makes the multiplicative model fail
        y[-1] = -1
    models = [
        ExponentialSmoothingWrapper(freq="D", seasonal="mul"),
        get_sklearn_wrapper(DummyRegressor, strategy="constant", constant=-1000),
    ]
    # for the error case keep only the model that will fail
    models = models if expected_error is None else models[:1]
    grid_search = GridSearchCV(
        estimator=Pipeline([("model", "passthrough")]),
        param_grid=[{"model": models}],
        scoring=get_scorer("neg_mean_absolute_error"),
        cv=FinerTimeSplit(n_splits=1, horizon=5),
        refit=False,
        error_score=np.nan,
    )
    grid_search.fit(X, y)
    if expected_error:
        with pytest.raises(expected_error):
            get_best_not_failing_model(grid_search, X, y)
    else:
        best_param_rank = get_best_not_failing_model(grid_search, X, y)
        assert isinstance(best_param_rank, dict)
        assert best_param_rank["params"]["model"].__class__.__name__ == best_model_name
        assert best_param_rank["rank"] == rank
| 34.354839 | 104 | 0.693271 | 0 | 0 | 0 | 0 | 2,689 | 0.841628 | 0 | 0 | 441 | 0.138028 |
46fa92d6b3fc162164fdf17f192beafbb5b9a007 | 1,227 | py | Python | ppci/cli/yacc.py | jsdelivrbot/ppci-mirror | 67195d628275e2332ceaf44c9e13fc58d0877157 | [
"BSD-2-Clause"
] | null | null | null | ppci/cli/yacc.py | jsdelivrbot/ppci-mirror | 67195d628275e2332ceaf44c9e13fc58d0877157 | [
"BSD-2-Clause"
] | null | null | null | ppci/cli/yacc.py | jsdelivrbot/ppci-mirror | 67195d628275e2332ceaf44c9e13fc58d0877157 | [
"BSD-2-Clause"
] | null | null | null | """ Parser generator utility.
This script can generate a python script from a grammar description.
Invoke the script on a grammar specification file:
.. code::
$ ppci-yacc test.x -o test_parser.py
And use the generated parser by deriving a user class:
.. code::
import test_parser
class MyParser(test_parser.Parser):
pass
p = MyParser()
p.parse()
Alternatively you can load the parser on the fly:
.. code::
import yacc
parser_mod = yacc.load_as_module('mygrammar.x')
class MyParser(parser_mod.Parser):
pass
p = MyParser()
p.parse()
"""
import argparse
from .base import base_parser, LogSetup
from ..lang.tools.yacc import transform
# Module-level CLI parser; reuses the project's shared base options and the
# module docstring (usage examples) as the --help description.
parser = argparse.ArgumentParser(
    description=__doc__,
    formatter_class=argparse.RawDescriptionHelpFormatter,
    parents=[base_parser])
# positional: grammar specification file, opened for reading
parser.add_argument(
    'source', type=argparse.FileType('r'), help='the parser specification')
# required output file for the generated parser module
parser.add_argument(
    '-o', '--output', type=argparse.FileType('w'), required=True)
def yacc(args=None):
    """Command-line entry point: generate a parser module from a grammar.

    :param args: optional argv list; defaults to ``sys.argv[1:]``.
    """
    namespace = parser.parse_args(args)
    with LogSetup(namespace):
        transform(namespace.source, namespace.output)
        namespace.output.close()
if __name__ == '__main__':
    yacc()
46fba1205b8f8301a0c377be60303c38d5a02559 | 3,434 | py | Python | imdb/imdb.py | santhoshse7en/IMDb | c76cfa3fc9a1326587707dbab3800d0e9a32d045 | [
"MIT"
] | 1 | 2019-09-09T08:46:11.000Z | 2019-09-09T08:46:11.000Z | imdb/imdb.py | santhoshse7en/imdby | c76cfa3fc9a1326587707dbab3800d0e9a32d045 | [
"MIT"
] | 13 | 2019-05-24T05:17:03.000Z | 2019-05-29T12:19:18.000Z | imdb/imdb.py | santhoshse7en/imdby | c76cfa3fc9a1326587707dbab3800d0e9a32d045 | [
"MIT"
] | null | null | null | # Movie Related Information
from imdb.parser.character.search_character_id import search_character_id
from imdb.parser.company.search_company_id import search_company_id
from imdb.parser.event.search_event_id import search_event_id
from imdb.parser.movie.company import company
from imdb.parser.movie.critic_reviews import critic_reviews
from imdb.parser.movie.external_reviews import external_reviews
from imdb.parser.movie.external_sites import external_sites
from imdb.parser.movie.full_cast_and_crew import full_cast_and_crew
from imdb.parser.movie.imdb_charts import imdb_charts
from imdb.parser.movie.movie import movie
from imdb.parser.movie.parental_guide import parental_guide
from imdb.parser.movie.plot import plot
from imdb.parser.movie.plot_keywords import plot_keywords
from imdb.parser.movie.ratings import ratings
from imdb.parser.movie.release_info import release_info
from imdb.parser.movie.search_title_id import search_title_id
from imdb.parser.movie.taglines import taglines
from imdb.parser.movie.technical_spec import technical_spec
from imdb.parser.movie.top_india_charts import top_india_charts
from imdb.parser.movie.trending_now_in_india import trending_now_in_india
from imdb.parser.movie.upcoming_releases import upcoming_releases
from imdb.parser.movie.user_reviews import user_reviews
from imdb.parser.news.search_news_id import search_news_id
from imdb.parser.person.search_person_id import search_person_id
class IMDb:
    """Facade over the scraper parsers.

    ``search_*`` methods resolve free text to an IMDb identifier;
    the remaining methods fetch one information page for a title ID
    by delegating to the corresponding parser function.
    """
    # --- search helpers: free text -> IMDb ID ---
    def search_movie(self, text):
        return search_title_id(text)
    def search_person(self, text):
        return search_person_id(text)
    def search_company(self, text):
        return search_company_id(text)
    def search_event(self, text):
        return search_event_id(text)
    def search_character(self, text):
        return search_character_id(text)
    def search_news(self, text):
        return search_news_id(text)
    """
    :returns: Parser contains all kind of movie information
    """
    # --- per-title information pages (each delegates to a parser function
    # of the same name imported at module level) ---
    def company(self, title_id):
        return company(title_id)
    def critic_reviews(self, title_id):
        return critic_reviews(title_id)
    def external_reviews(self, title_id):
        return external_reviews(title_id)
    def external_sites(self, title_id):
        return external_sites(title_id)
    def full_cast_and_crew(self, title_id):
        return full_cast_and_crew(title_id)
    def imdb_charts(self):
        return imdb_charts()
    def movie(self, title_id):
        return movie(title_id)
    def parental_guide(self, title_id):
        return parental_guide(title_id)
    def plot(self, title_id):
        return plot(title_id)
    def plot_keywords(self, title_id):
        return plot_keywords(title_id)
    def ratings(self, title_id):
        return ratings(title_id)
    def release_info(self, title_id):
        return release_info(title_id)
    def taglines(self, title_id):
        return taglines(title_id)
    def technical_spec(self, title_id):
        return technical_spec(title_id)
    # --- chart / listing pages (no title ID required) ---
    def top_india_charts(self):
        return top_india_charts()
    def trending_now_in_india(self):
        return trending_now_in_india()
    def upcoming_releases(self):
        return upcoming_releases()
def user_reviews(self, title_id, remove_spoiler):
return user_reviews(title_id, remove_spoiler) | 31.796296 | 82 | 0.761503 | 1,992 | 0.580082 | 0 | 0 | 0 | 0 | 0 | 0 | 192 | 0.055911 |
46fc1f3a2a61d15198e5a0cff38cbad84fddfcdc | 402 | py | Python | authors/apps/profiles/migrations/0022_auto_20190123_1211.py | andela/ah-django-unchained | a4e5f6cd11fdc0b9422020693ac1200b849cf0f3 | [
"BSD-3-Clause"
] | null | null | null | authors/apps/profiles/migrations/0022_auto_20190123_1211.py | andela/ah-django-unchained | a4e5f6cd11fdc0b9422020693ac1200b849cf0f3 | [
"BSD-3-Clause"
] | 26 | 2019-01-07T14:22:05.000Z | 2019-02-28T17:11:48.000Z | authors/apps/profiles/migrations/0022_auto_20190123_1211.py | andela/ah-django-unchained | a4e5f6cd11fdc0b9422020693ac1200b849cf0f3 | [
"BSD-3-Clause"
] | 3 | 2019-09-19T22:16:09.000Z | 2019-10-16T21:16:16.000Z | # Generated by Django 2.1.4 on 2019-01-23 12:11
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: caps UserProfile.bio at 200 characters
    # and allows it to be blank.  Do not edit by hand once applied.
    dependencies = [
        ('profiles', '0021_auto_20190122_1723'),
    ]
    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='bio',
            field=models.TextField(blank=True, max_length=200),
        ),
    ]
| 21.157895 | 63 | 0.606965 | 309 | 0.768657 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.248756 |
46fd9e6218a07bf5fa06674ee05f0d8cc3758da6 | 2,837 | py | Python | squidpy/instruments/srs.py | guenp/squidpy | 17af231cef7142325a483aaa95041671e4daaea4 | [
"MIT"
] | null | null | null | squidpy/instruments/srs.py | guenp/squidpy | 17af231cef7142325a483aaa95041671e4daaea4 | [
"MIT"
] | null | null | null | squidpy/instruments/srs.py | guenp/squidpy | 17af231cef7142325a483aaa95041671e4daaea4 | [
"MIT"
] | null | null | null | from squidpy.instrument import Instrument
import visa
class SR830(Instrument):
    '''
    Instrument driver for SR830 lock-in amplifier over GPIB/VISA.

    Exposes the instrument's settings (sensitivity, amplitude, frequency,
    time constant) and readouts (X, Y, R, theta) as Python properties
    backed by VISA query/write commands.
    '''
    def __init__(self, gpib_address='', name='SR830'):
        self._units = {'amplitude': 'V', 'frequency': 'Hz'}
        self._visa_handle = visa.ResourceManager().open_resource(gpib_address)
        self._visa_handle.read_termination = '\n'
        # maps human-readable time constant -> instrument index for OFLT
        self.time_constant_options = {
            "10 us": 0,
            "30 us": 1,
            "100 us": 2,
            "300 us": 3,
            "1 ms": 4,
            "3 ms": 5,
            "10 ms": 6,
            "30 ms": 7,
            "100 ms": 8,
            "300 ms": 9,
            "1 s": 10,
            "3 s": 11,
            "10 s": 12,
            "30 s": 13,
            "100 s": 14,
            "300 s": 15,
            "1 ks": 16,
            "3 ks": 17,
            "10 ks": 18,
            "30 ks": 19
        }
        # sensitivity values; list position is the instrument index for SENS
        self.sensitivity_options = [
            2e-9, 5e-9, 10e-9, 20e-9, 50e-9, 100e-9, 200e-9,
            500e-9, 1e-6, 2e-6, 5e-6, 10e-6, 20e-6, 50e-6, 100e-6,
            200e-6, 500e-6, 1e-3, 2e-3, 5e-3, 10e-3, 20e-3,
            50e-3, 100e-3, 200e-3, 500e-3, 1]
        super(SR830, self).__init__(name)
    @property
    def sensitivity(self):
        '''Get the lockin sensitivity'''
        return self.sensitivity_options[int(self._visa_handle.ask('SENS?'))]
    @sensitivity.setter
    def sensitivity(self, value):
        '''Set the sensitivity'''
        # value must be one of sensitivity_options; raises ValueError otherwise
        self._visa_handle.write('SENS%d' %self.sensitivity_options.index(value))
    @property
    def amplitude(self):
        '''Get the output amplitude'''
        # NOTE(review): returns the raw string reply (not cast to float),
        # unlike X/Y/R/theta below -- confirm whether callers expect a string
        return self._visa_handle.ask('SLVL?')
    @amplitude.setter
    def amplitude(self, value):
        '''Set the amplitude.'''
        self._visa_handle.write('SLVL %s' %value)
    @property
    def frequency(self):
        '''Get the reference frequency (raw string reply).'''
        return self._visa_handle.ask('FREQ?')
    @frequency.setter
    def frequency(self, value):
        '''Set the reference frequency.'''
        self._visa_handle.write('FREQ %s' %value)
    @property
    def X(self):
        '''In-phase component (OUTP?1), as float.'''
        return float(self._visa_handle.ask('OUTP?1'))
    @property
    def Y(self):
        '''Quadrature component (OUTP?2), as float.'''
        return float(self._visa_handle.ask('OUTP?2'))
    @property
    def R(self):
        '''Magnitude (OUTP?3), as float.'''
        return float(self._visa_handle.ask('OUTP?3'))
    @property
    def theta(self):
        '''Phase (OUTP?4), as float.'''
        return float(self._visa_handle.ask('OUTP?4'))
    @property
    def time_constant(self):
        '''Get the time constant as its human-readable label.'''
        # invert the label -> index map to translate the instrument reply
        options = {self.time_constant_options[key]: key for key in self.time_constant_options.keys()}
        return options[int(self._visa_handle.ask('OFLT?'))]
    @time_constant.setter
    def time_constant(self, value):
        '''Set the time constant from its human-readable label.'''
        self._visa_handle.write('OFLT %s' %self.time_constant_options[value])
def __del__(self):
self._visa_handle.close() | 29.863158 | 101 | 0.534367 | 2,782 | 0.980613 | 0 | 0 | 1,411 | 0.497356 | 0 | 0 | 425 | 0.149806 |
46fef5e639dc0c69be1d91fc51ec112c48717e44 | 3,602 | py | Python | src/hbcomp/app.py | zgoda/hbcomp | 0b787a05f2cd512c44363daaa560ec74cc9d6261 | [
"MIT"
] | null | null | null | src/hbcomp/app.py | zgoda/hbcomp | 0b787a05f2cd512c44363daaa560ec74cc9d6261 | [
"MIT"
] | null | null | null | src/hbcomp/app.py | zgoda/hbcomp | 0b787a05f2cd512c44363daaa560ec74cc9d6261 | [
"MIT"
] | null | null | null | import os
from logging.config import dictConfig
from typing import Optional
from flask import render_template, request, send_from_directory
from flask_babel import get_locale, lazy_gettext as _
from werkzeug.utils import ImportStringError
from .auth import auth_bp
from .comp import comp_bp
from .ext import babel, csrf, db, login_manager, pages
from .home import home_bp
from .models import User
from .profile import profile_bp
from .utils import pagination
from .utils.app import Application
__all__ = ['create_app']
def create_app(env: Optional[str] = None) -> Application:
    """Application factory: build and fully configure the Flask app.

    :param env: optional environment name used to load an extra config module
    :return: the configured application instance
    """
    if os.environ.get('FLASK_ENV', '').lower() == 'production':
        configure_logging()
    app = Application()
    configure_app(app, env)
    configure_extensions(app)
    # blueprint registration needs an active application context
    with app.app_context():
        configure_blueprints(app)
    configure_templates(app)
    configure_error_handlers(app)
    return app
def configure_app(app: Application, env: Optional[str]):
    """Load base configuration, then an optional per-environment override.

    In debug mode also registers a /favicon.ico route served from static/.
    """
    app.config.from_object('hbcomp.config')
    if env is not None:
        try:
            app.config.from_object(f'hbcomp.config_{env}')
        except ImportStringError:
            # missing override module is not fatal -- just log and continue
            app.logger.info(f'no environment configuration for {env}')
    if app.config['DEBUG']:
        @app.route('/favicon.ico')
        def favicon():
            return send_from_directory(
                os.path.join(app.root_path, 'static'), 'favicon.ico',
                mimetype='image/vnd.microsoft.icon'
            )
def configure_blueprints(app: Application):
    """Register all application blueprints with their URL prefixes."""
    blueprints = [
        (home_bp, None),
        (profile_bp, '/profile'),
        (comp_bp, '/comp'),
        (auth_bp, '/auth'),
    ]
    for blueprint, prefix in blueprints:
        app.register_blueprint(blueprint, url_prefix=prefix)
def configure_extensions(app: Application):
    """Initialize all Flask extensions against the application instance."""
    db.init_app(app)
    csrf.init_app(app)
    pages.init_app(app)
    # NOTE(review): looks like leftover debug/warm-up code -- confirm whether
    # this forced flatpages lookup is intentional before removing
    pages.get('foo')
    if not app.testing:
        # register locale/user callbacks only outside the test suite
        @babel.localeselector
        def get_locale():
            accept_languages = app.config.get('ACCEPT_LANGUAGES', ['pl', 'en'])
            return request.accept_languages.best_match(accept_languages)
        @login_manager.user_loader
        def load_user(user_id):
            return User.query.get(user_id)
    babel.init_app(app)
    login_manager.init_app(app)
    login_manager.login_view = 'auth.login'
    login_manager.login_message = _('Please log in to access this page')
    login_manager.login_message_category = 'warning'
def configure_templates(app: Application):
    """Expose shared helper callables to the Jinja template environment."""
    helpers = {
        'url_for_other_page': pagination.url_for_other_page,
        'get_locale': get_locale,
    }
    app.jinja_env.globals.update(helpers)
def configure_logging():
    """Configure root logging to emit to the WSGI error stream at INFO level."""
    log_format = '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'
    config = {
        'version': 1,
        'formatters': {
            'default': {'format': log_format},
        },
        'handlers': {
            'wsgi': {
                'class': 'logging.StreamHandler',
                'stream': 'ext://flask.logging.wsgi_errors_stream',
                'formatter': 'default',
            }
        },
        'root': {
            'level': 'INFO',
            'handlers': ['wsgi'],
        },
    }
    dictConfig(config)
def configure_error_handlers(app: Application):
    """Attach HTML error pages for the common HTTP error codes."""
    def make_handler(code):
        # each handler renders the matching template and echoes the status code
        def handler(error):
            return render_template(f'errors/{code}.html'), code
        return handler
    for status in (403, 404, 500):
        app.errorhandler(status)(make_handler(status))
| 29.048387 | 82 | 0.649917 | 0 | 0 | 0 | 0 | 853 | 0.236813 | 0 | 0 | 620 | 0.172127 |
20024a10fc3007490f9a135af56090f4d8d58d3e | 3,695 | py | Python | lib/taurus/external/test/test_qt.py | MikeFalowski/taurus | ef041bf35dd847caf08a7efbe072f4020d35522e | [
"CC-BY-3.0"
] | null | null | null | lib/taurus/external/test/test_qt.py | MikeFalowski/taurus | ef041bf35dd847caf08a7efbe072f4020d35522e | [
"CC-BY-3.0"
] | 1 | 2020-02-28T16:36:04.000Z | 2020-03-02T07:51:12.000Z | lib/taurus/external/test/test_qt.py | MikeFalowski/taurus | ef041bf35dd847caf08a7efbe072f4020d35522e | [
"CC-BY-3.0"
] | null | null | null | # -*- coding: utf-8 -*-
##############################################################################
##
## This file is part of Taurus
##
## http://taurus-scada.org
##
## Copyright 2011 CELLS / ALBA Synchrotron, Bellaterra, Spain
##
## Taurus is free software: you can redistribute it and/or modify
## it under the terms of the GNU Lesser General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Taurus is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Lesser General Public License for more details.
##
## You should have received a copy of the GNU Lesser General Public License
## along with Taurus. If not, see <http://www.gnu.org/licenses/>.
##
##############################################################################
import sys
import taurus
import unittest
def _import(name):
    """Import the module *name* (dotted paths allowed) and return it.

    Uses :func:`importlib.import_module`, which returns the leaf module for
    a dotted path -- equivalent to the ``__import__`` + ``sys.modules``
    lookup it replaces, but idiomatic and self-contained.
    """
    import importlib
    return importlib.import_module(name)
class QtTestCase(unittest.TestCase):
    """Checks that taurus loads exactly one Qt binding and that the
    taurus Qt shims re-export everything the underlying binding provides."""
    # name of the Qt binding selected by taurus (set in setUp)
    _api_name = None
    def setUp(self):
        taurus.setLogLevel(taurus.Critical)
        # optional Qt submodules that must NOT be loaded as a side effect
        self.opt_mods = ("QtDesigner", "QtNetwork", "Qt", "QtSvg",
                         "QtUiTools", "QtWebKit", "Qwt5", "uic")
        # store a "snapshot" of the currently loaded modules
        self._orig_mods = set(sys.modules.keys())
        # this import initializes Qt in case it is not loaded
        from taurus.external.qt import Qt, API_NAME
        self._api_name = API_NAME
        self.__qt = Qt
    def test_qt_base_import(self):
        """Only the selected binding (and its QtCore) may be loaded."""
        mods = set(sys.modules.keys())
        other_apis = set(('PyQt5', 'PySide2', 'PyQt4', 'PySide'))
        other_apis.remove(self._api_name)
        # the selected API and the QtCore should be loaded
        self.assertTrue(self._api_name in mods, self._api_name + " not loaded")
        self.assertTrue(self._api_name + ".QtCore" in mods,
                        "QtCore not loaded")
        # the other APIs should *not* be loaded
        for other_api in other_apis:
            self.assertFalse(
                other_api in mods,
                other_api + " loaded in " + self._api_name + " test")
        # the other Qt submodules should *not* be loaded
        # (ignoring anything that was already loaded before setUp)
        for opt_mod in self.opt_mods:
            mod = "{0}.{1}".format(self._api_name, opt_mod)
            self.assertFalse(mod in mods - self._orig_mods, mod + " is loaded")
    def __test_qt_module(self, qt_mod_name):
        """Checks that the given shim is complete"""
        taurus_qt_mod_name = "taurus.external.qt.{0}".format(qt_mod_name)
        orig_qt_mod_name = "{0}.{1}".format(self._api_name, qt_mod_name)
        TaurusQtMod = _import(taurus_qt_mod_name)
        OrigQtMod = _import(orig_qt_mod_name)
        # compare public member lists: every public name of the original
        # binding module must also exist in the taurus shim
        taurus_qt_mod_members = [m for m in dir(TaurusQtMod)
                                 if not m.startswith("_")]
        orig_qt_mod_members = [m for m in dir(OrigQtMod)
                               if not m.startswith("_")]
        for orig_member_name in orig_qt_mod_members:
            self.assertTrue(
                orig_member_name in taurus_qt_mod_members,
                "Taurus {0} does not contain {1}".format(qt_mod_name,
                                                         orig_member_name)
            )
    def test_qt_core(self):
        """Check the QtCore shim"""
        return self.__test_qt_module("QtCore")
    def test_qt_gui(self):
        """Check the QtGui shim"""
        return self.__test_qt_module("QtGui")
def main():
    """Run the Qt shim test suite with verbose output."""
    unittest.main(verbosity=2)
if __name__ == "__main__":
    main()
| 33.288288 | 79 | 0.59567 | 2,526 | 0.683627 | 0 | 0 | 0 | 0 | 0 | 0 | 1,558 | 0.421651 |
200282cb4ee521ea49ac92a99760f93beca4c7e9 | 7,762 | py | Python | tools/stage2/infer_cam.py | yaoqi-zd/SGAN | 43d8a859b03967e2423a73ef1ba332ee71714ba4 | [
"MIT"
] | 48 | 2020-02-19T02:31:30.000Z | 2021-12-24T23:58:13.000Z | tools/stage2/infer_cam.py | yaoqi-zd/SGAN | 43d8a859b03967e2423a73ef1ba332ee71714ba4 | [
"MIT"
] | 16 | 2020-02-28T14:56:58.000Z | 2021-07-05T09:35:10.000Z | tools/stage2/infer_cam.py | yaoqi-zd/SGAN | 43d8a859b03967e2423a73ef1ba332ee71714ba4 | [
"MIT"
] | 12 | 2020-01-14T15:46:11.000Z | 2021-12-17T08:57:07.000Z | import os, pickle
import os.path as osp
import numpy as np
import cv2
import scipy.ndimage as nd
import init_path
from lib.dataset.get_dataset import get_dataset
from lib.network.sgan import SGAN
import torch
from torch.utils.data import DataLoader
import argparse
from ipdb import set_trace
import matplotlib.pyplot as plt
from lib.utils import pyutils
# PASCAL VOC class names; list index is the label id (0 = background)
classes=['background',
         'aeroplane',
         'bicycle',
         'bird',
         'boat',
         'bottle',
         'bus',
         'car',
         'cat',
         'chair',
         'cow',
         'diningtable',
         'dog',
         'horse',
         'motorbike',
         'person',
         'pottedplant',
         'sheep',
         'sofa',
         'train',
         'tvmonitor']
def parse_args():
    """Parse the --cfg_file CLI option and load it as a config object.

    :return: config namespace produced by pyutils.read_yaml2cls
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--cfg_file", default=None, type=str)
    args = parser.parse_args()
    # the YAML file content replaces the raw argparse namespace
    args = pyutils.read_yaml2cls(args.cfg_file)
    return args
# mean pixel : in B-G-R channel order
mean_pixel = np.array([104.008, 116.669, 122.675])
def preprocess(image, size):
    """Pre-process an OpenCV-format (H, W, 3) B-G-R image.

    Resizes to (size, size) with bilinear interpolation, subtracts the
    B-G-R mean pixel, and returns a (1, 3, size, size) torch tensor.
    """
    arr = np.array(image)
    height, width, _ = arr.shape
    resized = nd.zoom(arr.astype('float32'),
                      (size / height, size / width, 1.0), order=1)
    centered = resized - mean_pixel
    chw = centered.transpose([2, 0, 1])
    batched = chw[np.newaxis, ...]
    return torch.from_numpy(batched)
def generate_seed_with_ignore(localization):
    """Generate a (41, 41) seed mask, ignoring every conflicting pixel.

    A pixel claimed by more than one class channel is a conflict and is
    left unassigned (value 21).  NOTE: ``localization`` is modified in
    place (conflicting positions are zeroed), matching original behavior.

    :param localization: (41, 41, 21) binary array; channel 0 = background
    :return: (41, 41) int mask; class index per pixel, 21 = ignore
    """
    h, w, c = localization.shape
    assert (h == 41) & (w == 41) & (c == 21)
    # a pixel whose channel sum exceeds 1 is claimed by several classes
    conflict = np.sum(localization, axis=2) > 1
    localization[conflict] = 0
    # np.int was removed in NumPy 1.24 -- use the builtin int dtype instead
    mask = np.full((h, w), 21, dtype=int)
    rows, cols, classes = np.where(localization)
    mask[rows, cols] = classes
    return mask
def generate_seed_wo_ignore(localization, train_boat=False):
    """Generate a (41, 41) seed mask, resolving conflicts by priority.

    Background is written first, then foreground classes in decreasing
    area order, so smaller (rarer) classes overwrite larger ones.

    :param localization: (41, 41, 21) binary array; channel 0 = background
    :param train_boat: if True, reset boat (4) / train (19) pixels that the
        background channel also claims back to background
    :return: (41, 41) int mask; class index per pixel, 21 = unassigned
    """
    h, w, c = localization.shape
    assert (h == 41) & (w == 41) & (c == 21)
    # np.int was removed in NumPy 1.24 -- use the builtin int dtype instead
    mask = np.full((h, w), 21, dtype=int)
    # background seed first; foreground classes may overwrite it below
    bg_ind = np.where(localization[:, :, 0])
    mask[bg_ind[0], bg_ind[1]] = 0
    # write classes from largest to smallest area
    area = np.sum(localization, axis=(0, 1))
    cls_order = np.argsort(area)[::-1]  # area in descending order
    for cls in cls_order:
        if area[cls] == 0:
            break
        ind = np.where(localization[:, :, cls])
        mask[ind[0], ind[1]] = cls
    if train_boat:
        # boat/train pixels also claimed by background revert to background
        train_boat_ind = np.where(((mask == 4) | (mask == 19)) & (localization[:, :, 0] == 1))
        mask[train_boat_ind] = 0
    return mask
def get_localization_cues_sec(att_maps, saliency, im_label, cam_thresh):
    """get localization cues with method in SEC paper

    perform hard thresholding for each foreground class; background comes
    from low-saliency pixels.  Depends on the module-global ``args``
    (``args.ignore_conflict``) for the conflict-resolution strategy.

    Parameters
    ----------
    att_maps: [41, 41, 20]
    saliency: [H, W]
    im_label: list of foreground classes
    cam_thresh: hard threshold to extract foreground class cues

    Return
    ------
    seg_mask: [41, 41]
    """
    h, w = att_maps.shape[:2]
    im_h, im_w = saliency.shape[:2]
    localization1 = np.zeros(shape=(h, w, 21))
    for idx in im_label: # idx: aero=1
        heat_map = att_maps[:, :, idx - 1]
        localization1[:, :, idx] = heat_map > cam_thresh * np.max(heat_map)
    # fix: second zoom factor previously used h / im_w; harmless only
    # because h == w == 41, but wrong for non-square grids
    bg_cue = nd.zoom(saliency, (h / im_h, w / im_w), order=1)
    localization1[:, :, 0] = bg_cue < 0.06
    # handle conflict seed
    if args.ignore_conflict:
        seg_mask = generate_seed_with_ignore(localization1)
    else:
        seg_mask = generate_seed_wo_ignore(localization1, train_boat=True)
    return seg_mask
def get_localization_cues_dcsp(att_maps, saliency, im_label, bg_thresh):
    """get localization cues with method in DCSP paper

    For every image-level class, score each pixel by the harmonic mean of
    its attention value and the (resized) saliency; pixels whose best
    score falls below ``bg_thresh`` become background.

    Parameters
    ----------
    att_maps: [41, 41, 20]
    saliency: [H, W]
    im_label: list of foreground classes
    bg_thresh: threshold below which a pixel is labeled background

    Return
    ------
    seg_mask: [41, 41]
    """
    h, w = att_maps.shape[:2]
    sal_h, sal_w = saliency.shape[:2]
    sal_small = nd.zoom(saliency, (h / sal_h, w / sal_w), order=1)
    harmonic = np.zeros(shape=(h, w, 20))
    for idx in im_label:  # idx: aero=1
        att = att_maps[:, :, idx - 1]
        # harmonic mean; the epsilons guard against division by zero
        harmonic[:, :, idx - 1] = 2 / ((1 / (att + 1e-7)) + (1 / (sal_small + 1e-7)))
    best_score = harmonic.max(axis=2)
    seg_mask = harmonic.argmax(axis=2) + 1
    seg_mask[best_score < bg_thresh] = 0
    return seg_mask
def filter_weight_dict(weight_dict, model_dict):
    """Merge pretrained weights into ``model_dict``, keeping only keys the
    model actually has.

    Checkpoint keys for conv layers that lack the ``backbone.`` prefix are
    renamed to keep compatibility with the previous network definition.
    ``model_dict`` is updated in place and returned.
    """
    compatible = {}
    for key, tensor in weight_dict.items():
        # older checkpoints stored conv layers without the "backbone." prefix
        if "conv" in key and "backbone" not in key:
            key = "backbone." + key
        if key in model_dict:
            compatible[key] = tensor
    model_dict.update(compatible)
    return model_dict
if __name__ == '__main__':
    # Inference script: loads a trained SGAN checkpoint, runs CAM inference
    # over the test set, combines attention with saliency maps, and writes
    # one seed mask PNG per image.  Requires a CUDA device.
    args = parse_args()
    device = torch.device("cuda:0")
    # input and output
    im_tags = pickle.load(open(args.cue_file, "rb"))
    if not osp.exists(args.res_path):
        os.mkdir(args.res_path)
    _, test_dataset = get_dataset(args.dataset_name, args)
    batch_size = 8
    test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False, num_workers=8)
    # load net and trained weights
    model = SGAN(backbone_name=args.backbone)
    weight_dict = torch.load(osp.join(args.save_model_path, args.cfg_name, "model_iter_" + str(args.max_iter) + ".pth"))
    model_dict = filter_weight_dict(weight_dict, model.state_dict())
    model.load_state_dict(model_dict)
    model = model.to(device)
    model.eval()
    save_path = osp.join(args.res_path, args.cfg_name + args.test_cfg)
    if not osp.exists(save_path):
        os.makedirs(save_path)
    # compute class activation map
    with torch.no_grad():
        for num, pack in enumerate(test_loader):
            # pack layout: names, images, labels, fg similarity, bg similarity,
            # (unused index 5), original image sizes
            names, imgs, labels = pack[0], pack[1].to(device, dtype=torch.float32), \
                                  pack[2].numpy()
            fg_sim = pack[3].to(device, dtype=torch.float32)
            bg_sim = pack[4].to(device, dtype=torch.float32)
            sizes = pack[6].to("cpu").numpy()
            if args.combine_seedseg:
                # fuse the segmentation branch output into the CAMs
                _, segs, cams = model.forward_cam(imgs, fg_sim, bg_sim)
                cams = cams + segs
                # cams = segs
            else:
                _, _, cams = model.forward_cam(imgs, fg_sim, bg_sim)
            # to NHWC for per-image numpy processing
            np_cams = np.transpose(cams.cpu().numpy(), (0, 2, 3, 1))
            _, h, w, c = np_cams.shape
            for k, name in enumerate(names):
                # get output cam
                im_label = im_tags[name]
                im_h, im_w = sizes[k]
                np_cam = np_cams[k]
                # get saliency (grayscale PNG, rescaled to [0, 1])
                bg_cue = cv2.imread(osp.join(args.dataset_root, "sal", args.sdnet_path, name + ".png"), cv2.IMREAD_GRAYSCALE)
                bg_cue = bg_cue.astype(np.float32)
                bg_cue = bg_cue / 255
                seg_mask = get_localization_cues_sec(np_cam, bg_cue, im_label, args.cam_thresh)
                # save mask, resized back to the original image resolution
                write_mask = nd.zoom(seg_mask, (im_h / h, im_w / w), order=0)
                cv2.imwrite(osp.join(save_path, name + ".png"), write_mask)
200340700ae186f45e01e6dbb558b6dd954ca85a | 2,902 | py | Python | python/lib/behaviors/compute.py | newrelic-experimental/demo-pythontron | 0561d7e496da3a518c28102010c3c76445a47307 | [
"Apache-2.0"
] | null | null | null | python/lib/behaviors/compute.py | newrelic-experimental/demo-pythontron | 0561d7e496da3a518c28102010c3c76445a47307 | [
"Apache-2.0"
] | null | null | null | python/lib/behaviors/compute.py | newrelic-experimental/demo-pythontron | 0561d7e496da3a518c28102010c3c76445a47307 | [
"Apache-2.0"
] | null | null | null | from random import seed, random
from math import sqrt, log, cos, pi, ceil
import time
import json
from . import behavior
from ..app_logging import AppLogging
# keys of the parsed configuration dict returned by get_configuration
MAX_KEY = "max"
MIN_KEY = "min"
class Compute(behavior.Behavior):
    """Behavior that burns CPU for a randomly sampled duration (ms)."""
    def __init__(self, value):
        # value is the raw behavior configuration string (JSON "[min, max]")
        super().__init__("COMPUTE", value)
    def execute(self):
        """Run the behavior; returns False when the config is invalid."""
        super().execute()
        config = self.get_configuration(self.get_value())
        if config is None:
            return False
        # Randomly select a duration length within the range
        duration = self.sample(time.time(), config[MIN_KEY], config[MAX_KEY])
        self.run_compute(duration)
        return True
def run_compute(self, duration):
now = int(time.time() * 1000)
startTime = now
while (now - startTime) < duration:
for i in range(0, 50):
for j in range(i*50):
now / pow(pi, ceil(random() * 10))
now = int(time.time() * 1000)
def sample(self, seed_num, min_val, max_val):
seed(seed_num)
# Ensure there isn't a zero value
r_1 = 1 - random()
r_2 = 1 - random()
# This is a Box Muller transform. Given 2 indepenent samples from a uniform distribution
# this formula will generate a random variable that will follow a normal distribution.
# Source: https://en.wikipedia.org/wiki/Box%E2%80%93Muller_transform
box_muller = sqrt(-2.0 * log(r_1)) * cos(2.0 * pi + r_2)
# Convert to a value between 0 and 1
decimal_bm = box_muller / 10.0 + 0.5
value = min_val + (decimal_bm * (max_val+1 - min_val))
return int(value)
def is_value_valid(self, value):
if (
value is None or
not isinstance(value, list) or
len(value) < 2
):
AppLogging.warning(
"Could not get compute parameters for behavior, input expected is an array of 2 integers, got: {}"
.format(self.get_value()))
return False
return True
def is_range_valid(self, value):
if (
not isinstance(value[0], int) or
not isinstance(value[1], int) or
value[0] > value[1]
):
AppLogging.warning(
"Could not get valid compute parameters for behavior, min: {} max: {}"
.format(value[0], value[1]))
return False
return True
def parse_json(self, value):
parsed_value = None
try:
parsed_value = json.loads(value)
except json.decoder.JSONDecodeError:
# The block below will catch this case
AppLogging.warning(
"Unable to parse configuration for behavior. details: {}"
.format(value))
return None
return parsed_value
def get_configuration(self, value):
parsed_val = self.parse_json(value)
if not self.is_value_valid(parsed_val):
return None
if not self.is_range_valid(parsed_val):
return None
config = {}
config[MIN_KEY] = parsed_val[0]
config[MAX_KEY] = parsed_val[1]
return config
| 26.87037 | 108 | 0.636458 | 2,708 | 0.93315 | 0 | 0 | 0 | 0 | 0 | 0 | 645 | 0.222261 |
200345ba5604afcf2886258ab05830857aef6e34 | 2,128 | py | Python | tests/st/model_zoo_tests/ncf/test_ncf.py | ATestGroup233/mindspore | 5d81221b5896cf7d7c6adb44daef28d92cb43352 | [
"Apache-2.0"
] | 1 | 2021-06-01T12:34:37.000Z | 2021-06-01T12:34:37.000Z | tests/st/model_zoo_tests/ncf/test_ncf.py | ATestGroup233/mindspore | 5d81221b5896cf7d7c6adb44daef28d92cb43352 | [
"Apache-2.0"
] | null | null | null | tests/st/model_zoo_tests/ncf/test_ncf.py | ATestGroup233/mindspore | 5d81221b5896cf7d7c6adb44daef28d92cb43352 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import pytest
from tests.st.model_zoo_tests import utils
@pytest.mark.level0
@pytest.mark.platform_x86_ascend_training
@pytest.mark.platform_arm_ascend_training
@pytest.mark.env_onecard
def test_ncf():
    """End-to-end smoke test: train the model-zoo NCF network for 4 epochs
    on MovieLens and check per-step time and final loss thresholds."""
    cur_path = os.path.dirname(os.path.abspath(__file__))
    model_path = "{}/../../../../model_zoo/official/recommend".format(cur_path)
    model_name = "ncf"
    utils.copy_files(model_path, cur_path, model_name)
    cur_model_path = os.path.join(cur_path, model_name)
    # Shorten training from 20 epochs to 4 to keep CI time bounded.
    old_list = ["train_epochs 20"]
    new_list = ["train_epochs 4"]
    utils.exec_sed_command(old_list, new_list, os.path.join(cur_model_path, "scripts/run_train.sh"))
    # Comment out the dataset-cache pickling lines in src/dataset.py.
    old_list = ["with open(cache_path, \\\"wb\\\")", "pickle.dump"]
    new_list = ["\\# with open(cache_path, \\\"wb\\\")", "\\# pickle.dump"]
    utils.exec_sed_command(old_list, new_list, os.path.join(cur_model_path, "src/dataset.py"))
    dataset_path = os.path.join(utils.data_root, "MovieLens")
    # Launch training in the background; stdout/stderr go to train.log.
    exec_network_shell = "cd ncf; bash scripts/run_train.sh {0} checkpoint/ > train.log 2>&1 &"\
        .format(dataset_path)
    os.system(exec_network_shell)
    # Poll (up to 100 checks) until the background training process exits.
    cmd = "ps -ef|grep python|grep train.py|grep train_epochs|grep -v grep"
    ret = utils.process_check(100, cmd)
    assert ret
    log_file = os.path.join(cur_model_path, "train.log")
    per_step_time = utils.get_perf_data(log_file)
    assert per_step_time < 2.0
    loss = utils.get_loss_data_list(log_file)[-1]
    assert loss < 0.33
| 43.428571 | 100 | 0.698778 | 0 | 0 | 0 | 0 | 1,390 | 0.653195 | 0 | 0 | 1,035 | 0.486372 |
20052b62da3c16dd50cb7d3de8d055728c8e4c25 | 2,577 | py | Python | src/data/tests/test_osm_create_maps.py | j-t-t/crash-model | 898c200edb736584f52834252639655baf132f21 | [
"MIT"
] | null | null | null | src/data/tests/test_osm_create_maps.py | j-t-t/crash-model | 898c200edb736584f52834252639655baf132f21 | [
"MIT"
] | null | null | null | src/data/tests/test_osm_create_maps.py | j-t-t/crash-model | 898c200edb736584f52834252639655baf132f21 | [
"MIT"
] | null | null | null | import os
import shutil
from shapely.geometry import Polygon
from .. import osm_create_maps
from .. import util
TEST_FP = os.path.dirname(os.path.abspath(__file__))
def test_get_width():
    """Width strings parse to ints; empty, list-like or junk values map to 0."""
    cases = [
        ('15.2', 15),
        ('', 0),
        ("['14.9', '12.2']", 0),
        ('t', 0),
    ]
    for raw, expected in cases:
        assert osm_create_maps.get_width(raw) == expected
def test_get_speed():
    """Speed strings parse to ints, taking the max of list-like values."""
    cases = [
        ('', 0),
        ('signals', 0),
        ('60', 60),
        ("['90', '100']", 100),
    ]
    for raw, expected in cases:
        assert osm_create_maps.get_speed(raw) == expected
def test_reproject_and_clean_feats(tmpdir):
    """Smoke test: clean_ways should run on the sample OSM geojson fixture.

    Uses pytest's tmpdir fixture so the cleaned output never touches the
    checked-in test data.
    """
    tmppath = tmpdir.strpath
    shutil.copy(
        TEST_FP + '/data/processed/maps/osm_elements.geojson',
        tmppath
    )

    # For now, just make sure it runs
    osm_create_maps.clean_ways(
        tmppath + '/osm_elements.geojson',
        tmppath + '/docs'
    )
def test_expand_polygon():
    """expand_polygon buffers the polygon only when enough of the crash
    points fall inside (or near) it; otherwise it returns None."""
    test_polygon = {
        'type': 'Polygon',
        'coordinates': [[[-71.0770265, 42.3364517], [-71.0810509, 42.3328703],
                         [-71.0721386, 42.3325241]]]
    }
    points_file = os.path.join(TEST_FP, 'data', 'osm_crash_file.json')

    # Too many points fall outside of the polygon to buffer
    result = osm_create_maps.expand_polygon(test_polygon, points_file)
    assert result is None

    # Reproject the raw lon/lat ring so areas/containment are computed in
    # the same projected coordinate system expand_polygon uses.
    polygon_coords = [util.get_reproject_point(
        x[1], x[0], coords=True) for x in test_polygon['coordinates'][0]]
    orig_shape = Polygon(polygon_coords)

    # With a laxer max_percent threshold the polygon should be expanded.
    result = osm_create_maps.expand_polygon(test_polygon, points_file,
                                            max_percent=.7)
    result_coords = [util.get_reproject_point(
        x[1], x[0], coords=True) for x in result.exterior.coords]
    result_shape = Polygon(result_coords)

    # Check whether the new polygon has a larger area than the old one
    assert result_shape.area > orig_shape.area

    records = util.read_records(points_file, 'crash')

    # The first two points are outside the original shape
    # and the last point is within
    assert orig_shape.contains(records[0].point) is False
    assert orig_shape.contains(records[1].point) is False
    assert orig_shape.contains(records[2].point)

    # The first point should be within the new shape, but not the
    # second point, since it was too far from the original shape
    assert result_shape.contains(records[0].point)
    assert result_shape.contains(records[1].point) is False
    assert result_shape.contains(records[2].point)
| 32.620253 | 78 | 0.680248 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 565 | 0.219247 |
2005c90121b8ada17b872ccdb477c07d716b48b8 | 444 | py | Python | megnet/tests/test_losses.py | abdalazizrashid/megnet | 8ad0fca246465bd57d66392f790c5310c610dfff | [
"BSD-3-Clause"
] | null | null | null | megnet/tests/test_losses.py | abdalazizrashid/megnet | 8ad0fca246465bd57d66392f790c5310c610dfff | [
"BSD-3-Clause"
] | null | null | null | megnet/tests/test_losses.py | abdalazizrashid/megnet | 8ad0fca246465bd57d66392f790c5310c610dfff | [
"BSD-3-Clause"
] | null | null | null | import unittest
import numpy as np
import tensorflow as tf
from megnet.losses import mean_squared_error_with_scale
class TestLosses(unittest.TestCase):
    """Unit tests for megnet loss functions."""

    def test_mse(self):
        """Scaled MSE must equal the plain MSE multiplied by the scale factor."""
        prediction = np.array([0.1, 0.2, 0.3])
        target = np.array([0.05, 0.15, 0.25])
        loss = mean_squared_error_with_scale(prediction, target, scale=100)
        expected = np.mean((prediction - target) ** 2) * 100
        self.assertAlmostEqual(loss.numpy(), expected)
if __name__ == "__main__":
    # Allow running this test module directly: `python test_losses.py`.
    unittest.main()
| 23.368421 | 73 | 0.657658 | 275 | 0.619369 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.022523 |
20062f9c7e5fd8400e59369d471f42de2bae32b0 | 4,264 | py | Python | forbidden/forbidden.py | ukabes/forbidden | 0d2b70ffc28ce4242129795e35fb388685054145 | [
"MIT"
] | null | null | null | forbidden/forbidden.py | ukabes/forbidden | 0d2b70ffc28ce4242129795e35fb388685054145 | [
"MIT"
] | null | null | null | forbidden/forbidden.py | ukabes/forbidden | 0d2b70ffc28ce4242129795e35fb388685054145 | [
"MIT"
] | null | null | null | '''#The Forbidden module
---
The idea is simple;
1. Take a python data structure ( only dicts and lists for now )
and then return a serialized text format called *forbidden*.
2. Take an already serialized *forbidden* format and return the appropriate python data structure.
---
Examples:
---
#### 1.List & Tuples
| Python Lists & Tuples | Forbidden |
|:-----------------------------------------------------:|:-----------------------------------------------------------:|
| ['abc','def','ghi','jkl','mno','pqr','stu'] | abc`0``def`0``ghi`0``jkl`0``mno`0``pqr`0``stu |
| [['ab','cd','ef'],['gh','ij','kl'],['mn','op','qr']] | ab`1``cd`1``ef`1```0``gh`1``ij`1``kl`1```0``mn`1``op`1``qr` |
| [[['ab','cd'],['ef','gh']],[['ij','kl'],['mn','op']]] | ab`2``cd`1``ef`2``gh`0``ij`2``kl`1``mn`2``op |
#### 2.Dictionary
| Python Dictionaries | Forbidden |
|:-----------------------------------------------------:|:-----------------------------------------------------------:|
||
||
||
Note
===
There are only two data types possible here,
1. Numbers (float)
2. Strings
'''
import re
ALLOWED = "allowed"
FORBIDDEN = "forbidden"


def str_to_num(text):
    """Convert a numeric string to int when possible, otherwise float.

    The parameter was renamed from ``str`` so it no longer shadows the
    builtin.  Trying ``int`` first and falling back to ``float`` keeps the
    old behavior for plain numbers while also accepting forms such as
    scientific notation (``"1e3"``) that the previous ``'.' in s`` check
    rejected.
    """
    try:
        return int(text)
    except ValueError:
        return float(text)


class Forbidden():
    '''Serializer between python lists/tuples and the *forbidden* text format.'''

    def __init__(self, data, forbidden_char='`', forbidden_alias=''):
        '''Class constructor

        :param data: could be a list, dictionary, set, array ...
            it could also be an already-serialized forbidden string
        :param forbidden_char: the only character that is not allowed in
            your data; the default is the grave character, hence
            "forbidden grave".
        :param forbidden_alias: replacement used when `forbidden_char`
            appears inside string data
        '''
        self.reset(data, forbidden_char, forbidden_alias)

    def forbid(self):
        '''
        Convert data to the forbidden string format via a hidden recursive
        sibling.  For now only lists and tuples are handled.
        '''
        if self._data_type == FORBIDDEN:
            return self._data
        data = self._data
        return self._forbid_recursive_no_keys(data)

    def _forbid_recursive_no_keys(self, data, depth=0):
        # Leaf sequences are joined with a depth-specific glue; nested
        # sequences are serialized first at depth + 1, then joined here.
        glue = self.join_at(depth)
        if not any(isinstance(element, (list, tuple)) for element in data):
            return data if isinstance(data, str) else glue.join(data)
        else:
            elements = []
            for element in data:
                elements.append(self._forbid_recursive_no_keys(element, depth + 1))
            return self._forbid_recursive_no_keys(elements, depth)

    def allow(self):
        '''
        Convert data back to a python data type (lists for now).
        '''
        data = self.forbid() if self._data_type == ALLOWED else self._data
        return self._allow_recursive_no_keys(data, depth=0)

    def _allow_recursive_no_keys(self, data, depth=0):
        glue = self.join_at(depth)
        if isinstance(data, str) and glue in data:
            elements = []
            for element in data.split(glue):
                elements.append(self._allow_recursive_no_keys(element, depth + 1))
            return elements
        else:
            # Leaves: quoted values are strings, everything else is numeric.
            return data.strip('"') if '"' in data else str_to_num(data)

    def reset(self, data, forbidden_char='`', forbidden_alias=''):
        '''
        Reset a forbidden object, in case you need to pass in different data.
        '''
        self._data_type = FORBIDDEN if isinstance(data, str) else ALLOWED
        self._data = data
        self.forbidden_char = forbidden_char
        self.forbidden_alias = forbidden_alias
        self._clean()

    def join_at(self, depth):
        '''join_at defines the join string

        :param depth: refers to the depth of the data, in particular the python data
        '''
        join_string = '{}{}{}'.format(self.forbidden_char, depth, self.forbidden_char * 2)
        return join_string

    def _clean(self):
        '''Replace forbidden_char in allowed data with forbidden_alias.'''
        if self._data_type == FORBIDDEN:
            pass
        else:
            self._data = list(map(self._clean_recursive, self._data))

    def _clean_recursive(self, data):
        # Strings are escaped and quoted; containers are walked recursively;
        # anything else is stringified so it can be joined later.
        if isinstance(data, str):
            return '"' + data.replace(self.forbidden_char, self.forbidden_alias) + '"'
        elif isinstance(data, list):
            return list(map(self._clean_recursive, data))
        elif isinstance(data, tuple):
            return tuple(map(self._clean_recursive, data))
        elif isinstance(data, dict):
            # Bug fix: `dict(map(self._clean_recursive, data))` iterated only
            # the KEYS and then failed to rebuild a dict from the resulting
            # strings.  Clean keys and values pairwise instead.
            return {self._clean_recursive(k): self._clean_recursive(v)
                    for k, v in data.items()}
        else:
            return str(data)
| 32.8 | 119 | 0.626642 | 2,867 | 0.672373 | 0 | 0 | 0 | 0 | 0 | 0 | 2,122 | 0.497655 |
200653dc56bdb9fd9c626ec37fb718679c27c497 | 8,276 | py | Python | googledataprocauthenticator/tests/test_dataprocmagic.py | mollypi/dataprocmagic | 89ee5c2bf6daa786d0c280e37346bfb4310ec100 | [
"Apache-2.0"
] | 2 | 2021-04-06T20:08:04.000Z | 2021-07-27T14:40:48.000Z | googledataprocauthenticator/tests/test_dataprocmagic.py | mollypi/dataprocmagic | 89ee5c2bf6daa786d0c280e37346bfb4310ec100 | [
"Apache-2.0"
] | 1 | 2020-08-18T00:54:25.000Z | 2020-08-18T00:54:25.000Z | googledataprocauthenticator/tests/test_dataprocmagic.py | mollypi/dataprocmagic | 89ee5c2bf6daa786d0c280e37346bfb4310ec100 | [
"Apache-2.0"
] | 2 | 2020-11-26T16:58:41.000Z | 2021-09-19T10:49:05.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the `%manage_dataproc` and `%spark` magics"""
from mock import patch, MagicMock, PropertyMock
from nose.tools import raises, assert_equals, with_setup
from google.oauth2 import credentials
import googledataprocauthenticator
from googledataprocauthenticator.google import GoogleAuth
from googledataprocauthenticator.magics.dataprocmagics import DataprocMagics
from sparkmagic.livyclientlib.endpoint import Endpoint
from sparkmagic.livyclientlib.livysession import LivySession
from sparkmagic.livyclientlib.exceptions import BadUserConfigurationException
from sparkmagic.utils.utils import parse_argstring_or_throw, initialize_auth
from sparkmagic.utils.constants import SESSION_KIND_SPARK
magic = None
spark_controller = None
shell = None
ipython_display = None
def _setup():
    """Nose fixture: build a DataprocMagics with mocked shell/display/controller."""
    # NOTE(review): the patch target '...DataprocMagics.self.db' looks wrong —
    # 'self' is not a valid attribute-path element; confirm the intended target.
    with patch('googledataprocauthenticator.magics.dataprocmagics.DataprocMagics.self.db', new_callable=PropertyMock,
        return_value=mocked_db):
        global magic, spark_controller, shell, ipython_display
        magic = DataprocMagics(shell=None, widget=MagicMock())
        magic.shell = shell = MagicMock()
        magic.ipython_display = ipython_display = MagicMock()
        magic.spark_controller = spark_controller = MagicMock()
def _teardown():
    """No per-test cleanup needed; _setup() rebuilds all mocks each test."""
    pass
# Canned endpoint/session fixtures shared by the tests below.
stored_endpoints = ("http://url.com", Endpoint("http://url.com", "default-credentials"))
get_session_id_to_name = {1234: 'my_session'}
sessions_mock = {'my_session': LivySession(http_client=MagicMock(), properties={"kind":SESSION_KIND_SPARK, \
    "heartbeatTimeoutInSecond": 60}, ipython_display=ipython_display, session_id=1234)}
sessions_list_mock = [LivySession(http_client=MagicMock(), properties={"kind":SESSION_KIND_SPARK,\
    "heartbeatTimeoutInSecond": 60}, ipython_display=ipython_display, session_id=1234)]
# Fake IPython db payload used to patch DataprocMagics.db in _setup().
mocked_db = {'autorestore/stored_endpoints': stored_endpoints, 'autorestore/get_session_id_to_name': get_session_id_to_name,}
def make_credentials():
    """Build a refresh-token-only google OAuth2 credentials object for tests."""
    credential_kwargs = dict(
        token=None,
        refresh_token='refresh',
        token_uri='token_uri',
        client_id='client_id',
        client_secret='client_secret',
    )
    return credentials.Credentials(**credential_kwargs)
creds = make_credentials()
# (set of known credentialed accounts, active account) returned by the patched lister.
mock_credentialed_accounts_valid_accounts = ({'account@google.com'}, 'account@google.com')
# JSON blob mimicking `gcloud auth describe` output for an authorized user.
AUTH_DESCRIBE_USER = '{"client_id": "client_id", \
"client_secret": "secret", "refresh_token": "refresh","type": "authorized_user"}'
@with_setup(_setup, _teardown)
def test_session_command_parses():
    """`%spark session` should print local session info."""
    print_info_mock = MagicMock()
    magic._print_local_info = print_info_mock
    command = "session"
    magic.spark(command)
    print_info_mock.assert_called_once_with()
@with_setup(_setup, _teardown)
def test_session_endpoint_command_parses():
    """`%spark session -u <url> -i <id>` should print endpoint session info."""
    print_info_mock = MagicMock()
    magic._print_endpoint_info = print_info_mock
    command = "session -u http://url.com -i 1234"
    spark_controller.get_all_sessions_endpoint_info = MagicMock(return_value=None)
    magic.spark(command)
    print_info_mock.assert_called_once_with(None, 1234)
@with_setup(_setup, _teardown)
def test_add_sessions_command_parses_google_default_credentials():
    """`%spark add -t Google -g default-credentials` should build a GoogleAuth endpoint."""
    with patch('google.auth.default', return_value=(creds, 'project'), \
        autospec=True):
        add_sessions_mock = MagicMock()
        spark_controller.add_session = add_sessions_mock
        command = "add"
        name = "-s name"
        language = "-l python"
        account = "-g default-credentials"
        connection_string = "-u http://url.com -t Google"
        line = " ".join([command, name, language, connection_string, account])
        magic.spark(line)
        args = parse_argstring_or_throw(DataprocMagics.spark, line)
        auth_instance = initialize_auth(args)
        add_sessions_mock.assert_called_once_with("name", Endpoint("http://url.com", initialize_auth(args)),
                                                  False, {"kind": "pyspark"})
        assert_equals(auth_instance.url, "http://url.com")
        # NOTE(review): bare isinstance() has no effect — presumably should be
        # `assert isinstance(auth_instance, GoogleAuth)`.
        isinstance(auth_instance, GoogleAuth)
        assert_equals(auth_instance.active_credentials, 'default-credentials')
@with_setup(_setup, _teardown)
def test_add_sessions_command_parses_google_user_credentials():
    """`%spark add -g <user account>` should resolve stored user credentials."""
    with patch('sparkmagic.auth.google.list_credentialed_user_accounts', \
        return_value=mock_credentialed_accounts_valid_accounts), patch('subprocess.check_output',\
        return_value=AUTH_DESCRIBE_USER):
        add_sessions_mock = MagicMock()
        spark_controller.add_session = add_sessions_mock
        command = "add"
        name = "-s name"
        language = "-l python"
        account = "-g account@google.com"
        connection_string = "-u http://url.com -t Google"
        line = " ".join([command, name, language, connection_string, account])
        magic.spark(line)
        args = parse_argstring_or_throw(DataprocMagics.spark, line)
        auth_instance = initialize_auth(args)
        add_sessions_mock.assert_called_once_with("name", Endpoint("http://url.com", initialize_auth(args)),
                                                  False, {"kind": "pyspark"})
        assert_equals(auth_instance.url, "http://url.com")
        # NOTE(review): bare isinstance() has no effect — presumably should be
        # `assert isinstance(auth_instance, GoogleAuth)`.
        isinstance(auth_instance, GoogleAuth)
        assert_equals(auth_instance.active_credentials, 'account@google.com')
@with_setup(_setup, _teardown)
def test_add_sessions_command_parses_session_already_exists():
    """Adding a session whose name is already managed must not create a new one."""
    spark_controller.get_all_sessions_endpoint = MagicMock(return_value=sessions_list_mock)
    get_managed_clients_mock = MagicMock(return_value=sessions_mock)
    spark_controller.get_managed_clients = get_managed_clients_mock
    add_sessions_mock = MagicMock()
    spark_controller.session_manager.add_session = add_sessions_mock
    command = "add"
    name = "-s my_session"
    language = "-l python"
    connection_string = "-u http://url.com -t {} -g account@google.com".format('Google')
    line = " ".join([command, name, language, connection_string])
    magic.spark(line)
    # The stored endpoint/session bookkeeping should be untouched.
    assert_equals(magic.db['autorestore/stored_endpoints'], stored_endpoints)
    assert_equals(magic.db['autorestore/get_session_id_to_name'], get_session_id_to_name)
    add_sessions_mock.assert_not_called()
@raises(BadUserConfigurationException)
@with_setup(_setup, _teardown)
def test_add_sessions_command_raises_google_no_account():
    """`-t Google` without a `-g` account must raise BadUserConfigurationException."""
    with patch('google.auth.default', return_value=(creds, 'project'), \
        autospec=True):
        add_sessions_mock = MagicMock()
        spark_controller.add_session = add_sessions_mock
        command = "add"
        name = "-s name"
        language = "-l python"
        connection_string = "-u http://url.com -t Google"
        line = " ".join([command, name, language, connection_string])
        magic.spark(line)
        args = parse_argstring_or_throw(DataprocMagics.spark, line)
        # Expected to raise; the @raises decorator converts that into a pass.
        initialize_auth(args)
@with_setup(_setup, _teardown)
def test_restore_endpoints():
    """Endpoints stored in the db should be restored onto the magics object."""
    with patch('google.auth.default', return_value=(creds, 'project'),\
        autospec=True):
        assert_equals(magic.endpoints, stored_endpoints)
@with_setup(_setup, _teardown)
def test_restore_sessions():
    """Stored sessions should be re-attached when the magics start up."""
    with patch('google.auth.default', return_value=(creds, 'project'),\
        autospec=True):
        spark_controller.get_all_sessions_endpoint = MagicMock(return_value=sessions_list_mock)
        spark_controller.get_managed_clients = []
        add_sessions_mock = MagicMock()
        spark_controller.session_manager.add_session = add_sessions_mock
        # NOTE(review): assert_called_once_with is checked on a mock that is
        # never invoked above, and the final line compares a MagicMock with a
        # tuple — neither assertion can pass as written; verify the intent.
        add_sessions_mock.assert_called_once_with("my_session", LivySession(http_client=MagicMock(),\
            properties={"kind":SESSION_KIND_SPARK, "heartbeatTimeoutInSecond": 60}, ipython_display=ipython_display, session_id=12345))
        assert_equals(spark_controller, stored_endpoints)
| 45.224044 | 127 | 0.735863 | 0 | 0 | 0 | 0 | 5,281 | 0.63811 | 0 | 0 | 1,908 | 0.230546 |
2006d08163178b2ea091fe980837e1361d44718d | 1,147 | py | Python | p2p.py | barisser/hashfate | ab228706f148221861ea1cfd00fe73dc1a13b33e | [
"MIT"
] | null | null | null | p2p.py | barisser/hashfate | ab228706f148221861ea1cfd00fe73dc1a13b33e | [
"MIT"
] | null | null | null | p2p.py | barisser/hashfate | ab228706f148221861ea1cfd00fe73dc1a13b33e | [
"MIT"
] | null | null | null | import datetime
import requests
import socket
import random
import sys
import time
def now():
    """Return the current local time formatted as 'HH:MM:SS YYYY-MM-DD'.

    Bug fix: with ``import datetime`` the module (not the class) is bound, so
    the original ``datetime.fromtimestamp(...)`` raised AttributeError; the
    class lives at ``datetime.datetime``.
    """
    a = datetime.datetime.fromtimestamp(time.time())
    return a.strftime("%H:%M:%S %Y-%m-%d")
def getmyip():
    """Return this host's public IP by querying checkip.dyndns.org.

    The original sliced the response at fixed offsets (``content[76:89]``),
    which breaks whenever the IP string is not exactly 13 characters long;
    extract the dotted quad with a regex instead.  Returns '' when no IP is
    found in the response body.
    """
    import re
    a = requests.get('http://checkip.dyndns.org')
    body = a.content.decode('utf-8', 'replace')
    match = re.search(r'(\d{1,3}(?:\.\d{1,3}){3})', body)
    return match.group(1) if match else ''
class node:
    # P2P node identified by a SHA-256 hash, tracking neighbors and sockets.
    # NOTE(review): this class references several names that are not defined
    # in the visible module — hashlib (never imported), vectorlength,
    # elementlength, max_neighbors, homeip, and self.create_socket — so
    # instantiation will raise NameError/AttributeError as written; confirm
    # where these are supposed to come from.
    def __init__(self, listeningport):
        timestamp=time.time()
        self.listeningport=listeningport
        self.timestamp=timestamp
        # Random hash id derived from the current time plus random jitter.
        self.hashid=hashlib.sha256(str(timestamp+random.random()*1000000)).hexdigest()
        inth=int(self.hashid,16)
        self.hashvector=[0]*vectorlength
        self.neighbors=[[-1,'',8888]]*max_neighbors #list of 2 element arrays of HASHID, IP ADDRESS, AND THEIR PORT
        self.ip=homeip
        self.logs=''
        r=0
        # Decompose the integer hash into base-`elementlength` digits.
        # NOTE(review): '/' is float division on Python 3; '//' is presumably
        # intended here, otherwise inth degrades to floats.
        while inth>0:
            self.hashvector[r]=int(inth%elementlength)
            inth=inth/elementlength
            r=r+1
        self.sockets=[0]*(max_neighbors+1) #first socket should be SERVER socket
        #listening socket
        self.sockets[0]=self.create_socket('',self.listeningport)
        #self.create_socket('',listeningport,0)
| 26.068182 | 117 | 0.633827 | 852 | 0.742807 | 0 | 0 | 0 | 0 | 0 | 0 | 208 | 0.181343 |
200b0486c0debf06bb8e2f57786c25b865a7e53f | 23,040 | py | Python | odps/models/resource.py | nurikk/aliyun-odps-python-sdk | 7887088998f03e2e4499c28be463e45fb916b919 | [
"Apache-2.0"
] | null | null | null | odps/models/resource.py | nurikk/aliyun-odps-python-sdk | 7887088998f03e2e4499c28be463e45fb916b919 | [
"Apache-2.0"
] | null | null | null | odps/models/resource.py | nurikk/aliyun-odps-python-sdk | 7887088998f03e2e4499c28be463e45fb916b919 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .core import LazyLoad
from .cache import cache, cache_parent
from .. import serializers, utils, types, errors, compat
from ..compat import Enum, six
RESOURCE_SIZE_MAX = 512 * 1024 * 1024 # a single resource's size must be at most 512M
class Resource(LazyLoad):
    """
    Resource is useful when writing UDF or MapReduce. This is an abstract class.

    Basically, resource can be either a file resource or a table resource.
    File resource can be ``file``, ``py``, ``jar``, ``archive`` in details.

    .. seealso:: :class:`odps.models.FileResource`, :class:`odps.models.PyResource`,
                 :class:`odps.models.JarResource`, :class:`odps.models.ArchiveResource`,
                 :class:`odps.models.TableResource`
    """
    __slots__ = 'content_md5', 'is_temp_resource', 'volume_path', '_type_indicator'

    class Type(Enum):
        # Resource type discriminator; 'UNKOWN' (sic) is kept as-is because
        # the enum value doubles as the serialized string.
        FILE = 'FILE'
        JAR = 'JAR'
        PY = 'PY'
        ARCHIVE = 'ARCHIVE'
        TABLE = 'TABLE'
        VOLUMEFILE = 'VOLUMEFILE'
        VOLUMEARCHIVE = 'VOLUMEARCHIVE'
        UNKOWN = 'UNKOWN'

    _type_indicator = 'type'

    # XML-serialized fields populated from the service's resource metadata.
    name = serializers.XMLNodeField('Name')
    owner = serializers.XMLNodeField('Owner')
    comment = serializers.XMLNodeField('Comment')
    type = serializers.XMLNodeField('ResourceType', parse_callback=lambda t: Resource.Type(t.upper()))
    creation_time = serializers.XMLNodeField('CreationTime', parse_callback=utils.parse_rfc822)
    last_modified_time = serializers.XMLNodeField('LastModifiedTime', parse_callback=utils.parse_rfc822)
    last_updator = serializers.XMLNodeField('LastUpdator')
    size = serializers.XMLNodeField('ResourceSize', parse_callback=int)
    source_table_name = serializers.XMLNodeField('TableName')

    @classmethod
    def _get_cls(cls, typo):
        # Map a Resource.Type (or its string form) to the concrete subclass.
        if typo is None:
            return cls

        if isinstance(typo, six.string_types):
            typo = Resource.Type(typo.upper())

        # Subclasses are resolved lazily by name so this method can refer to
        # classes defined later in the module.
        clz = lambda name: globals()[name]
        if typo == Resource.Type.FILE:
            return clz('FileResource')
        elif typo == Resource.Type.JAR:
            return clz('JarResource')
        elif typo == Resource.Type.PY:
            return clz('PyResource')
        elif typo == Resource.Type.ARCHIVE:
            return clz('ArchiveResource')
        elif typo == Resource.Type.TABLE:
            return clz('TableResource')
        elif typo == Resource.Type.VOLUMEARCHIVE:
            return clz('VolumeArchiveResource')
        elif typo == Resource.Type.VOLUMEFILE:
            return clz('VolumeFileResource')
        else:
            return cls

    def create(self, overwrite=False, **kw):
        # Abstract: concrete subclasses implement the actual upload.
        raise NotImplementedError

    @staticmethod
    def _filter_cache(_, **kwargs):
        # Only cache instances whose concrete type is already known.
        return kwargs.get('type') is not None and kwargs['type'] != Resource.Type.UNKOWN

    @cache
    def __new__(cls, *args, **kwargs):
        typo = kwargs.get('type')
        if typo is not None or (cls != Resource and issubclass(cls, Resource)):
            # Type is known (or a subclass was asked for): dispatch directly.
            return object.__new__(cls._get_cls(typo))

        # Type unknown: fetch the metadata once, then re-dispatch to the
        # proper subclass with the freshly loaded attributes.
        kwargs['type'] = Resource.Type.UNKOWN
        obj = Resource(**kwargs)
        obj.reload()
        return Resource(**obj.extract())

    def __init__(self, **kwargs):
        # Normalize a string `type` kwarg into the Resource.Type enum.
        typo = kwargs.get('type')
        if isinstance(typo, six.string_types):
            kwargs['type'] = Resource.Type(typo.upper())
        super(Resource, self).__init__(**kwargs)

    @property
    def _project(self):
        # Parent chain: resources collection -> project.
        return self._parent._parent.name

    @property
    def project(self):
        return self._project

    def reload(self):
        """Fetch the resource's metadata headers and populate attributes."""
        url = self.resource()
        resp = self._client.get(url, params={'meta': ''})

        self.owner = resp.headers.get('x-odps-owner')
        resource_type = resp.headers.get('x-odps-resource-type')
        self.type = Resource.Type(resource_type.upper())
        self.comment = resp.headers.get('x-odps-comment')
        self.last_updator = resp.headers.get('x-odps-updator')

        size = resp.headers.get('x-odps-resource-size')
        self.size = None if size is None else int(size)

        self.creation_time = utils.parse_rfc822(
            resp.headers.get('x-odps-creation-time'))
        self.last_modified_time = utils.parse_rfc822(
            resp.headers.get('Last-Modified'))

        self.source_table_name = resp.headers.get('x-odps-copy-table-source')
        self.volume_path = resp.headers.get('x-odps-copy-file-source')
        self.content_md5 = resp.headers.get('Content-MD5')

        self._loaded = True

    def _reload_size(self):
        # Lightweight metadata fetch that only refreshes `size`.
        url = self.resource()
        resp = self._client.get(url, params={'meta': ''})

        size = resp.headers.get('x-odps-resource-size')
        self.size = None if size is None else int(size)

    def update(self, **kw):
        # Abstract: concrete subclasses implement the actual update.
        raise NotImplementedError

    def drop(self):
        """Delete this resource via its parent collection."""
        return self.parent.delete(self)
@cache_parent
class FileResource(Resource):
"""
File resource represents for a file.
Use ``open`` method to open this resource as an file-like object.
"""
__slots__ = '_fp', '_mode', '_opened', '_size', '_need_commit', \
'_open_binary', '_encoding'
    class Mode(Enum):
        # Open modes mirroring the builtin ``open`` semantics.
        READ = 'r'
        WRITE = 'w'                 # truncate on open
        APPEND = 'a'
        READWRITE = 'r+'
        TRUNCEREADWRITE = 'w+'      # truncate, then read+write
        APPENDREADWRITE = 'a+'      # read anywhere, writes go to the end
def create(self, overwrite=False, **kw):
file_obj = kw.pop('file_obj', kw.pop('fileobj', None))
if file_obj is None:
raise ValueError('parameter `file_obj` cannot be None, either string or file-like object')
if isinstance(file_obj, six.text_type):
file_obj = file_obj.encode('utf-8')
if isinstance(file_obj, six.binary_type):
file_obj = six.BytesIO(file_obj)
if self.name is None or len(self.name.strip()) == 0:
raise errors.ODPSError('File Resource Name should not empty.')
method = self._client.post if not overwrite else self._client.put
url = self.parent.resource() if not overwrite else self.resource()
headers = {'Content-Type': 'application/octet-stream',
'Content-Disposition': 'attachment;filename=%s' % self.name,
'x-odps-resource-type': self.type.value.lower(),
'x-odps-resource-name': self.name}
if self._getattr('comment') is not None:
headers['x-odps-comment'] = self.comment
if self._getattr('is_temp_resource'):
headers['x-odps-resource-istemp'] = 'true' if self.is_temp_resource else 'false'
if not isinstance(file_obj, six.string_types):
file_obj.seek(0)
content = file_obj.read()
else:
content = file_obj
method(url, content, headers=headers)
if overwrite:
self.reload()
return self
def __init__(self, **kw):
super(FileResource, self).__init__(**kw)
self.type = Resource.Type.FILE
self._fp = None
self._mode = FileResource.Mode.READ
self._open_binary = False
self._encoding = None
self._size = 0
self._opened = False
self._need_commit = False
def _is_create(self):
if self._loaded:
return False
try:
self._reload_size()
return False
except errors.NoSuchObject:
return True
    def open(self, mode='r', encoding='utf-8'):
        """
        The argument ``mode`` stands for the open mode for this file resource.
        It can be binary mode if the 'b' is inside. For instance,
        'rb' means opening the resource as read binary mode
        while 'r+b' means opening the resource as read+write binary mode.
        This is most important when the file is actually binary such as tar or jpeg file,
        so be aware of opening this file as a correct mode.

        Basically, the text mode can be 'r', 'w', 'a', 'r+', 'w+', 'a+'
        just like the builtin python ``open`` method.

        * ``r`` means read only
        * ``w`` means write only, the file will be truncated when opening
        * ``a`` means append only
        * ``r+`` means read+write without constraint
        * ``w+`` will truncate first then opening into read+write
        * ``a+`` can read+write, however the written content can only be appended to the end

        :param mode: the mode of opening file, described as above
        :param encoding: utf-8 as default
        :return: file-like object

        :Example:

        >>> with resource.open('r') as fp:
        >>>     fp.read(1)  # read one unicode character
        >>>     fp.write('test')  # wrong, cannot write under read mode
        >>>
        >>> with resource.open('wb') as fp:
        >>>     fp.readlines() # wrong, cannot read under write mode
        >>>     fp.write('hello world') # write bytes
        >>>
        >>> with resource.open('test_resource', 'r+') as fp:  # open as read-write mode
        >>>     fp.seek(5)
        >>>     fp.truncate()
        >>>     fp.flush()
        """
        # TODO: when reading, do not read all the data at once

        # Any 'b' in the mode selects binary mode; strip it before parsing
        # the remaining text mode into the Mode enum.
        if 'b' in mode:
            self._open_binary = True
            mode = mode.replace('b', '')
        self._mode = FileResource.Mode(mode)
        self._encoding = encoding

        if self._mode in (FileResource.Mode.WRITE, FileResource.Mode.TRUNCEREADWRITE):
            # 'w'/'w+' truncate: start from an empty in-memory buffer.
            io_clz = six.BytesIO if self._open_binary else six.StringIO
            self._fp = io_clz()
            self._size = 0
        else:
            # All other modes need the current remote content first.
            self._fp = self.parent.read_resource(
                self, text_mode=not self._open_binary, encoding=self._encoding)
            self._reload_size()
            self._sync_size()

        self._opened = True
        return self
def _check_read(self):
if not self._opened:
raise IOError('I/O operation on non-open resource')
if self._mode in (FileResource.Mode.WRITE, FileResource.Mode.APPEND):
raise IOError('Resource not open for reading')
def _sync_size(self):
curr_pos = self.tell()
self.seek(0, compat.SEEK_END)
self._size = self.tell()
self.seek(curr_pos)
def read(self, size=-1):
"""
Read the file resource, read all as default.
:param size: unicode or byte length depends on text mode or binary mode.
:return: unicode or bytes depends on text mode or binary mode
:rtype: str or unicode(Py2), bytes or str(Py3)
"""
self._check_read()
return self._fp.read(size)
def readline(self, size=-1):
"""
Read a single line.
:param size: If the size argument is present and non-negative,
it is a maximum byte count (including the trailing newline)
and an incomplete line may be returned.
When size is not 0,
an empty string is returned only when EOF is encountered immediately
:return: unicode or bytes depends on text mode or binary mode
:rtype: str or unicode(Py2), bytes or str(Py3)
"""
self._check_read()
return self._fp.readline(size)
    def readlines(self, sizehint=-1):
        """
        Read the remaining content as a list of lines.

        :param sizehint: If the optional sizehint argument is present, instead
            of reading up to EOF, whole lines totalling approximately sizehint
            bytes (possibly after rounding up to an internal buffer size)
            are read.
        :return: lines
        :rtype: list
        """
        self._check_read()
        return self._fp.readlines(sizehint)
def _check_write(self):
if not self._opened:
raise IOError('I/O operation on non-open resource')
if self._mode == FileResource.Mode.READ:
raise IOError('Resource not open for writing')
def _check_size(self):
if self._size > RESOURCE_SIZE_MAX:
raise IOError('Single resource\'s max size is %sM' %
(RESOURCE_SIZE_MAX / (1024 ** 2)))
def _convert(self, content):
if self._open_binary and isinstance(content, six.text_type):
return content.encode(self._encoding)
elif not self._open_binary and isinstance(content, six.binary_type):
return content.decode(self._encoding)
return content
def write(self, content):
"""
Write content into the file resource
:param content: content to write
:return: None
"""
content = self._convert(content)
length = len(content)
self._check_write()
if self._mode in (FileResource.Mode.APPEND, FileResource.Mode.APPENDREADWRITE):
self.seek(0, compat.SEEK_END)
if length > 0:
self._need_commit = True
res = self._fp.write(content)
self._sync_size()
self._check_size()
return res
def writelines(self, seq):
"""
Write lines into the file resource.
:param seq: lines
:return: None
"""
seq = [self._convert(s) for s in seq]
length = sum(len(s) for s in seq)
self._check_write()
if self._mode in (FileResource.Mode.APPEND, FileResource.Mode.APPENDREADWRITE):
self.seek(0, compat.SEEK_END)
if length > 0:
self._need_commit = True
res = self._fp.writelines(seq)
self._sync_size()
self._check_size()
return res
    def seek(self, pos, whence=compat.SEEK_SET):  # io.SEEK_SET
        """
        Seek to some place.

        :param pos: position to seek
        :param whence: SEEK_SET (default), SEEK_CUR, or SEEK_END
            (2 seeks relative to the end)
        :return: whatever the underlying buffer's ``seek`` returns
            (the new absolute position on Python 3 IO objects)
        """
        return self._fp.seek(pos, whence)
    def tell(self):
        """
        Tell the current position within the resource's buffer.

        :return: current position
        """
        return self._fp.tell()
def truncate(self, size=None):
"""
Truncate the file resource's size.
:param size: If the optional size argument is present,
the file is truncated to (at most) that size.
The size defaults to the current position.
:return: None
"""
self._check_write()
curr_pos = self.tell()
self._fp.truncate(size)
self.seek(0, compat.SEEK_END)
self._size = self.tell()
self.seek(curr_pos)
self._need_commit = True
def flush(self):
"""
Commit the change to ODPS if any change happens.
Close will do this automatically.
:return: None
"""
if self._need_commit:
is_create = self._is_create()
resources = self.parent
if is_create:
resources.create(self=self, file_obj=self._fp)
else:
resources.update(obj=self, file_obj=self._fp)
self._need_commit = False
def close(self):
"""
Close this file resource.
:return: None
"""
self.flush()
self._fp = None
self._size = 0
self._need_commit = False
self._opened = False
    def __iter__(self):
        # Iterating the resource yields lines, mirroring built-in files.
        self._check_read()
        return self._fp.__iter__()
    def __next__(self):
        # Python 3 iterator protocol: yield the next line.
        self._check_read()
        return next(self._fp)
    next = __next__  # Python 2 iterator protocol alias
    def __enter__(self):
        # Context-manager support: ``with`` yields the resource itself.
        return self
    def __exit__(self, *_):
        # Always close (and therefore flush) on context exit, even on error.
        self.close()
    def update(self, file_obj):
        # Push new content for this resource through the parent collection.
        return self._parent.update(self, file_obj=file_obj)
@cache_parent
class JarResource(FileResource):
    """
    File resource wrapping a ``.jar`` file.
    """
    def __init__(self, **kw):
        super(JarResource, self).__init__(**kw)
        # Force the resource type regardless of what the caller supplied.
        self.type = Resource.Type.JAR
@cache_parent
class PyResource(FileResource):
    """
    File resource wrapping a ``.py`` file.
    """
    def __init__(self, **kw):
        super(PyResource, self).__init__(**kw)
        # Force the resource type regardless of what the caller supplied.
        self.type = Resource.Type.PY
@cache_parent
class ArchiveResource(FileResource):
    """
    File resource wrapping a compressed file such as
    .zip/.tgz/.tar.gz/.tar/jar.
    """
    def __init__(self, **kw):
        super(ArchiveResource, self).__init__(**kw)
        # Force the resource type regardless of what the caller supplied.
        self.type = Resource.Type.ARCHIVE
@cache_parent
class TableResource(Resource):
    """
    Take a table as a resource.

    The resource carries no file content of its own: it references a source
    table (``project.table``), optionally narrowed to a single partition.
    """
    def __init__(self, **kw):
        # Pull the table-location keywords out before the generic Resource
        # initializer consumes the remaining keyword arguments.
        project_name = kw.pop('project_name', None)
        table_name = kw.pop('table_name', None)
        partition_spec = kw.pop('partition', None)
        super(TableResource, self).__init__(**kw)
        self._init(project_name=project_name, table_name=table_name,
                   partition=partition_spec)
    def create(self, overwrite=False, **kw):
        """
        Create the table resource on the server (POST), or overwrite an
        existing one of the same name (PUT).

        :param overwrite: replace an existing resource instead of creating
        :return: the created (or refreshed) resource object
        :raises: :class:`odps.errors.ODPSError` if the name is empty
        """
        if self.name is None or len(self.name.strip()) == 0:
            raise errors.ODPSError('Table Resource Name should not be empty.')
        method = self._client.post if not overwrite else self._client.put
        url = self.parent.resource() if not overwrite else self.resource()
        headers = {'Content-Type': 'text/plain',
                   'x-odps-resource-type': self.type.value.lower(),
                   'x-odps-resource-name': self.name,
                   'x-odps-copy-table-source': self.source_table_name}
        if self._getattr('comment') is not None:
            headers['x-odps-comment'] = self._getattr('comment')
        method(url, '', headers=headers)
        if overwrite:
            # Evict the stale cached object so the next access re-fetches
            # the updated state from the server.
            del self.parent[self.name]
            return self.parent[self.name]
        return self
    def _init(self, project_name=None, table_name=None, partition=None):
        # Resolve the source table reference; for cross-project tables,
        # re-root the parent resources collection onto that project.
        project_name = project_name or self._project
        if project_name is not None and project_name != self._project:
            from .projects import Projects
            self._parent = Projects(_client=self._client)[project_name].resources
        if table_name is not None:
            self.source_table_name = '%s.%s' % (project_name, table_name)
        if partition is not None:
            # BUG FIX: ``partition_spec`` used to be bound only inside the
            # ``isinstance`` branch, so passing a ready-made
            # ``types.PartitionSpec`` raised NameError below. Normalize
            # first, then use the spec unconditionally.
            if not isinstance(partition, types.PartitionSpec):
                partition = types.PartitionSpec(partition)
            self.source_table_name = '%s partition(%s)' \
                                     % (self.source_table_name.split(' partition(')[0],
                                        partition)
    def get_source_table(self):
        """Return the source :class:`odps.models.Table`, or None if unset."""
        if self.source_table_name is None:
            return
        splits = self.source_table_name.split(' partition(')
        src = splits[0]
        if '.' not in src:
            raise ValueError('Malformed source table name: %s' % src)
        project_name, table_name = tuple(src.split('.', 1))
        from .projects import Projects
        # CONSISTENCY FIX: pass the client as ``_client`` exactly as
        # ``_init`` above does (``client=`` was a different keyword).
        return Projects(_client=self._client)[project_name].tables[table_name]
    def get_source_table_partition(self):
        """Return the source partition as a PartitionSpec, or None."""
        if self.source_table_name is None:
            return
        splits = self.source_table_name.split(' partition(')
        if len(splits) < 2:
            return
        partition = splits[1].split(')', 1)[0].strip()
        return types.PartitionSpec(partition)
    @property
    def table(self):
        """
        Get the table object.

        :return: source table
        :rtype: :class:`odps.models.Table`

        .. seealso:: :class:`odps.models.Table`
        """
        return self.get_source_table()
    @property
    def partition(self):
        """
        Get the source table partition.

        :return: the source table partition, or None when no partition is set
        """
        pt = self.get_source_table_partition()
        if pt is None:
            return
        return self.get_source_table().get_partition(pt)
    def open_reader(self, **kwargs):
        """
        Open a reader on the source table (restricted to the source
        partition when one is set).
        """
        return self.get_source_table().open_reader(partition=self.get_source_table_partition(), **kwargs)
    def open_writer(self, **kwargs):
        """
        Open a writer on the source table (restricted to the source
        partition when one is set).
        """
        return self.get_source_table().open_writer(partition=self.get_source_table_partition(), **kwargs)
    def update(self, project_name=None, table_name=None, partition=None):
        """
        Update this resource.

        :param project_name: the source table's project
        :param table_name: the source table's name
        :param partition: the source table's partition
        :return: self
        """
        self._init(project_name=project_name, table_name=table_name,
                   partition=partition)
        resources = self.parent
        return resources.update(self)
@cache_parent
class VolumeResource(Resource):
    """Resource whose content is copied from a path inside an ODPS volume
    (see the ``x-odps-copy-file-source`` header below)."""
    def create(self, overwrite=False, **kw):
        # Create (POST) a new resource, or overwrite (PUT) an existing one.
        if self.name is None or len(self.name.strip()) == 0:
            raise errors.ODPSError('Volume Resource Name should not be empty.')
        method = self._client.post if not overwrite else self._client.put
        url = self.parent.resource() if not overwrite else self.resource()
        headers = {'Content-Type': 'text/plain',
                   'x-odps-resource-type': self.type.value.lower(),
                   'x-odps-resource-name': self.name,
                   'x-odps-copy-file-source': self.volume_path}
        if self._getattr('comment') is not None:
            headers['x-odps-comment'] = self._getattr('comment')
        method(url, '', headers=headers)
        if overwrite:
            # Evict the stale cached object so the fresh state is re-fetched.
            del self.parent[self.name]
            return self.parent[self.name]
        return self
@cache_parent
class VolumeFileResource(VolumeResource):
    """
    Volume resource representing a single volume file.
    """
    def __init__(self, **kw):
        # Strip ``volume_file`` before delegating; it is consumed in
        # ``create`` instead of by the base initializer.
        okw = kw.copy()
        okw.pop('volume_file', None)
        super(VolumeFileResource, self).__init__(**okw)
        self.type = Resource.Type.VOLUMEFILE
    def create(self, overwrite=False, **kw):
        # Accept a volume file object and record its path as the copy source.
        if 'volume_file' in kw:
            vf = kw.pop('volume_file')
            self.volume_path = vf.path
        return super(VolumeFileResource, self).create(overwrite, **kw)
@cache_parent
class VolumeArchiveResource(VolumeFileResource):
    """
    Volume resource representing a volume archive.
    """
    def __init__(self, **kw):
        super(VolumeArchiveResource, self).__init__(**kw)
        # Force the resource type regardless of what the caller supplied.
        self.type = Resource.Type.VOLUMEARCHIVE
| 31.735537 | 105 | 0.602214 | 22,012 | 0.955382 | 0 | 0 | 18,985 | 0.824002 | 0 | 0 | 7,824 | 0.339583 |
200d13d0ad19224b088e6f4e7f46fd1116b6eb06 | 525 | py | Python | src/rust/iced-x86-py/src/iced_x86/CC_g.py | clayne/iced | dcd3db725b1137fec4d2bda9b17587cead49bf4d | [
"MIT"
] | 1,018 | 2018-09-07T20:12:43.000Z | 2021-01-17T18:41:10.000Z | src/rust/iced-x86-py/src/iced_x86/CC_g.py | clayne/iced | dcd3db725b1137fec4d2bda9b17587cead49bf4d | [
"MIT"
] | 127 | 2018-09-07T19:33:48.000Z | 2021-01-17T22:20:33.000Z | src/rust/iced-x86-py/src/iced_x86/CC_g.py | clayne/iced | dcd3db725b1137fec4d2bda9b17587cead49bf4d | [
"MIT"
] | 146 | 2018-09-09T12:38:30.000Z | 2021-01-18T23:37:11.000Z | # SPDX-License-Identifier: MIT
# Copyright (C) 2018-present iced project and contributors
# ⚠️This file was generated by GENERATOR!🦹♂️
# pylint: disable=invalid-name
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
"""
Mnemonic condition code selector (eg. ``JG`` / ``JNLE``)
"""
import typing
if typing.TYPE_CHECKING:
    # Static type checkers see the real CC_g type from the extension
    # module; at runtime the constants below are plain ints.
    from ._iced_x86_py import CC_g
else:
    CC_g = int
G: CC_g = 0 # type: ignore
"""
``JG``, ``CMOVG``, ``SETG``
"""
NLE: CC_g = 1 # type: ignore
"""
``JNLE``, ``CMOVNLE``, ``SETNLE``
"""
| 18.75 | 58 | 0.655238 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 407 | 0.756506 |
200ec8c2db65887cd65514805e44e172c6bb42bb | 7,936 | py | Python | python/foglamp/plugins/south/http_south/http_south.py | ashwinscale/FogLAMP | dac6f286d31978b6ce00303df8398ea5b2031d79 | [
"Apache-2.0"
] | null | null | null | python/foglamp/plugins/south/http_south/http_south.py | ashwinscale/FogLAMP | dac6f286d31978b6ce00303df8398ea5b2031d79 | [
"Apache-2.0"
] | 1 | 2018-05-10T16:04:34.000Z | 2018-05-10T16:04:34.000Z | python/foglamp/plugins/south/http_south/http_south.py | ashwinscale/FogLAMP | dac6f286d31978b6ce00303df8398ea5b2031d79 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
"""HTTP Listener handler for sensor readings"""
import asyncio
import copy
import sys
from aiohttp import web
from foglamp.common import logger
from foglamp.common.web import middleware
from foglamp.plugins.common import utils
from foglamp.services.south.ingest import Ingest
__author__ = "Amarendra K Sinha"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
_LOGGER = logger.setup(__name__, level=20)
_CONFIG_CATEGORY_NAME = 'HTTP_SOUTH'
_CONFIG_CATEGORY_DESCRIPTION = 'South Plugin HTTP Listener'
# Default configuration category for the plugin; each entry follows the
# FogLAMP configuration item schema: description / type / default.
_DEFAULT_CONFIG = {
    'plugin': {
        'description': 'South Plugin HTTP Listener',
        'type': 'string',
        'default': 'http_south'
    },
    'port': {
        'description': 'Port to listen on',
        'type': 'integer',
        'default': '6683',
    },
    'host': {
        'description': 'Address to accept data on',
        'type': 'string',
        'default': '0.0.0.0',
    },
    'uri': {
        'description': 'URI to accept data on',
        'type': 'string',
        'default': 'sensor-reading',
    },
    'management_host': {
        'description': 'Management host',
        'type': 'string',
        'default': '127.0.0.1',
    }
}
def plugin_info():
    """Return the plugin descriptor (name, version, mode, interface and
    default configuration) used by the South service to register this
    plugin."""
    info = {
        'name': 'http_south',
        'version': '1.0',
        'mode': 'async',
        'type': 'south',
        'interface': '1.0',
        'config': _DEFAULT_CONFIG
    }
    return info
def plugin_init(config):
    """Registers HTTP Listener handler to accept sensor readings.

    Args:
        config: JSON configuration document for the South device
            configuration category

    Returns:
        handle: object to be used in future calls to the plugin
            (the configuration document itself)
    """
    return config
def plugin_start(data):
    """Start the aiohttp listener for incoming sensor readings.
    Builds the web application, registers the POST ingest endpoint and
    schedules the server on the current event loop; ``app``, ``handler``
    and (once bound) ``server`` are stored back into the handle so that
    ``_plugin_stop`` can tear them down later.
    Args:
        data: handle returned by the plugin initialisation call
    """
    try:
        host = data['host']['value']
        port = data['port']['value']
        uri = data['uri']['value']
        loop = asyncio.get_event_loop()
        app = web.Application(middlewares=[middleware.error_middleware])
        app.router.add_route('POST', '/{}'.format(uri), HttpSouthIngest.render_post)
        handler = app.make_handler()
        server_coro = loop.create_server(handler, host, port)
        future = asyncio.ensure_future(server_coro)
        data['app'] = app
        data['handler'] = handler
        data['server'] = None
        def f_callback(f):
            # _LOGGER.info(repr(f.result()))
            """ <Server sockets=
            [<socket.socket fd=17, family=AddressFamily.AF_INET, type=2049,proto=6, laddr=('0.0.0.0', 6683)>]>"""
            # The server binds asynchronously; store the live Server object
            # on the handle once creation completes.
            data['server'] = f.result()
        future.add_done_callback(f_callback)
    except Exception as e:
        _LOGGER.exception(str(e))
def plugin_reconfigure(handle, new_config):
    """ Reconfigures the plugin.
    Should be called when the configuration of the plugin is changed during
    the operation of the South device service; the new configuration
    category is passed in.

    Args:
        handle: handle returned by the plugin initialisation call
        new_config: JSON object representing the new configuration category

    Returns:
        new_handle: new handle to be used in the future calls
    """
    _LOGGER.info("Old config for HTTP_SOUTH plugin {} \n new config {}".format(handle, new_config))
    # Compare the old and new categories to see which keys actually changed.
    diff = utils.get_diff(handle, new_config)
    # A change to any network-facing key requires a full restart.
    if any(key in diff for key in ('port', 'host', 'management_host')):
        _plugin_stop(handle)
        new_handle = plugin_init(new_config)
        new_handle['restart'] = 'yes'
        _LOGGER.info("Restarting HTTP_SOUTH plugin due to change in configuration keys [{}]".format(', '.join(diff)))
        return new_handle
    new_handle = copy.deepcopy(handle)
    new_handle['restart'] = 'no'
    return new_handle
def _plugin_stop(handle):
    """ Stops the plugin doing required cleanup, to be called prior to the South device service being shut down.
    Args:
        handle: handle returned by the plugin initialisation call
    Returns:
    Raises:
    """
    _LOGGER.info('Stopping South HTTP plugin.')
    try:
        app = handle['app']
        handler = handle['handler']
        server = handle['server']
        # Teardown order: stop accepting connections first, then shut down
        # the application, the connection handler, and finally clean up.
        if server:
            server.close()
            asyncio.ensure_future(server.wait_closed())
        asyncio.ensure_future(app.shutdown())
        asyncio.ensure_future(handler.shutdown(60.0))
        asyncio.ensure_future(app.cleanup())
    except Exception as e:
        _LOGGER.exception(str(e))
        raise
def plugin_shutdown(handle):
    """ Shuts down the plugin, doing required cleanup; to be called prior to the South device service being shut down.
    Args:
        handle: handle returned by the plugin initialisation call
    Returns:
    Raises:
    """
    _plugin_stop(handle)
    _LOGGER.info('South HTTP plugin shut down.')
# TODO: Implement FOGL-701 (implement AuditLogger which logs to DB and can be used by all ) for this class
class HttpSouthIngest(object):
    """Handles incoming sensor readings from HTTP Listener"""
    @staticmethod
    async def render_post(request):
        """Store sensor readings from CoAP to FogLAMP
        Args:
            request:
                The payload decodes to JSON similar to the following:
                .. code-block:: python
                    {
                        "timestamp": "2017-01-02T01:02:03.23232Z-05:00",
                        "asset": "pump1",
                        "key": "80a43623-ebe5-40d6-8d80-3f892da9b3b4",
                        "readings": {"humidity": 0.0, "temperature": -40.0}
                    }
                }
        Example:
            curl -X POST http://localhost:6683/sensor-reading -d '{"timestamp": "2017-01-02T01:02:03.23232Z-05:00", "asset": "pump1", "key": "80a43623-ebe5-40d6-8d80-3f892da9b3b4", "readings": {"humidity": 0.0, "temperature": -40.0}}'
        """
        # TODO: The payload is documented at
        # https://docs.google.com/document/d/1rJXlOqCGomPKEKx2ReoofZTXQt9dtDiW_BHU7FYsj-k/edit#
        # and will be moved to a .rst file
        # TODO: Decide upon the correct format of message
        message = {'result': 'success'}
        try:
            # Refuse readings while the ingest subsystem cannot accept them.
            if not Ingest.is_available():
                message = {'busy': True}
                raise web.HTTPServiceUnavailable(reason=message)
            try:
                payload = await request.json()
            except Exception:
                raise ValueError('Payload must be a dictionary')
            asset = payload['asset']
            timestamp = payload['timestamp']
            key = payload['key']
            # readings or sensor_values are optional
            try:
                readings = payload['readings']
            except KeyError:
                readings = payload['sensor_values']  # sensor_values is deprecated
            # if optional then
            # TODO: confirm, do we want to check this?
            if not isinstance(readings, dict):
                raise ValueError('readings must be a dictionary')
            await Ingest.add_readings(asset=asset, timestamp=timestamp, key=key, readings=readings)
        except (KeyError, ValueError, TypeError) as e:
            # Malformed payloads count as discarded readings and map to 400.
            Ingest.increment_discarded_readings()
            _LOGGER.exception("%d: %s", web.HTTPBadRequest.status_code, str(e))
            raise web.HTTPBadRequest(reason=str(e))
        except Exception as ex:
            Ingest.increment_discarded_readings()
            _LOGGER.exception("%d: %s", web.HTTPInternalServerError.status_code, str(ex))
            raise web.HTTPInternalServerError(reason=str(ex))
        return web.json_response(message)
| 32.658436 | 234 | 0.610509 | 2,709 | 0.341356 | 0 | 0 | 2,611 | 0.329007 | 2,593 | 0.326739 | 3,956 | 0.498488 |
200f798e7e0f95b3ed264166571c7a17426aefcb | 4,924 | py | Python | roberta/inference/run_classifier_infer_cv.py | LiRunyi2001/cnSoftBei | 72b90033ade1e926d3fb23621f5c67fa8eec9bb4 | [
"MIT"
] | 1 | 2021-11-29T08:33:00.000Z | 2021-11-29T08:33:00.000Z | roberta/inference/run_classifier_infer_cv.py | LiRunyi2001/cnSoftBei | 72b90033ade1e926d3fb23621f5c67fa8eec9bb4 | [
"MIT"
] | null | null | null | roberta/inference/run_classifier_infer_cv.py | LiRunyi2001/cnSoftBei | 72b90033ade1e926d3fb23621f5c67fa8eec9bb4 | [
"MIT"
] | null | null | null | """
This script provides an exmaple to wrap UER-py for classification inference (cross validation).
"""
import sys
import os
import argparse
import torch
import torch.nn as nn
import numpy as np
uer_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
sys.path.append(uer_dir)
from uer.utils.vocab import Vocab
from uer.utils.constants import *
from uer.utils import *
from uer.utils.config import load_hyperparam
from uer.utils.seed import set_seed
from uer.model_loader import load_model
from uer.opts import model_opts
from finetune.run_classifier import Classifier
from inference.run_classifier_infer import *
def main():
    """Run inference with an ensemble of cross-validation classifiers and
    save the fold-averaged class probabilities to ``--test_features_path``
    (for use as stacking features)."""
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Path options.
    parser.add_argument("--load_model_path", default=None, type=str,
                        help="Path of the classfier model.")
    parser.add_argument("--vocab_path", default=None, type=str,
                        help="Path of the vocabulary file.")
    parser.add_argument("--spm_model_path", default=None, type=str,
                        help="Path of the sentence piece model.")
    parser.add_argument("--test_path", type=str,
                        help="Path of the testset.")
    parser.add_argument("--test_features_path", default=None, type=str,
                        help="Path of the test features for stacking.")
    parser.add_argument("--config_path", default="models/bert/base_config.json", type=str,
                        help="Path of the config file.")
    # Model options.
    model_opts(parser)
    parser.add_argument("--pooling", choices=["mean", "max", "first", "last"], default="first",
                        help="Pooling type.")
    # Inference options.
    parser.add_argument("--batch_size", type=int, default=64,
                        help="Batch size.")
    parser.add_argument("--seq_length", type=int, default=128,
                        help="Sequence length.")
    parser.add_argument("--labels_num", type=int, required=True,
                        help="Number of prediction labels.")
    # Tokenizer options.
    parser.add_argument("--tokenizer", choices=["bert", "char", "space"], default="bert",
                        help="Specify the tokenizer."
                             "Original Google BERT uses bert tokenizer on Chinese corpus."
                             "Char tokenizer segments sentences into characters."
                             "Space tokenizer segments sentences into words according to space.")
    # Output options.
    parser.add_argument("--output_logits", action="store_true", help="Write logits to output file.")
    parser.add_argument("--output_prob", action="store_true", help="Write probabilities to output file.")
    # Cross validation options.
    parser.add_argument("--folds_num", type=int, default=5,
                        help="The number of folds for cross validation.")
    args = parser.parse_args()
    # Load the hyperparameters from the config file.
    args = load_hyperparam(args)
    # Build tokenizer.
    args.tokenizer = str2tokenizer[args.tokenizer](args)
    # Build classification model and load parameters.
    args.soft_targets, args.soft_alpha = False, False
    dataset = read_dataset(args, args.test_path)
    src = torch.LongTensor([sample[0] for sample in dataset])
    seg = torch.LongTensor([sample[1] for sample in dataset])
    batch_size = args.batch_size
    instances_num = src.size()[0]
    print("The number of prediction instances: ", instances_num)
    # One probability list per fold model.
    test_features = [[] for _ in range(args.folds_num)]
    for fold_id in range(args.folds_num):
        # Per-fold checkpoints are named "<base>-fold_<id>.<suffix>".
        load_model_name = ".".join(args.load_model_path.split(".")[:-1])
        load_model_suffix = args.load_model_path.split(".")[-1]
        model = Classifier(args)
        model = load_model(model, load_model_name+"-fold_"+str(fold_id)+"."+load_model_suffix)
        # For simplicity, we use DataParallel wrapper to use multiple GPUs.
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        model = model.to(device)
        if torch.cuda.device_count() > 1:
            print("{} GPUs are available. Let's use them.".format(torch.cuda.device_count()))
            model = torch.nn.DataParallel(model)
        model.eval()
        for i, (src_batch, seg_batch) in enumerate(batch_loader(batch_size, src, seg)):
            src_batch = src_batch.to(device)
            seg_batch = seg_batch.to(device)
            with torch.no_grad():
                _, logits = model(src_batch, None, seg_batch)
            prob = nn.Softmax(dim=1)(logits)
            prob = prob.cpu().numpy().tolist()
            test_features[fold_id].extend(prob)
    # Average the per-fold probabilities into one (instances, labels) matrix.
    test_features = np.array(test_features)
    test_features = np.mean(test_features, axis=0)
    np.save(args.test_features_path, test_features)
# Standard CLI entry point.
if __name__ == "__main__":
    main()
| 39.392 | 105 | 0.648253 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,421 | 0.288587 |
20108249c7501a803109aa38a4367c232811fb45 | 6,491 | py | Python | uis/horsy_package.py | horsy-ml/horsy | 1161df2e83c201784ea674bd1d53e76831b15a0f | [
"MIT"
] | null | null | null | uis/horsy_package.py | horsy-ml/horsy | 1161df2e83c201784ea674bd1d53e76831b15a0f | [
"MIT"
] | null | null | null | uis/horsy_package.py | horsy-ml/horsy | 1161df2e83c201784ea674bd1d53e76831b15a0f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'D:\RAZNOE\prgrming\horsy\Source\client\uis\horsy_package.ui'
#
# Created by: PyQt5 UI code generator 5.15.6
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """pyuic5-generated layout for the horsy package-editing window.
    NOTE: generated from horsy_package.ui — regenerate rather than hand-edit
    when the .ui file changes."""
    def setupUi(self, MainWindow):
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(331, 433)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setStyleSheet("QWidget{\n"
"    background-color: rgb(30, 30, 30);\n"
"}\n"
"")
        self.centralwidget.setObjectName("centralwidget")
        # Read-only box showing which package is being edited.
        self.packagename_box = QtWidgets.QLineEdit(self.centralwidget)
        self.packagename_box.setGeometry(QtCore.QRect(20, 20, 151, 31))
        self.packagename_box.setStyleSheet("background-color: rgb(74, 76, 83);\n"
"border-radius: 5px; \n"
"color: rgb(242, 242, 242);")
        self.packagename_box.setText("")
        self.packagename_box.setReadOnly(True)
        self.packagename_box.setObjectName("packagename_box")
        self.main_exe_box = QtWidgets.QLineEdit(self.centralwidget)
        self.main_exe_box.setGeometry(QtCore.QRect(20, 305, 291, 31))
        self.main_exe_box.setStyleSheet("background-color: rgb(74, 76, 83);\n"
"border-radius: 5px; \n"
"color: rgb(242, 242, 242);")
        self.main_exe_box.setObjectName("main_exe_box")
        self.source_url_box = QtWidgets.QLineEdit(self.centralwidget)
        self.source_url_box.setGeometry(QtCore.QRect(20, 200, 291, 31))
        self.source_url_box.setStyleSheet("background-color: rgb(74, 76, 83);\n"
"border-radius: 5px; \n"
"color: rgb(242, 242, 242);")
        self.source_url_box.setObjectName("source_url_box")
        self.url_of_exe_box = QtWidgets.QLineEdit(self.centralwidget)
        self.url_of_exe_box.setGeometry(QtCore.QRect(20, 165, 291, 31))
        self.url_of_exe_box.setStyleSheet("background-color: rgb(74, 76, 83);\n"
"border-radius: 5px; \n"
"color: rgb(242, 242, 242);")
        self.url_of_exe_box.setObjectName("url_of_exe_box")
        self.dependency_url_box = QtWidgets.QLineEdit(self.centralwidget)
        self.dependency_url_box.setGeometry(QtCore.QRect(20, 235, 291, 31))
        self.dependency_url_box.setStyleSheet("background-color: rgb(74, 76, 83);\n"
"border-radius: 5px; \n"
"color: rgb(242, 242, 242);")
        self.dependency_url_box.setObjectName("dependency_url_box")
        self.dependency_run_box = QtWidgets.QLineEdit(self.centralwidget)
        self.dependency_run_box.setGeometry(QtCore.QRect(20, 270, 291, 31))
        self.dependency_run_box.setStyleSheet("background-color: rgb(74, 76, 83);\n"
"border-radius: 5px; \n"
"color: rgb(242, 242, 242);")
        self.dependency_run_box.setObjectName("dependency_run_box")
        self.package_desc_box = QtWidgets.QTextBrowser(self.centralwidget)
        self.package_desc_box.setGeometry(QtCore.QRect(20, 60, 256, 101))
        self.package_desc_box.setStyleSheet("background-color: rgb(74, 76, 83);\n"
"border-radius: 5px; \n"
"color: rgb(242, 242, 242);")
        self.package_desc_box.setAcceptRichText(False)
        self.package_desc_box.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByKeyboard|QtCore.Qt.LinksAccessibleByMouse|QtCore.Qt.TextBrowserInteraction|QtCore.Qt.TextEditable|QtCore.Qt.TextEditorInteraction|QtCore.Qt.TextSelectableByKeyboard|QtCore.Qt.TextSelectableByMouse)
        self.package_desc_box.setObjectName("package_desc_box")
        self.update_button = QtWidgets.QPushButton(self.centralwidget)
        self.update_button.setEnabled(True)
        self.update_button.setGeometry(QtCore.QRect(20, 360, 291, 50))
        self.update_button.setMinimumSize(QtCore.QSize(0, 50))
        self.update_button.setStyleSheet("QPushButton {\n"
"    color: rgb(204, 204, 204);\n"
"    border-width: 1px;\n"
"    border-radius:6px;\n"
"    border-style: solid;\n"
"    background-color: rgb(28, 30, 33);\n"
"    border-color: rgb(66, 143, 225);\n"
"}\n"
"QPushButton:hover{\n"
"    border-width: 2px;\n"
"}\n"
"QPushButton:pressed{\n"
"    background-color: rgb(50, 60, 63);\n"
"}\n"
"QPushButton:disabled{\n"
"    border-width: 0px;\n"
"    background-color: rgb(92, 99, 109);\n"
"}")
        self.update_button.setObjectName("update_button")
        MainWindow.setCentralWidget(self.centralwidget)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # All user-visible strings are routed through Qt's translate hook.
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "horsy - editing package"))
        self.packagename_box.setPlaceholderText(_translate("MainWindow", "Editing package"))
        self.main_exe_box.setPlaceholderText(_translate("MainWindow", "Main executable command (file.exe, python main.py, etc)"))
        self.source_url_box.setPlaceholderText(_translate("MainWindow", "Url of source (project on GitHub, source archive)"))
        self.url_of_exe_box.setPlaceholderText(_translate("MainWindow", "Url of executable (ends on .exe or .zip)"))
        self.dependency_url_box.setPlaceholderText(_translate("MainWindow", "Dependency URL (installer in .exe)"))
        self.dependency_run_box.setPlaceholderText(_translate("MainWindow", "Dependency run (run this during installation)"))
        self.package_desc_box.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p style=\"-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><br /></p></body></html>"))
        self.package_desc_box.setPlaceholderText(_translate("MainWindow", "Package description. It should be a short text under 256 characters"))
        self.update_button.setText(_translate("MainWindow", "Update"))
# Manual preview harness: build the window and run the Qt event loop.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| 52.346774 | 278 | 0.706671 | 5,878 | 0.905562 | 0 | 0 | 0 | 0 | 0 | 0 | 2,641 | 0.406871 |
2010d181957b6e1cf34b27e8fce9413591be7902 | 31,084 | py | Python | tiledb/tests/test_pandas_dataframe.py | Shelnutt2/TileDB-Py | cbc710de461a1c3abfaeb3587b1c0af209583618 | [
"MIT"
] | null | null | null | tiledb/tests/test_pandas_dataframe.py | Shelnutt2/TileDB-Py | cbc710de461a1c3abfaeb3587b1c0af209583618 | [
"MIT"
] | null | null | null | tiledb/tests/test_pandas_dataframe.py | Shelnutt2/TileDB-Py | cbc710de461a1c3abfaeb3587b1c0af209583618 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
try:
import pandas as pd
import pandas._testing as tm
import_failed = False
except ImportError:
import_failed = True
import unittest, os
import warnings
import string, random, copy
import numpy as np
from numpy.testing import assert_array_equal, assert_array_almost_equal
from pathlib import Path
import tiledb
from tiledb.tests.common import *
# Py2/Py3 compatibility: expose the native text type under one name.
if sys.version_info > (3, 0):
    str_type = str
else:
    str_type = unicode
def make_dataframe_basic1(col_size=10):
    """Build a random DataFrame mixing datetime, bytes, unicode and numeric
    columns; the 'chars' values are guaranteed unique so the column can be
    used as a string dimension."""
    # Draw 2-byte ascii values, rejecting duplicates until we have col_size.
    chars = []
    while len(chars) < col_size:
        candidate = rand_ascii_bytes(2)
        if candidate not in chars:
            chars.append(candidate)
    data_dict = {
        "time": rand_datetime64_array(col_size),
        "x": np.array([rand_ascii(4).encode("UTF-8") for _ in range(col_size)]),
        "chars": np.array(chars),
        "cccc": np.arange(0, col_size),
        "q": np.array([rand_utf8(np.random.randint(1, 100)) for _ in range(col_size)]),
        "t": np.array([rand_utf8(4) for _ in range(col_size)]),
        "r": np.array(
            [rand_ascii_bytes(np.random.randint(1, 100)) for _ in range(col_size)]
        ),
        "s": np.array([rand_ascii() for _ in range(col_size)]),
        "u": np.array([rand_ascii_bytes().decode() for _ in range(col_size)]),
        "v": np.array([rand_ascii_bytes() for _ in range(col_size)]),
        "vals_int64": np.random.randint(
            dtype_max(np.int64), size=col_size, dtype=np.int64
        ),
        "vals_float64": np.random.rand(col_size),
    }
    # TODO: dump this dataframe to pickle/base64 so that it can be
    # reconstructed if there are weird failures on CI?
    return pd.DataFrame.from_dict(data_dict)
def make_dataframe_basic2():
    """Return a small fixed-content DataFrame covering common dtypes."""
    # Fixture adapted from the Pandas feather i/o tests ("test_basic"):
    # https://github.com/pandas-dev/pandas/blob/master/pandas/tests/io/test_feather.py
    # (available under BSD 3-clause license
    # https://github.com/pandas-dev/pandas/blob/master/LICENSE
    import pandas as pd
    columns = {
        "string": ["a", "b", "c"],
        "int": [1, 2, 3],
        "uint": np.arange(3, 6).astype("u1"),
        "float": np.arange(4.0, 7.0, dtype="float64"),
        # TODO "float_with_null": [1.0, np.nan, 3],
        "bool": [True, False, True],
        # TODO "bool_with_null": [True, np.nan, False],
        # "cat": pd.Categorical(list("abc")),
        "dt": pd.date_range("20130101", periods=3),
        # "dttz": pd.date_range("20130101", periods=3, tz="US/Eastern"),
        # "dt_with_null": [
        #     pd.Timestamp("20130101"),
        #     pd.NaT,
        #     pd.Timestamp("20130103"),
        # ],
        "dtns": pd.date_range("20130101", periods=3, freq="ns"),
    }
    return pd.DataFrame(columns)
def make_dataframe_basic3(col_size=10, time_range=(None, None)):
    """Return a DataFrame with a random datetime column (optionally bounded
    by *time_range*), a linear float column and a random int64 column."""
    start, stop = time_range
    frame = pd.DataFrame(
        {
            "time": rand_datetime64_array(col_size, start=start, stop=stop),
            "double_range": np.linspace(-1000, 1000, col_size),
            "int_vals": np.random.randint(
                dtype_max(np.int64), size=col_size, dtype=np.int64
            ),
        }
    )
    return frame
class PandasDataFrameRoundtrip(DiskTestCase):
    def setUp(self):
        # Pandas is an optional test dependency: skip the whole fixture when
        # the top-of-file import failed.
        if import_failed:
            self.skipTest("Pandas not available")
        else:
            super(PandasDataFrameRoundtrip, self).setUp()
def test_dataframe_basic_rt1_manual(self):
uri = self.path("dataframe_basic_rt1_manual")
ctx = tiledb.Ctx()
dom = tiledb.Domain(
tiledb.Dim(name="i_chars", domain=(0, 10000), tile=10, dtype=np.uint64),
tiledb.Dim(
name="datetime",
domain=(0, np.iinfo(np.uint64).max - 3600 * 1000000000),
tile=3600 * 1000000000,
dtype=np.uint64,
),
tiledb.Dim(
name="cccc",
domain=(0, dtype_max(np.uint64) - 1),
tile=dtype_max(np.uint64),
dtype=np.uint64,
),
ctx=ctx,
)
compression = tiledb.FilterList([tiledb.ZstdFilter(level=-1)])
attrs = [
tiledb.Attr(name="x", dtype="S", filters=compression, ctx=ctx),
tiledb.Attr(name="chars", dtype="|S2", filters=compression, ctx=ctx),
tiledb.Attr(name="q", dtype="U", filters=compression, ctx=ctx),
tiledb.Attr(name="r", dtype="S", filters=compression, ctx=ctx),
tiledb.Attr(name="s", dtype="U", filters=compression, ctx=ctx),
tiledb.Attr(
name="vals_int64", dtype=np.int64, filters=compression, ctx=ctx
),
tiledb.Attr(
name="vals_float64", dtype=np.float64, filters=compression, ctx=ctx
),
tiledb.Attr(name="t", dtype="U", filters=compression, ctx=ctx),
tiledb.Attr(name="u", dtype="U", filters=compression, ctx=ctx),
tiledb.Attr(name="v", dtype="S", filters=compression, ctx=ctx),
]
schema = tiledb.ArraySchema(domain=dom, sparse=True, attrs=attrs, ctx=ctx)
tiledb.SparseArray.create(uri, schema)
df = make_dataframe_basic1()
incr = 0
with tiledb.SparseArray(uri, "w") as A:
s_ichars = []
for s in df["chars"]:
s_ichars.append(incr)
incr += 1
times = df["time"]
cccc = df["cccc"]
df = df.drop(columns=["time", "cccc"], axis=1)
A[s_ichars, times, cccc] = df.to_dict(orient="series")
with tiledb.SparseArray(uri) as A:
df_res = pd.DataFrame.from_dict(A[:])
for col in df.columns:
# TileDB default return is unordered, so must sort to compare
assert_array_equal(df[col].sort_values(), df_res[col].sort_values())
def test_dataframe_basic1(self):
uri = self.path("dataframe_basic_rt1")
df = make_dataframe_basic1()
ctx = tiledb.Ctx()
tiledb.from_dataframe(uri, df, sparse=False, ctx=ctx)
df_readback = tiledb.open_dataframe(uri)
tm.assert_frame_equal(df, df_readback)
uri = self.path("dataframe_basic_rt1_unlimited")
tiledb.from_dataframe(uri, df, full_domain=True, sparse=False, ctx=ctx)
df_readback = tiledb.open_dataframe(uri)
tm.assert_frame_equal(df, df_readback)
with tiledb.open(uri) as A:
df_arrow = A.query(use_arrow=True).df[:]
tm.assert_frame_equal(df, df_arrow)
def test_dataframe_basic2(self):
uri = self.path("dataframe_basic_rt2")
df = make_dataframe_basic2()
tiledb.from_dataframe(uri, df, sparse=False)
df_readback = tiledb.open_dataframe(uri)
tm.assert_frame_equal(df, df_readback)
with tiledb.open(uri) as B:
tm.assert_frame_equal(df, B.df[:], check_index_type=False)
def test_dataframe_csv_rt1(self):
def rand_dtype(dtype, size):
import os
nbytes = size * np.dtype(dtype).itemsize
randbytes = os.urandom(nbytes)
return np.frombuffer(randbytes, dtype=dtype)
uri = self.path("dataframe_csv_rt1")
os.mkdir(uri)
col_size = 15
data_dict = {
"dates": np.array(
rand_dtype(np.uint64, col_size), dtype=np.dtype("datetime64[ns]")
),
"float64s": rand_dtype(np.float64, col_size),
"ints": rand_dtype(np.int64, col_size),
"strings": [rand_utf8(5) for _ in range(col_size)],
}
df_orig = pd.DataFrame.from_dict(data_dict)
csv_uri = os.path.join(uri, "test.csv")
# note: encoding must be specified to avoid printing the b'' bytes
# prefix, see https://github.com/pandas-dev/pandas/issues/9712
df_orig.to_csv(csv_uri, mode="w")
csv_array_uri = os.path.join(uri, "tiledb_csv")
tiledb.from_csv(
csv_array_uri, csv_uri, index_col=0, parse_dates=[1], sparse=False
)
ctx = tiledb.default_ctx()
df_from_array = tiledb.open_dataframe(csv_array_uri, ctx=ctx)
tm.assert_frame_equal(df_orig, df_from_array)
# Test reading via TileDB VFS. The main goal is to support reading
# from a remote VFS, using local with `file://` prefix as a test for now.
with tiledb.FileIO(tiledb.VFS(), csv_uri, "rb") as fio:
csv_uri_unc = "file:///" + csv_uri
csv_array_uri2 = "file:///" + os.path.join(csv_array_uri + "_2")
tiledb.from_csv(
csv_array_uri2, csv_uri_unc, index_col=0, parse_dates=[1], sparse=False
)
df_from_array2 = tiledb.open_dataframe(csv_array_uri2)
tm.assert_frame_equal(df_orig, df_from_array2)
# test timestamp write
uri2 = self.path("dataframe_csv_timestamp")
timestamp = random.randint(0, np.iinfo(np.int64).max)
tiledb.from_csv(uri2, csv_uri, timestamp=0, index_col=0)
tiledb.from_pandas(
uri2,
df_orig,
timestamp=timestamp,
mode="append",
row_start_idx=0,
index_col=0,
)
with tiledb.open(uri2, timestamp=0) as A:
self.assertEqual(A.timestamp, 0)
with tiledb.open(uri2, timestamp=timestamp) as A:
self.assertEqual(A.timestamp, timestamp)
def test_dataframe_index_to_sparse_dims(self):
# This test
# - loops over all of the columns from make_basic_dataframe,
# - sets the index to the current column
# - creates a dataframe
# - check that indexing the nonempty_domain of the resulting
# dimension matches the input
# TODO should find a way to dump the whole dataframe dict to a
# (print-safe) bytestring in order to debug generated output
df = make_dataframe_basic1(100)
for col in df.columns:
uri = self.path("df_indx_dim+{}".format(str(col)))
# ensure that all column which will be used as string dim index
# is sorted, because that is how it will be returned
if df.dtypes[col] == "O":
df.sort_values(col, inplace=True)
# also ensure that string columns are converted to bytes
# b/c only TILEDB_ASCII supported for string dimension
if type(df[col][0]) == str_type:
df[col] = [x.encode("UTF-8") for x in df[col]]
new_df = df.drop_duplicates(subset=col)
new_df.set_index(col, inplace=True)
tiledb.from_dataframe(uri, new_df, sparse=True)
with tiledb.open(uri) as A:
self.assertEqual(A.domain.dim(0).name, col)
nonempty = A.nonempty_domain()[0]
res = A.multi_index[nonempty[0] : nonempty[1]]
index = pd.Index(res.pop(col), name=col)
res_df = pd.DataFrame(res, index=index)
tm.assert_frame_equal(new_df, res_df, check_like=True)
def test_dataframe_multiindex_dims(self):
uri = self.path("df_multiindex_dims")
col_size = 10
df = make_dataframe_basic3(col_size)
df_dict = df.to_dict(orient="series")
df.set_index(["time", "double_range"], inplace=True)
tiledb.from_dataframe(uri, df, sparse=True)
with tiledb.open(uri) as A:
ned_time = A.nonempty_domain()[0]
ned_dbl = A.nonempty_domain()[1]
res = A.multi_index[slice(*ned_time), :]
assert_array_equal(res["time"], df_dict["time"])
assert_array_equal(res["double_range"], df_dict["double_range"])
assert_array_equal(res["int_vals"], df.int_vals.values)
# test .df[] indexing
df_idx_res = A.df[slice(*ned_time), :]
tm.assert_frame_equal(df_idx_res, df)
# test .df[] indexing with query
df_idx_res = A.query(attrs=["int_vals"]).df[slice(*ned_time), :]
tm.assert_frame_equal(df_idx_res, df)
# test .df[] with Arrow
df_idx_res = A.query(use_arrow=True).df[slice(*ned_time), :]
tm.assert_frame_equal(df_idx_res, df)
df_idx_res = A.query(use_arrow=False).df[slice(*ned_time), :]
tm.assert_frame_equal(df_idx_res, df)
def test_csv_dense(self):
col_size = 10
df_data = {
"index": np.arange(0, col_size),
"chars": np.array([rand_ascii(4).encode("UTF-8") for _ in range(col_size)]),
"vals_float64": np.random.rand(col_size),
}
df = pd.DataFrame(df_data).set_index("index")
# Test 1: basic round-trip
tmp_dir = self.path("csv_dense")
os.mkdir(tmp_dir)
tmp_csv = os.path.join(tmp_dir, "generated.csv")
df.to_csv(tmp_csv)
tmp_array = os.path.join(tmp_dir, "array")
tiledb.from_csv(
tmp_array,
tmp_csv,
index_col=["index"],
dtype={"index": np.uint64},
sparse=False,
)
tmp_array2 = os.path.join(tmp_dir, "array2")
tiledb.from_csv(tmp_array2, tmp_csv, sparse=False)
def test_csv_col_to_sparse_dims(self):
df = make_dataframe_basic3(20)
# Test 1: basic round-trip
tmp_dir = self.path("csv_col_to_sparse_dims")
os.mkdir(tmp_dir)
tmp_csv = os.path.join(tmp_dir, "generated.csv")
df.sort_values("time", inplace=True)
df.to_csv(tmp_csv, index=False)
df.set_index(["time", "double_range"], inplace=True)
tmp_array = os.path.join(tmp_dir, "array")
tiledb.from_csv(
tmp_array,
tmp_csv,
sparse=True,
index_col=["time", "double_range"],
parse_dates=["time"],
)
df_bk = tiledb.open_dataframe(tmp_array)
tm.assert_frame_equal(df, df_bk)
# Test 2: check from_csv `sparse` and `allows_duplicates` keyword args
df = make_dataframe_basic3(20)
tmp_csv2 = os.path.join(tmp_dir, "generated2.csv")
tmp_array2a = os.path.join(tmp_dir, "array2a")
tmp_array2b = os.path.join(tmp_dir, "array2b")
# create a duplicate value
df.loc[0, "int_vals"] = df.int_vals[1]
df.sort_values("int_vals", inplace=True)
df.to_csv(tmp_csv2, index=False)
# try once and make sure error is raised because of duplicate value
with self.assertRaisesRegex(
tiledb.TileDBError, "Duplicate coordinates \\(.*\\) are not allowed"
):
tiledb.from_csv(
tmp_array2a,
tmp_csv2,
index_col=["int_vals"],
sparse=True,
allows_duplicates=False,
)
# try again, check from_csv(allows_duplicates=True, sparse=True)
tiledb.from_csv(
tmp_array2b,
tmp_csv2,
index_col=["int_vals"],
parse_dates=["time"],
sparse=True,
allows_duplicates=True,
float_precision="round_trip",
)
with tiledb.open(tmp_array2b) as A:
self.assertTrue(A.schema.sparse)
res_df = A.df[:]
# the duplicate value is on the dimension and can be retrieved in arbitrary
# order. we need to re-sort in order to compare, to avoid spurious failures.
res_df.sort_values("time", inplace=True)
cmp_df = df.set_index("int_vals").sort_values(by="time")
tm.assert_frame_equal(res_df, cmp_df)
def test_dataframe_csv_schema_only(self):
col_size = 10
df = make_dataframe_basic3(col_size)
tmp_dir = self.path("csv_schema_only")
os.mkdir(tmp_dir)
tmp_csv = os.path.join(tmp_dir, "generated.csv")
df.sort_values("time", inplace=True)
df.to_csv(tmp_csv, index=False)
attrs_filters = tiledb.FilterList([tiledb.ZstdFilter(1)])
# from_dataframe default is 1, so use 7 here to check
# the arg is correctly parsed/passed
coords_filters = tiledb.FilterList([tiledb.ZstdFilter(7)])
tmp_assert_dir = os.path.join(tmp_dir, "array")
# this should raise an error
with self.assertRaises(ValueError):
tiledb.from_csv(tmp_assert_dir, tmp_csv, tile="abc")
with self.assertRaises(ValueError):
tiledb.from_csv(tmp_assert_dir, tmp_csv, tile=(3, 1.0))
tmp_array = os.path.join(tmp_dir, "array")
tiledb.from_csv(
tmp_array,
tmp_csv,
index_col=["time", "double_range"],
parse_dates=["time"],
mode="schema_only",
capacity=1001,
sparse=True,
tile={"time": 5},
coords_filters=coords_filters,
)
t0, t1 = df.time.min(), df.time.max()
import numpy
ref_schema = tiledb.ArraySchema(
domain=tiledb.Domain(
*[
tiledb.Dim(
name="time",
domain=(t0.to_datetime64(), t1.to_datetime64()),
tile=5,
dtype="datetime64[ns]",
),
tiledb.Dim(
name="double_range",
domain=(-1000.0, 1000.0),
tile=1000,
dtype="float64",
),
]
),
attrs=[
tiledb.Attr(name="int_vals", dtype="int64", filters=attrs_filters),
],
coords_filters=coords_filters,
cell_order="row-major",
tile_order="row-major",
capacity=1001,
sparse=True,
allows_duplicates=False,
)
# note: filters omitted
array_nfiles = len(tiledb.VFS().ls(tmp_array))
self.assertEqual(array_nfiles, 3)
with tiledb.open(tmp_array) as A:
self.assertEqual(A.schema, ref_schema)
# TODO currently no equality check for filters
self.assertEqual(A.schema.coords_filters[0].level, coords_filters[0].level)
self.assertEqual(A.schema.attr(0).filters[0].level, attrs_filters[0].level)
# Test mode='append' for from_csv
tiledb.from_csv(tmp_array, tmp_csv, mode="append", row_start_idx=0)
df2 = make_dataframe_basic3(10, time_range=(t0, t1))
df2.sort_values("time", inplace=True)
df2.set_index(["time", "double_range"], inplace=True)
# Test mode='append' for from_pandas
tiledb.from_pandas(tmp_array, df2, row_start_idx=len(df2), mode="append")
with tiledb.open(tmp_array) as A:
df_bk = A.df[:]
df.set_index(["time", "double_range"], inplace=True)
df_combined = pd.concat([df, df2])
df_combined.sort_index(level="time", inplace=True)
df_bk.sort_index(level="time", inplace=True)
tm.assert_frame_equal(df_bk, df_combined)
def test_dataframe_csv_chunked(self):
col_size = 200
df = make_dataframe_basic3(col_size)
tmp_dir = self.path("csv_chunked")
os.mkdir(tmp_dir)
tmp_csv = os.path.join(tmp_dir, "generated.csv")
df.sort_values("time", inplace=True)
df.to_csv(tmp_csv, index=False)
# Test sparse chunked
tmp_array = os.path.join(tmp_dir, "array")
tiledb.from_csv(
tmp_array,
tmp_csv,
index_col=["double_range"],
parse_dates=["time"],
date_spec={"time": "%Y-%m-%dT%H:%M:%S.%f"},
chunksize=10,
sparse=True,
)
with tiledb.open(tmp_array) as A:
res = A[:]
df_bk = pd.DataFrame(res)
df_bk.set_index(["double_range"], inplace=True)
df_ck = df.set_index(["double_range"])
tm.assert_frame_equal(df_bk, df_ck)
# Test dense chunked
tmp_array_dense = os.path.join(tmp_dir, "array_dense")
tiledb.from_csv(
tmp_array_dense, tmp_csv, parse_dates=["time"], sparse=False, chunksize=25
)
with tiledb.open(tmp_array_dense) as A:
# with sparse=False and no index column, we expect to have unlimited domain
self.assertEqual(A.schema.domain.dim(0).domain[1], 18446744073709541615)
# chunked writes go to unlimited domain, so we must only read nonempty
ned = A.nonempty_domain()[0]
# TODO should support numpy scalar here
res = A.multi_index[int(ned[0]) : int(ned[1])]
df_bk = pd.DataFrame(res)
tm.assert_frame_equal(df_bk, df)
# test .df[] indexing
df_idx_res = A.df[int(ned[0]) : int(ned[1])]
tm.assert_frame_equal(df_idx_res, df, check_index_type=False)
# test .df[] indexing with query
df_idx_res = A.query(attrs=["time"]).df[int(ned[0]) : int(ned[1])]
tm.assert_frame_equal(df_idx_res, df[["time"]])
df_idx_res = A.query(attrs=["double_range"]).df[int(ned[0]) : int(ned[1])]
tm.assert_frame_equal(df_idx_res, df[["double_range"]])
# test .df[] indexing with arrow
df_idx_res = A.query(use_arrow=True, attrs=["time"]).df[
int(ned[0]) : int(ned[1])
]
tm.assert_frame_equal(df_idx_res, df[["time"]])
df_idx_res = A.query(use_arrow=True, attrs=["double_range"]).df[
int(ned[0]) : int(ned[1])
]
tm.assert_frame_equal(df_idx_res, df[["double_range"]])
# disable coordinate dimension/index
df_idx_res = A.query(coords=False).df[int(ned[0]) : int(ned[1])]
tm.assert_frame_equal(df_idx_res, df.reset_index(drop=True))
def test_csv_fillna(self):
col_size = 10
data = np.random.rand(10) * 100 # make some integers for the 2nd test
data[4] = np.nan
df = pd.DataFrame({"v": data})
tmp_dir = self.path("csv_fillna")
os.mkdir(tmp_dir)
tmp_csv = os.path.join(tmp_dir, "generated.csv")
df.to_csv(tmp_csv, index=False, na_rep="NaN")
tmp_array = os.path.join(tmp_dir, "array")
# TODO: test Dense too
tiledb.from_csv(tmp_array, tmp_csv, fillna={"v": 0}, sparse=True)
def check_array(path, df):
# update the value in the original dataframe to match what we expect on read-back
df["v"][4] = 0
with tiledb.open(path) as A:
df_bk = A.df[:]
tm.assert_frame_equal(df_bk, df, check_index_type=False)
check_array(tmp_array, copy.deepcopy(df))
# Test writing a StringDtype in newer pandas versions
if hasattr(pd, "StringDtype"):
tmp_array2 = os.path.join(tmp_dir, "array2")
tiledb.from_csv(
tmp_array2,
tmp_csv,
fillna={"v": 0},
column_types={"v": pd.Int64Dtype},
sparse=True,
)
df_to_check = copy.deepcopy(df)
df_to_check["v"][4] = 0
df_to_check = df_to_check.astype({"v": np.int64})
check_array(tmp_array2, df_to_check)
def test_csv_multi_file(self):
col_size = 10
csv_dir = self.path("csv_multi_dir")
os.mkdir(csv_dir)
# Write a set of CSVs with 10 rows each
input_dfs = list()
for i in range(20):
df = make_dataframe_basic3(col_size)
output_path = os.path.join(csv_dir, "csv_{}.csv".format(i))
df.to_csv(output_path, index=False)
input_dfs.append(df)
tmp_dir = self.path("csv_multi_array_dir")
os.mkdir(tmp_dir)
# Create TileDB array with flush every 25 rows
csv_paths = glob.glob(csv_dir + "/*.csv")
tmp_array = os.path.join(tmp_dir, "array")
tiledb.from_csv(
tmp_array,
csv_paths,
index_col=["time"],
parse_dates=["time"],
chunksize=25,
sparse=True,
)
# Check number of fragments
# * should equal 8 based on chunksize=25
# * 20 files, 10 rows each, 200 rows == 8 writes:
fragments = glob.glob(tmp_array + "/*.ok")
self.assertEqual(len(fragments), 8)
# Check the returned data
# note: tiledb returns sorted values
df_orig = pd.concat(input_dfs, axis=0).set_index(["time"]).sort_values("time")
with tiledb.open(tmp_array) as A:
df_bk = A.df[:]
# TileDB default return is unordered, so sort to compare
df_bk = df_bk.sort_index()
tm.assert_frame_equal(df_bk, df_orig)
def test_dataframe_misc(self):
uri = self.path("test_small_domain_range")
df = pd.DataFrame({"data": [2]}, index=[0])
tiledb.from_pandas(uri, df)
data = {
"data": np.array([1, 2, 3]),
"raw": np.array([4, 5, 6]),
"index": np.array(["a", "b", "c"], dtype=np.dtype("|S")),
"indey": np.array([0.0, 0.5, 0.9]),
}
df = pd.DataFrame.from_dict(data)
df = df.set_index(["index", "indey"])
uri = self.path("test_string_index_infer")
tiledb.from_pandas(uri, df)
with tiledb.open(uri) as A:
self.assertTrue(A.schema.domain.dim(0).dtype == np.dtype("|S"))
# test setting Attr and Dim filter list by override
uri = self.path("test_df_attrs_filters1")
bz_filter = [tiledb.Bzip2Filter(4)]
def_filter = [tiledb.GzipFilter(-1)]
tiledb.from_pandas(uri, df, attr_filters=bz_filter, dim_filters=bz_filter)
with tiledb.open(uri) as A:
self.assertTrue(A.schema.attr("data").filters == bz_filter)
self.assertTrue(A.schema.attr("raw").filters == bz_filter)
self.assertTrue(A.schema.domain.dim("index").filters == bz_filter)
self.assertTrue(A.schema.domain.dim("indey").filters == bz_filter)
# test setting Attr and Dim filter list by dict
uri = self.path("test_df_attrs_filters2")
tiledb.from_pandas(
uri, df, attr_filters={"data": bz_filter}, dim_filters={"index": bz_filter}
)
with tiledb.open(uri) as A:
self.assertTrue(A.schema.attr("data").filters == bz_filter)
self.assertTrue(A.schema.attr("raw").filters == tiledb.FilterList())
self.assertTrue(A.schema.domain.dim("index").filters == bz_filter)
self.assertTrue(A.schema.domain.dim("indey").filters == tiledb.FilterList())
def test_dataframe_query(self):
uri = self.path("df_query")
col_size = 10
df = make_dataframe_basic3(col_size)
df.set_index(["time"], inplace=True)
tiledb.from_dataframe(uri, df, sparse=True)
with tiledb.open(uri) as A:
with self.assertRaises(tiledb.TileDBError):
A.query(dims=["nodimnodim"])
with self.assertRaises(tiledb.TileDBError):
A.query(attrs=["noattrnoattr"])
res_df = A.query(dims=["time"], attrs=["int_vals"]).df[:]
self.assertTrue("time" == res_df.index.name)
self.assertTrue("int_vals" in res_df)
self.assertTrue("double_range" not in res_df)
# try index_col alone: should have *only* the default RangeIndex column
res_df2 = A.query(index_col=None).df[:]
self.assertTrue(isinstance(res_df2.index, pd.RangeIndex))
# try no dims, index_col None: should only value cols and default index
res_df3 = A.query(dims=False, index_col=None).df[:]
self.assertTrue("time" not in res_df3)
self.assertTrue("int_vals" in res_df3)
self.assertTrue("double_range" in res_df3)
self.assertTrue(isinstance(res_df3.index, pd.RangeIndex))
# try attr as index_col:
res_df4 = A.query(dims=False, index_col=["int_vals"]).df[:]
self.assertTrue("time" not in res_df4)
self.assertTrue("double_range" in res_df4)
self.assertTrue("int_vals" == res_df4.index.name)
def test_read_parquet(self):
uri = Path(self.path("test_read_parquet"))
os.mkdir(uri)
def try_rt(name, df, pq_args={}):
tdb_uri = str(uri.joinpath(f"{name}.tdb"))
pq_uri = str(uri.joinpath(f"{name}.pq"))
df.to_parquet(
pq_uri,
# this is required to losslessly serialize timestamps
# until Parquet 2.0 is default.
use_deprecated_int96_timestamps=True,
**pq_args,
)
tiledb.from_parquet(str(tdb_uri), str(pq_uri))
with tiledb.open(tdb_uri) as T:
tm.assert_frame_equal(df, T.df[:], check_index_type=False)
basic1 = make_dataframe_basic1()
try_rt("basic1", basic1)
try_rt("basic2", make_dataframe_basic2())
basic3 = make_dataframe_basic3()
try_rt("basic3", basic3)
def test_nullable_integers(self):
nullable_int_dtypes = (
pd.Int64Dtype(),
pd.Int32Dtype(),
pd.Int16Dtype(),
pd.Int8Dtype(),
pd.UInt64Dtype(),
pd.UInt32Dtype(),
pd.UInt16Dtype(),
pd.UInt8Dtype(),
)
col_size = 100
null_count = 20
for pdtype in nullable_int_dtypes:
uri = self.path(f"test_nullable_{str(pdtype)}")
nptype = pdtype.numpy_dtype
data = np.random.randint(
dtype_max(nptype), size=col_size, dtype=nptype
).astype("O")
null_idxs = np.random.randint(col_size, size=null_count)
data[null_idxs] = None
series = pd.Series(data, dtype=pdtype)
df = pd.DataFrame({"data": series})
tiledb.from_pandas(uri, df)
with tiledb.open(uri) as A:
tm.assert_frame_equal(df, A.df[:], check_index_type=False)
def test_nullable_bool(self):
uri = self.path("test_nullable_bool")
col_size = 100
null_count = 20
data = np.random.randint(2, size=col_size, dtype=np.uint8).astype("O")
null_idxs = np.random.randint(col_size, size=null_count)
data[null_idxs] = None
series = pd.Series(data, dtype="boolean")
df = pd.DataFrame({"data": series})
tiledb.from_pandas(uri, df)
with tiledb.open(uri) as A:
tm.assert_frame_equal(df, A.df[:], check_index_type=False)
| 36.228438 | 93 | 0.574926 | 27,701 | 0.891166 | 0 | 0 | 0 | 0 | 0 | 0 | 5,880 | 0.189165 |
201150abd59f44043c0cf22c47036ec2f4759cde | 871 | py | Python | day11/test_lib.py | heijp06/AoC-2021 | f6afead5e1fe9a839d608a5792f84e54803742c1 | [
"MIT"
] | null | null | null | day11/test_lib.py | heijp06/AoC-2021 | f6afead5e1fe9a839d608a5792f84e54803742c1 | [
"MIT"
] | null | null | null | day11/test_lib.py | heijp06/AoC-2021 | f6afead5e1fe9a839d608a5792f84e54803742c1 | [
"MIT"
] | null | null | null | import pytest
from lib import flashing_neighbours, part1, part2
def test_part1():
    """The full example grid produces 1656 flashes after the default step count."""
    total_flashes = part1(data)
    assert total_flashes == 1656
def test_part2():
    """All octopuses in the example grid first flash together on step 195."""
    first_synchronized_step = part2(data)
    assert first_synchronized_step == 195
@pytest.mark.parametrize("steps", range(1, 3))
def test_part1_small(steps):
    """The small example yields 9 cumulative flashes after 1 and after 2 steps.

    Bug fix: the parametrized ``steps`` value was previously ignored — the
    body always called ``part1(small, steps=1)``, so the steps=2 case never
    actually exercised anything new. Both cases expect 9 because no further
    octopus flashes on the second step of this grid.
    """
    assert part1(small, steps=steps) == 9
@pytest.mark.parametrize(("grid", "expected"), ((["98"], 2), (["988"], 3)))
def test_part1_ripple(grid, expected):
    """A flash cascades through adjacent high-energy octopuses in one step.

    Bug fix: the second argvalue was the malformed ``(["988", 3])`` — a
    single two-element list, which pytest unpacked as grid="988" (a bare
    string instead of a one-row grid). Corrected to ``(["988"], 3)`` so the
    grid is a list of row strings like every other case.
    """
    assert part1(grid, 1) == expected
def test_octopus_only_flashes_once():
    """An octopus may flash at most once per step, even when re-triggered."""
    grid = ["96", "08"]
    flashes_after_one_step = part1(grid, 1)
    assert flashes_after_one_step == 2
def test_flashing_neighbours():
    """Exactly one neighbour of cell (0, 1) has reached flashing energy."""
    grid = [[10, 9]]
    assert flashing_neighbours(grid, 0, 1) == 1
# Small 5x5 worked-example grid of digit energy levels;
# test_part1_small expects 9 flashes after a single step.
small = [
    "11111",
    "19991",
    "19191",
    "19991",
    "11111"
]
# 10x10 example grid: test_part1 expects 1656 total flashes and
# test_part2 expects the first fully-synchronized flash on step 195.
data = [
    "5483143223",
    "2745854711",
    "5264556173",
    "6141336146",
    "6357385478",
    "4167524645",
    "2176841721",
    "6882881134",
    "4846848554",
    "5283751526"
]
| 17.078431 | 75 | 0.614237 | 0 | 0 | 0 | 0 | 265 | 0.304248 | 0 | 0 | 195 | 0.223881 |
201157d2a3d68e419b263ee0880e9abef86ce731 | 7,728 | py | Python | data/external/repositories/178307/kaggle-otto-master/predictor.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories/178307/kaggle-otto-master/predictor.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories/178307/kaggle-otto-master/predictor.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | import pandas as pd
import numpy as np
from sklearn import ensemble, calibration, metrics, cross_validation
from sklearn import feature_extraction, preprocessing
import xgboost as xgb
import keras.models as kermod
import keras.layers.core as kerlay
import keras.layers.advanced_activations as keradv
import keras.layers.normalization as kernorm
import scipy.optimize as scopt
# --- Data loading and preparation ------------------------------------------
# read all the data (competition-style train/test/sample-submission CSVs,
# expected in the working directory)
train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
sample = pd.read_csv('sampleSubmission.csv')
# encode the labels (string class names -> integer codes 0..8)
le = preprocessing.LabelEncoder()
train_y = train_data.target
train_y = le.fit_transform(train_y)
# drop id and target from the attributes so only features remain
train_X = train_data.drop('id', axis=1)
train_X = train_X.drop('target', axis=1)
# drop id from the test set
test_X = test_data.drop('id', axis=1)
# simple sklearn compatible wrapper around a keras NN
class NNPredictor:
    """Minimal sklearn-compatible wrapper around a 3-hidden-layer Keras MLP.

    ``scaler`` is any sklearn-style transformer (e.g. StandardScaler or
    TfidfTransformer) applied to the features before training/prediction.
    The network maps the scaled features to 9-way softmax probabilities.
    """
    def __init__(self, scaler):
        self.scaler = scaler
    def fit(self, X, y):
        """Scale X, one-hot encode y (9 classes), build and train the net."""
        train_X = self.scaler.fit_transform(X.values.astype(float))
        # Sparse transformers (e.g. Tfidf) return a sparse matrix; densify.
        # NOTE(review): the bare except silently swallows any error here.
        try:
            train_X = train_X.toarray()
        except:
            pass
        train_y = preprocessing.OneHotEncoder(sparse=False, n_values=9).fit_transform(list(map(lambda x: [x], y)))
        # Dropout -> (Dense 1024 -> PReLU -> BatchNorm -> Dropout) x3 -> softmax
        self.nn = kermod.Sequential()
        self.nn.add(kerlay.Dropout(0.1))
        self.nn.add(kerlay.Dense(train_X.shape[1], 1024, init='glorot_uniform'))
        self.nn.add(keradv.PReLU(1024,))
        self.nn.add(kernorm.BatchNormalization((1024,), mode=1))
        self.nn.add(kerlay.Dropout(0.5))
        self.nn.add(kerlay.Dense(1024, 512, init='glorot_uniform'))
        self.nn.add(keradv.PReLU(512,))
        self.nn.add(kernorm.BatchNormalization((512,), mode=1))
        self.nn.add(kerlay.Dropout(0.5))
        self.nn.add(kerlay.Dense(512, 256, init='glorot_uniform'))
        self.nn.add(keradv.PReLU(256,))
        self.nn.add(kernorm.BatchNormalization((256,), mode=1))
        self.nn.add(kerlay.Dropout(0.5))
        self.nn.add(kerlay.Dense(256, 9, init='glorot_uniform', activation='softmax'))
        self.nn.compile(loss='categorical_crossentropy', optimizer='adam')
        # shuffle the training set
        sh = np.array(range(len(train_X)))
        np.random.shuffle(sh)
        train_X = train_X[sh]
        train_y = train_y[sh]
        self.nn.fit(train_X, train_y, nb_epoch=60, batch_size=2048, verbose=0)
    def predict(self, X):
        """Not implemented — only probability prediction is supported."""
        pass
    def predict_proba(self, X):
        """Return the network's class-probability matrix for X."""
        scaled_X = self.scaler.transform(X.values.astype(float))
        try:
            scaled_X = scaled_X.toarray()
        except:
            pass
        return self.nn.predict_proba(scaled_X)
    def get_params(self, deep=False):
        # Required for sklearn-API compatibility; no tunable params exposed.
        return {}
    def score(self, X, y):
        """Multiclass log loss of the predicted probabilities (lower = better)."""
        return metrics.log_loss(y, self.predict_proba(X))
# NN ensemble, 10 NNs
# 5 of them with StandarScaler, 5 of them with TfidfTransformer
class NNEnsemble:
    """Bag of 10 NNPredictors trained on stratified 90% folds.

    Members alternate between a StandardScaler and a TfidfTransformer
    preprocessing step; predictions are the uniform average of the
    row-normalized member probabilities.
    """
    def fit(self, X, y):
        """Train one NNPredictor per stratified fold, printing held-out loss."""
        self.clfs = []
        i = 0
        skf = cross_validation.StratifiedKFold(y, n_folds=10)
        for train_idx, test_idx in skf:
            i += 1
            # Alternate the feature scaler between ensemble members.
            if i % 2 == 0:
                clf = NNPredictor(scaler=preprocessing.StandardScaler())
            else:
                clf = NNPredictor(scaler=feature_extraction.text.TfidfTransformer())
            clf.fit(X.iloc[train_idx], y[train_idx])
            self.clfs.append(clf)
            print(clf.score(X.iloc[test_idx], y[test_idx]))
    def predict(self, X):
        """Not implemented — only probability prediction is supported."""
        pass
    def predict_proba(self, X):
        """Average the members' row-normalized probability matrices."""
        pred = 0
        for clf in self.clfs:
            pred += 0.1*preprocessing.normalize(clf.predict_proba(X), axis=1, norm='l1')
        return pred
    def get_params(self, deep=False):
        # Required for sklearn-API compatibility; no tunable params exposed.
        return {}
    def score(self, X, y):
        """Multiclass log loss of the ensemble probabilities."""
        return metrics.log_loss(y, self.predict_proba(X))
# the main predictor, ensemble of NNPredictor, calibrated Random Forest and
# and XGBoost
class OttoPredictor:
    """Top-level ensemble: XGBoost + calibrated RandomForest + NN ensemble.

    5% of the training data is held out; the three base models are fit on
    the remaining 95%, and the hold-out set is used to optimize the convex
    blending weights by minimizing multiclass log loss.
    """
    def fit(self, X, y):
        """Fit the three base models and optimize their blending weights."""
        # keep 5% for calibration later
        sss = cross_validation.StratifiedShuffleSplit(y, test_size=0.05)
        for tr, cal in sss:
            break
        # define the two classifiers
        self.clf1 = xgb.XGBClassifier(objective="multi:softprob",
                                      n_estimators=400,
                                      max_depth=8)
        self.clf2 = calibration.CalibratedClassifierCV(
            ensemble.RandomForestClassifier(
                n_estimators=1000,
                n_jobs=8,
                class_weight='auto'),
            method='isotonic')
        self.clf3 = NNEnsemble()
        # fit the classifiers
        self.clf1.fit(X.iloc[tr], y[tr])
        self.clf2.fit(X.iloc[tr], y[tr])
        self.clf3.fit(X.iloc[tr], y[tr])
        # predict everything before ensembling
        self.pr1 = self.clf1.predict_proba(X.iloc[cal])
        self.pr2 = self.clf2.predict_proba(X.iloc[cal])
        self.pr3 = self.clf3.predict_proba(X.iloc[cal])
        self.pr1 = preprocessing.normalize(self.pr1, axis=1, norm='l1')
        self.pr2 = preprocessing.normalize(self.pr2, axis=1, norm='l1')
        self.pr3 = preprocessing.normalize(self.pr3, axis=1, norm='l1')
        print("XGB log loss:", metrics.log_loss(y[cal], self.pr1))
        print("RF log loss:", metrics.log_loss(y[cal], self.pr2))
        print("NN log loss:", metrics.log_loss(y[cal], self.pr3))
        print("XGB+RF+NN log loss:", metrics.log_loss(y[cal], (self.pr1+self.pr2+self.pr3)/3))
        self.clfs = [self.clf1, self.clf2, self.clf3]
        predictions = []
        for clf in self.clfs:
            predictions.append(clf.predict_proba(X.iloc[cal]))
        self.cal_y = y[cal]
        def log_loss_func(weights):
            ''' scipy minimize will pass the weights as a numpy array '''
            final_prediction = 0
            for weight, prediction in zip(weights, predictions):
                final_prediction += weight*prediction
            return metrics.log_loss(self.cal_y, final_prediction)
        # SLSQP with a sum-to-one constraint and [0, 1] bounds; 20 random
        # restarts (first start is uniform) to avoid poor local minima.
        scores = []
        wghts = []
        for i in range(20):
            if not i:
                starting_values = [1/3]*len(self.clfs)
            else:
                starting_values = np.random.uniform(size=len(self.clfs))
            cons = ({'type': 'eq', 'fun': lambda w: 1-sum(w)})
            bounds = [(0, 1)]*len(predictions)
            res = scopt.minimize(log_loss_func, starting_values, method='SLSQP', bounds=bounds, constraints=cons)
            scores.append(res['fun'])
            wghts.append(res['x'])
        bestSC = np.min(scores)
        bestWght = wghts[np.argmin(scores)]
        self.weights = bestWght
        print('Ensamble Score: {best_score}'.format(best_score=bestSC))
        print('Best Weights: {weights}'.format(weights=bestWght))
    def predict(self, X):
        """Not implemented — only probability prediction is supported."""
        pass
    def predict_proba(self, X):
        """Weighted sum of the base models' probabilities using fitted weights."""
        pred = 0
        for weight, clf in zip(self.weights, self.clfs):
            pred += weight*clf.predict_proba(X)
        return pred
    def get_params(self, deep=False):
        # Required for sklearn-API compatibility; no tunable params exposed.
        return {}
    def score(self, X, y):
        """Multiclass log loss of the blended probabilities."""
        return metrics.log_loss(y, self.predict_proba(X))
# train the main predictor
clf = OttoPredictor()
clf.fit(train_X, train_y)
# predict the test sets by smaller batches to reduce the amount of req. memory
# (14 batches of 10,000 rows plus one final batch for the remainder)
preds = [clf.predict_proba(test_X[10000 * i:10000 * (i + 1)]) for i in range(14)]
preds.append(clf.predict_proba(test_X[140000:]))
preds = np.vstack(preds)
# Write the submission with ids and class columns taken from the sample file.
preds = pd.DataFrame(preds, index=sample.id.values, columns=sample.columns[1:])
preds.to_csv('submission.csv', index_label='id')
| 33.025641 | 114 | 0.610766 | 6,223 | 0.805254 | 0 | 0 | 0 | 0 | 0 | 0 | 1,020 | 0.131988 |
201171ed4ed2acece5307ef9d3b0fd5bbf3bf1a5 | 114 | py | Python | pyeem/instruments/horiba/__init__.py | drewmee/PyEEM | 283c01405bf51da6827ba434be53acd580b7642b | [
"MIT"
] | 4 | 2020-09-01T08:27:28.000Z | 2022-03-12T09:11:15.000Z | pyeem/instruments/horiba/__init__.py | drewmee/PyEEM | 283c01405bf51da6827ba434be53acd580b7642b | [
"MIT"
] | 2 | 2021-06-11T18:20:47.000Z | 2021-11-19T14:11:09.000Z | pyeem/instruments/horiba/__init__.py | drewmee/PyEEM | 283c01405bf51da6827ba434be53acd580b7642b | [
"MIT"
] | null | null | null | from .aqualog import Aqualog
from .fluorolog import Fluorolog
# Manufacturer name for this instrument sub-package.
name = "Horiba"
# Instrument classes exported by this sub-package.
instruments = [Aqualog, Fluorolog]
| 19 | 34 | 0.780702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 | 0.070175 |
20118083c189727b7a83f1144f6b191415bf0b96 | 1,432 | py | Python | Model/Download2Mp3.py | BETRU21/DownloadToMp3 | a0499c9039348aeab1b05ca493d02f79dc8f5d1e | [
"Apache-2.0"
] | null | null | null | Model/Download2Mp3.py | BETRU21/DownloadToMp3 | a0499c9039348aeab1b05ca493d02f79dc8f5d1e | [
"Apache-2.0"
] | null | null | null | Model/Download2Mp3.py | BETRU21/DownloadToMp3 | a0499c9039348aeab1b05ca493d02f79dc8f5d1e | [
"Apache-2.0"
] | null | null | null | import youtube_dl
class Download2Mp3:
def __init__(self, CallableHook=None):
self.hook = CallableHook
self.notDownloaded = []
self.setupDownloadParam()
# Public functions
def downloadMusicFile(self, url):
if type(url) is not str:
raise TypeError("url argument is not a string.")
try:
with youtube_dl.YoutubeDL(self.ydlParams) as ydl:
ydl.download([url])
except Exception as e:
e = str(e)
self.notDownloaded.append(url)
raise RuntimeError(e)
def musicsNotDownloaded(self):
return self.notDownloaded
# Non-Public functions
def setupDownloadParam(self):
if self.hook == None:
self.ydlParams = {
'outtmpl': 'downloadMusics/%(title)s.%(ext)s',
'noplaylist': True,
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',}],}
else:
self.ydlParams = {
"progress_hooks": [self.hook],
'outtmpl': 'downloadMusics/%(title)s.%(ext)s',
'noplaylist': True,
'format': 'bestaudio/best',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192',}],}
| 30.468085 | 61 | 0.539106 | 1,412 | 0.986034 | 0 | 0 | 0 | 0 | 0 | 0 | 415 | 0.289804 |
2013ed9ff566c0c9215f3514a117ffdd2d27c869 | 529 | py | Python | src/Python/01_Interakcja_z_konsola/Zad7.py | djeada/Nauka-programowania | b1eb6840c15b830acf552f0a0fc5cc692759152f | [
"MIT"
] | 3 | 2020-09-19T21:38:30.000Z | 2022-03-30T11:02:26.000Z | src/Python/01_Interakcja_z_konsola/Zad7.py | djeada/Nauka-programowania | b1eb6840c15b830acf552f0a0fc5cc692759152f | [
"MIT"
] | null | null | null | src/Python/01_Interakcja_z_konsola/Zad7.py | djeada/Nauka-programowania | b1eb6840c15b830acf552f0a0fc5cc692759152f | [
"MIT"
] | 1 | 2022-02-04T09:13:20.000Z | 2022-02-04T09:13:20.000Z | if __name__ == "__main__":
"""
Pobierz podstawe i wysokosc trojkata i wypisz pole.
"""
print("podaj podstawe i wysokosc trojkata:")
a = int(input())
h = int(input())
print(
"pole trojkata o podstawie ", a, " i wysokosci ", h, " jest rowne ", a * h / 2
)
"""
Pobierz dlugosci bokow prostokata i wypisz pole.
"""
print("podaj dlogosci bokow prostokata:")
a = int(input())
b = int(input())
print("pole prostokata o bokach ", a, " i ", b, " jest rowne ", a * b)
| 20.346154 | 86 | 0.561437 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 310 | 0.586011 |
20180273094d0e160bca16ce1c37d08abdc4c3cc | 803 | py | Python | message/models.py | bopopescu/storyboard | 0258fd6f80b6bbd9d0ca493cbaaae87c3a6d16e2 | [
"MIT"
] | null | null | null | message/models.py | bopopescu/storyboard | 0258fd6f80b6bbd9d0ca493cbaaae87c3a6d16e2 | [
"MIT"
] | null | null | null | message/models.py | bopopescu/storyboard | 0258fd6f80b6bbd9d0ca493cbaaae87c3a6d16e2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
models.py
Created by Darcy Liu on 2012-03-03.
Copyright (c) 2012 Close To U. All rights reserved.
"""
from django.db import models
from django.contrib.auth.models import User
# class Message(models.Model):
# key = models.AutoField(primary_key=True)
# title = models.CharField(max_length=256,verbose_name='title')
# text = models.TextField(blank=True, verbose_name='text')
# sender = models.ForeignKey(User,verbose_name='sender')
# receiver = models.ForeignKey(User,verbose_name='receiver')
# created = models.DateTimeField(auto_now_add=True,verbose_name='created')
# updated = models.DateTimeField(auto_now=True,verbose_name='updated')
# def __unicode__(self):
# result = self.title
# return unicode(result)
| 33.458333 | 78 | 0.714819 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 714 | 0.889166 |
2018bf68650c49cd5b523600df50d3ef8a5228b2 | 2,772 | py | Python | model_helpers.py | Dantistnfs/binerals-classification-task | 994a38dc4c11d05c066e595a426e7fc3320a0229 | [
"MIT"
] | null | null | null | model_helpers.py | Dantistnfs/binerals-classification-task | 994a38dc4c11d05c066e595a426e7fc3320a0229 | [
"MIT"
] | null | null | null | model_helpers.py | Dantistnfs/binerals-classification-task | 994a38dc4c11d05c066e595a426e7fc3320a0229 | [
"MIT"
] | null | null | null | """
Helper functions for model training, loading, testing etc
"""
import pickle
import tqdm
import torch
import numpy as np
def train(t_model, optimizer, loss_function, train_dataset,
          validation_dataset, epochs=None):
    """Train *t_model*, printing loss/accuracy for each epoch.

    Parameters
    ----------
    t_model : torch.nn.Module
    optimizer : torch.optim.Optimizer
    loss_function : callable mapping (predictions, targets) to a loss tensor
    train_dataset, validation_dataset : iterables of (x, y) batch pairs
    epochs : int or None
        Maximum number of epochs to run. ``None`` (the default) preserves
        the original behavior of training until externally interrupted.
    """
    epoch = 0
    while epochs is None or epoch < epochs:
        epoch += 1
        train_loss = 0.0
        train_accu = 0.0
        train_length = 0
        t_model.train()  # turn on training mode
        for x, y in tqdm.tqdm(train_dataset):
            # NOTE(review): len(x[0]) measures the first element of the batch,
            # not the batch size — kept as-is so reported metrics are unchanged.
            train_length += len(x[0])
            optimizer.zero_grad()
            preds = t_model(x)
            loss = loss_function(preds, y)
            loss.backward()
            optimizer.step()
            preds = 1 / (1 + torch.exp(-preds))  # sigmoid over raw logits
            # accuracy over one-hot targets: compare argmax of y and preds
            train_accu += torch.max(y, 1)[1].eq(torch.max(preds,
                                                          1)[1]).sum().item()
            train_loss += loss.data * x.size(0)
        train_loss /= train_length
        train_accu /= train_length
        # calculate the validation loss for this epoch
        val_loss = 0.0
        val_accu = 0.0
        val_length = 0
        t_model.eval()  # turn on evaluation mode
        for x, y in validation_dataset:
            val_length += len(x[0])
            preds = t_model(x)
            loss = loss_function(preds, y)
            val_loss += loss.data * x.size(0)
            preds = 1 / (1 + torch.exp(-preds))
            val_accu += torch.max(y, 1)[1].eq(torch.max(preds,
                                                        1)[1]).sum().item()
        val_loss /= val_length
        val_accu /= val_length
        print(
            f'Epoch: {epoch}, Training Loss: {train_loss:.4f}, Validation Loss: {val_loss:.4f}'
        )
        print(
            f'Epoch: {epoch}, Training Accuracy: {train_accu:.4f}, Validation Accuracy: {val_accu:.4f}'
        )
def predict(t_model, test_dataset):
    """Run *t_model* over *test_dataset*, print accuracy, and return the
    stacked per-sample sigmoid probabilities as a numpy array."""
    t_model.eval()  # evaluation mode: no dropout / batch-norm updates
    correct = 0.0
    total = 0
    batch_outputs = []
    for features, labels in test_dataset:
        # NOTE: len(features[0]) mirrors the training code's length accounting.
        total += len(features[0])
        logits = t_model(features)
        probs = 1 / (1 + torch.exp(-logits))  # sigmoid
        batch_outputs.append(probs.data.cpu().numpy())
        # accuracy over one-hot labels: argmax comparison
        correct += torch.max(labels, 1)[1].eq(torch.max(probs, 1)[1]).sum().item()
    accuracy = correct / total
    stacked = np.vstack(batch_outputs)
    print(f'Test Accuracy: {accuracy:.4f}')
    return stacked
def save_model(model, filename):
    """Persist *model* (pickled module, architecture + weights) to *filename*."""
    torch.save(model, filename)
def save_vocab(vocab, filename):
    """Serialize *vocab* to *filename* using pickle."""
    with open(filename, 'wb') as handle:
        pickle.dump(vocab, handle)
def load_model(filename, **kwargs):
    """Load a model saved with ``save_model``.

    *kwargs* are forwarded to ``torch.load`` (e.g. ``map_location='cpu'``).
    """
    model = torch.load(filename, **kwargs)
    return model
def load_vocab(filename):
    """Load and return a vocabulary object pickled with ``save_vocab``."""
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
| 28 | 103 | 0.556277 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 399 | 0.143939 |
2018fb840212bddc171185fe57f1bf72ee64dbb1 | 790 | py | Python | camera/PIR_camera.py | jutako/raspi | f69d15a48765c85960e7d7da175d4f96cb1dfee3 | [
"MIT"
] | null | null | null | camera/PIR_camera.py | jutako/raspi | f69d15a48765c85960e7d7da175d4f96cb1dfee3 | [
"MIT"
] | null | null | null | camera/PIR_camera.py | jutako/raspi | f69d15a48765c85960e7d7da175d4f96cb1dfee3 | [
"MIT"
] | null | null | null | #!/usr/bin/python
import RPi.GPIO as GPIO
from picamera import PiCamera
import time
import datetime
# Poll a PIR motion sensor and save a timestamped photo whenever motion is seen.
PIN = 12  # BCM pin number wired to the PIR sensor output
GPIO.setmode(GPIO.BCM)  # use Broadcom (BCM) pin numbering
GPIO.setup(PIN, GPIO.IN)
camera = PiCamera()
camera.rotation = 180  # NOTE(review): presumably the camera is mounted inverted — confirm
camera.resolution = (1024, 576)
#camera.start_preview()
#sleep(20)
#camera.stop_preview()
# Poll forever: when the sensor reads high, capture a picture, then pause.
while True:
	time.sleep(0.5)  # polling interval
	#print("waiting...")
	if (GPIO.input(PIN)):
		print("Taking a picture...")
		#camera.start_preview()
		time.sleep(0.2)  # short settle delay before capture
		ts = time.time()
		tstr = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S')
		fname = "/home/pi/figs/image_%s.jpg" % (tstr)
		camera.capture(fname)
		time.sleep(3)  # skip further triggers while the motion burst continues
		#camera.stop_preview()
	else:
		print("PIN down...")
| 17.954545 | 80 | 0.589873 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 218 | 0.275949 |
201969c88f34f0fa220bab32fdd8cbaf6e2e16f3 | 2,689 | py | Python | ca_node/scripts/ranking_controller.py | hidmic/create_autonomy | 3aec14c9a6aa2d9a7b817d119bfb82b089e60219 | [
"BSD-3-Clause"
] | null | null | null | ca_node/scripts/ranking_controller.py | hidmic/create_autonomy | 3aec14c9a6aa2d9a7b817d119bfb82b089e60219 | [
"BSD-3-Clause"
] | 4 | 2019-10-24T17:19:50.000Z | 2020-02-20T01:06:27.000Z | ca_node/scripts/ranking_controller.py | hidmic/create_autonomy | 3aec14c9a6aa2d9a7b817d119bfb82b089e60219 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import rospy
import threading
from ca_msgs.msg import Bumper
from geometry_msgs.msg import Twist, Vector3
class StateMachine(object):
    """Translates bumper events into a FIFO queue of timed motion goals and
    publishes the corresponding Twist commands on /cmd_vel."""

    def __init__(self):
        self.pub = rospy.Publisher("/cmd_vel", Twist, queue_size=10)
        self.goal_queue = []  # pending goals, executed oldest-first

    def rotate(self, ang_vel):
        """Rotate in place at *ang_vel* (positive turns left)."""
        self.move(0., ang_vel)

    def rotate_left(self, ang_vel):
        self.rotate(ang_vel)

    def rotate_right(self, ang_vel):
        self.rotate(-ang_vel)

    def set_goal(self, data):
        """Queue avoidance maneuvers for a Bumper message.

        FIX: the branches are now mutually exclusive. The original used a
        plain ``if`` after the both-bumpers case, so a both-bumpers event
        also queued the left-bumper reaction.
        """
        if data.is_left_pressed and data.is_right_pressed:
            self.goal_queue.append({'goal': self.move_backward, 'velocity': 0.1, 'duration': 3.})
        elif data.is_left_pressed:
            self.goal_queue.append({'goal': self.move_backward, 'velocity': 0.1, 'duration': 1.5})
            self.goal_queue.append({'goal': self.rotate_right, 'velocity': 0.3, 'duration': 2.})
        elif data.is_right_pressed:
            self.goal_queue.append({'goal': self.move_backward, 'velocity': 0.1, 'duration': 1.5})
            self.goal_queue.append({'goal': self.rotate_left, 'velocity': 0.3, 'duration': 2.})
        else:
            self.goal_queue.append({'goal': self.move_straight, 'velocity': 0.2, 'duration': 0.})

    def stop(self):
        self.move(0., 0.)

    def close(self):
        """Stop the robot and drop any pending goals."""
        self.stop()
        self.goal_queue = []

    def move(self, lin_vel, ang_vel):
        """Publish a single Twist with the given linear/angular velocities."""
        msg = Twist()
        msg.linear.x = lin_vel
        msg.angular.z = ang_vel
        self.pub.publish(msg)

    def move_straight(self, lin_vel):
        self.move(lin_vel, 0.)

    def move_backward(self, lin_vel):
        self.move_straight(-lin_vel)

    def run(self):
        """Execute one pending goal (oldest first) or cruise straight ahead."""
        if len(self.goal_queue) > 0:
            # FIX: pop(0) executes goals FIFO. The original pop() was LIFO and
            # ran queued maneuvers in reverse (rotate before backing up).
            goal = self.goal_queue.pop(0)
            end_time = rospy.Time.now().secs + goal.get('duration')
            while end_time > rospy.Time.now().secs:
                goal.get('goal')(goal.get('velocity'))
        else:
            # No pending goal: keep moving straight.
            self.move_straight(0.2)
class RankingController():
    """ROS node wiring: subscribes to bumper events, feeds them to a
    StateMachine, and drives its control loop from a background thread."""
    def __init__(self):
        rospy.init_node("ranking_controller", log_level=rospy.INFO)
        self.sub = rospy.Subscriber("bumper", Bumper, self.callback)
        self.state_machine = StateMachine()
        self.rate = rospy.Rate(10)  # Hz
        rospy.on_shutdown(self.stop)
        # Run the control loop off the main thread; rospy.spin() blocks below
        # servicing subscriber callbacks until shutdown.
        threading.Thread(name="ranking_controller", target=self.run).start()
        rospy.spin()
    def callback(self, data):
        """Forward each Bumper message to the state machine as new goals."""
        rospy.logdebug("{} {}".format(data.is_left_pressed, data.is_right_pressed))
        self.state_machine.set_goal(data)
    def stop(self):
        """Shutdown hook: stop the robot and clear pending goals."""
        rospy.loginfo("Thread stopped.")
        self.state_machine.close()
    def run(self):
        """Control loop: execute queued goals at the fixed rate until shutdown."""
        rospy.loginfo("Thread started.")
        while not rospy.is_shutdown():
            self.state_machine.run()
            self.rate.sleep()
if __name__ == "__main__":
    # Instantiating the controller blocks here: __init__ calls rospy.spin().
    rc = RankingController()
| 29.549451 | 92 | 0.664559 | 2,502 | 0.930457 | 0 | 0 | 0 | 0 | 0 | 0 | 350 | 0.13016 |
20199c75685354f03349a09a3834f7b34b31bc25 | 103 | py | Python | LeetCode/python3/1025.py | ZintrulCre/LeetCode_Archiver | de23e16ead29336b5ee7aa1898a392a5d6463d27 | [
"MIT"
] | 279 | 2019-02-19T16:00:32.000Z | 2022-03-23T12:16:30.000Z | LeetCode/python3/1025.py | ZintrulCre/LeetCode_Archiver | de23e16ead29336b5ee7aa1898a392a5d6463d27 | [
"MIT"
] | 2 | 2019-03-31T08:03:06.000Z | 2021-03-07T04:54:32.000Z | LeetCode/python3/1025.py | ZintrulCre/LeetCode_Crawler | de23e16ead29336b5ee7aa1898a392a5d6463d27 | [
"MIT"
] | 12 | 2019-01-29T11:45:32.000Z | 2019-02-04T16:31:46.000Z | class Solution:
def divisorGame(self, N: int) -> bool:
return True if N % 2 == 0 else False | 34.333333 | 44 | 0.61165 | 103 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
201e557f74f0335327de9be24c57129ec7d8f08b | 915 | py | Python | gabbi/exception.py | scottwallacesh/gabbi | 5e76332ac06cd075c11477b384ad5da1d2eaa9d5 | [
"Apache-2.0"
] | 145 | 2015-01-16T23:19:35.000Z | 2022-03-15T00:21:54.000Z | gabbi/exception.py | scottwallacesh/gabbi | 5e76332ac06cd075c11477b384ad5da1d2eaa9d5 | [
"Apache-2.0"
] | 250 | 2015-01-02T11:20:06.000Z | 2022-03-22T19:55:18.000Z | gabbi/exception.py | scottwallacesh/gabbi | 5e76332ac06cd075c11477b384ad5da1d2eaa9d5 | [
"Apache-2.0"
] | 49 | 2015-01-14T16:14:52.000Z | 2022-03-21T11:37:26.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Gabbi specific exceptions."""
class GabbiDataLoadError(ValueError):
    """An exception to alert when data streams cannot be loaded."""
class GabbiFormatError(ValueError):
    """An exception to encapsulate poorly formed test data."""
class GabbiSyntaxWarning(SyntaxWarning):
    """A warning about syntax that is not desirable."""
| 31.551724 | 75 | 0.742077 | 326 | 0.356284 | 0 | 0 | 0 | 0 | 0 | 0 | 739 | 0.80765 |
201f3558e4dbdd368aeeee6e9f098d5308313493 | 235 | py | Python | music/filename.py | JohanLi/uncharted-waters-2-research | fe6d40a28baed38e894a301da85a80c89e7153fa | [
"MIT"
] | null | null | null | music/filename.py | JohanLi/uncharted-waters-2-research | fe6d40a28baed38e894a301da85a80c89e7153fa | [
"MIT"
] | null | null | null | music/filename.py | JohanLi/uncharted-waters-2-research | fe6d40a28baed38e894a301da85a80c89e7153fa | [
"MIT"
] | null | null | null | import os
# Normalize converted filenames: lowercase, spaces -> hyphens,
# strip typographic apostrophes.
path = './converted/'
for filename in os.listdir(path):
    # NOTE(review): .lower() is applied both here and on the rename below —
    # redundant but harmless.
    newFilename = filename.lower().replace(' ', '-').replace('’', '')
    os.rename(path + filename, path + newFilename.lower())
then = os.listdir(path)  # directory listing after the renames
print(then)
| 19.583333 | 69 | 0.642553 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.113924 |
2020fcd0b6330a2f620511b1f0629988385a2358 | 2,792 | py | Python | django_rest_resetpassword/tests.py | fasfoxcom/django-rest-resetpassword | b459c44f4ff426b190cb7303b32a23bf7b06b823 | [
"MIT"
] | 4 | 2020-01-14T14:25:57.000Z | 2021-03-21T10:51:48.000Z | django_rest_resetpassword/tests.py | fasfoxcom/django-rest-resetpassword | b459c44f4ff426b190cb7303b32a23bf7b06b823 | [
"MIT"
] | 3 | 2020-09-16T14:09:58.000Z | 2021-03-07T10:53:29.000Z | django_rest_resetpassword/tests.py | fasfoxcom/django-rest-resetpassword | b459c44f4ff426b190cb7303b32a23bf7b06b823 | [
"MIT"
] | 3 | 2020-04-07T10:11:39.000Z | 2022-03-07T04:25:33.000Z | from django.conf import settings
from django.contrib.auth.models import User
from django.urls import reverse
from rest_framework.test import APITestCase
class BaseAPITest(APITestCase):
    """Shared fixtures: an authenticated default user plus a user factory."""

    def setUp(self, password=None) -> None:
        self.user = User(username="John Smith", email="john@example.com")
        self.user.set_password("123")
        self.user.save()
        self.client.force_authenticate(user=self.user)

    def user_factory(self, username="peter", email="peter@example.com", password="123"):
        """Create and persist an extra user.

        FIX: the original passed the raw password straight to ``User(...)``,
        storing it unhashed — inconsistent with ``setUp``, which hashes via
        ``set_password``. Factory users could never authenticate.
        """
        user = User(username=username, email=email)
        user.set_password(password)
        user.save()
        return user
class ResetPasswordAPITest(BaseAPITest):
    """Exercises the reset-password-request endpoint under each lookup-field
    configuration (default, DJANGO_REST_LOOKUP_FIELD, DJANGO_REST_LOOKUP_FIELDS)."""

    # Expected success message, factored out of the four duplicated literals.
    SUCCESS_MSG = "A password reset token has been sent to the provided email address"

    def _post_reset(self, value):
        """POST *value* in the 'email' field of the reset-request endpoint."""
        return self.client.post(reverse("reset-password-request"), data={"email": value})

    def test_request_password_with_no_settings(self):
        # make sure that if no setting, the default password request reset field is the email.
        user = self.user_factory()
        response = self._post_reset(user.username)
        self.assertEqual(response.status_code, 400)
        response = self._post_reset(user.email)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["message"], self.SUCCESS_MSG)

    def test_request_password_with_django_rest_lookup_field_setting(self):
        # Make sure we can still use DJANGO_REST_LOOKUP_FIELD setting for backward compatibility.
        settings.DJANGO_REST_LOOKUP_FIELD = "username"
        # FIX: the original mutated the global settings without cleanup,
        # leaking the setting into subsequently-run tests (order-dependent).
        self.addCleanup(delattr, settings, "DJANGO_REST_LOOKUP_FIELD")
        user = self.user_factory()
        response = self._post_reset(user.username)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.data["message"], self.SUCCESS_MSG)

    def test_request_password_with_django_rest_lookup_fields_setting(self):
        # Make sure new users can use DJANGO_REST_LOOKUP_FIELDS setting.
        settings.DJANGO_REST_LOOKUP_FIELDS = ["email", "username"]
        self.addCleanup(delattr, settings, "DJANGO_REST_LOOKUP_FIELDS")
        user = self.user_factory()
        # Both configured lookup values must be accepted.
        for lookup_value in (user.username, user.email):
            response = self._post_reset(lookup_value)
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.data["message"], self.SUCCESS_MSG)
| 47.322034 | 98 | 0.69735 | 2,633 | 0.943052 | 0 | 0 | 0 | 0 | 0 | 0 | 798 | 0.285817 |
2020fead74782498dcbbc501d6a342b6a06a76e1 | 948 | py | Python | day2/netmiko_ex1rudy.py | rudy5rudy/pynet-ons-feb19 | 1fa0b30af35aaae73ced2f77c04ab1cb5f2ac5fc | [
"Apache-2.0"
] | null | null | null | day2/netmiko_ex1rudy.py | rudy5rudy/pynet-ons-feb19 | 1fa0b30af35aaae73ced2f77c04ab1cb5f2ac5fc | [
"Apache-2.0"
] | null | null | null | day2/netmiko_ex1rudy.py | rudy5rudy/pynet-ons-feb19 | 1fa0b30af35aaae73ced2f77c04ab1cb5f2ac5fc | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Exercises using Netmiko"""
from __future__ import print_function
from getpass import getpass
from netmiko import ConnectHandler
#def save_file(filename, show_run):
# """Save the show run to a file"""
# with open(filename, "w") as f:
# f.write(show_run)
def main():
    """Exercises using Netmiko: connect to a lab IOS device, print the
    prompt and "show ver"/"show run" output, and save the running config."""
    password = getpass()  # prompt interactively; never hard-code credentials
    cisco3 = {
        "device_type": "cisco_ios",
        "host": "cisco3.lasthop.io",
        "username": "pyclass",
        "password": password,
    }
    netconnect = ConnectHandler(**cisco3)
    print(netconnect.find_prompt())  # sanity check: the device prompt
    output = netconnect.send_command("show ver")
    print(output)
    # `output` is reused below, so only the "show run" text gets saved.
    output = netconnect.send_command("show run")
    print(output)
    save_file("cisc003.txt",output)
#write the file
def save_file(filename, show_run):
    """Write the captured command output *show_run* into *filename*."""
    with open(filename, "w") as outfile:
        outfile.write(show_run)
main()  # executed at import time; the original script has no __main__ guard
#save_file()
| 21.066667 | 48 | 0.64135 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 387 | 0.408228 |
20214e44beff67803045cc13f9f17cbaa929c06c | 326 | py | Python | tests/test_pytorch.py | szymonmaszke/torchtraining | 1ddf169325b7239d6d6686b20072a406b69a0180 | [
"MIT"
] | 3 | 2020-08-26T06:11:58.000Z | 2020-08-27T08:11:15.000Z | tests/test_pytorch.py | klaudiapalasz/torchtraining | 7ac54009eea2fd84aa635b6f3cbfe306f317d087 | [
"MIT"
] | 1 | 2020-08-25T19:19:43.000Z | 2020-08-25T19:19:43.000Z | tests/test_pytorch.py | klaudiapalasz/torchtraining | 7ac54009eea2fd84aa635b6f3cbfe306f317d087 | [
"MIT"
] | 1 | 2021-04-15T18:55:57.000Z | 2021-04-15T18:55:57.000Z | """Core pytorch operations regarding optimization (optimize, schedule) are placed in general tests."""
import pytest
import torch
import torchtraining.pytorch as P
def test_backward():
    """Backward() should populate gradients on leaf tensors after a call."""
    step = P.Backward()
    inputs = torch.randn(10, requires_grad=True)
    loss = (inputs ** 2).sum()
    step(loss)
    assert inputs.grad is not None
| 25.076923 | 102 | 0.708589 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 102 | 0.312883 |
2022e211ed69af3b8b01c2021ce2046666e70c5f | 1,464 | py | Python | tests/protein_test.py | LauraKaschnitz/advanced_python_2021-22_HD | 63ed116d75dd0a2e8bf9288ef95aea5b5ce860ae | [
"CC0-1.0"
] | null | null | null | tests/protein_test.py | LauraKaschnitz/advanced_python_2021-22_HD | 63ed116d75dd0a2e8bf9288ef95aea5b5ce860ae | [
"CC0-1.0"
] | null | null | null | tests/protein_test.py | LauraKaschnitz/advanced_python_2021-22_HD | 63ed116d75dd0a2e8bf9288ef95aea5b5ce860ae | [
"CC0-1.0"
] | null | null | null | import sys
from pathlib import Path
# -------- START of inconvenient addon block --------
# This block is not necessary if you have installed your package
# using e.g. pip install -e (requires setup.py)
# or have a symbolic link in your sitepackages (my preferend way)
sys.path.append(
str(Path(__file__).parent.parent.resolve())
)
# It make import peak_finder possible
# This is a demo hack for the course :)
# -------- END of inconvenient addon block --------
import proteins
# Name
# Sequenz
# Plot Methode
def test_proteins_name():
    """The name passed to the constructor is exposed as ``.name``."""
    protein = proteins.basic.Protein("Test_Name", "AAAAA")
    assert protein.name == "Test_Name"
def test_proteins_sequence():
    """The sequence passed to the constructor is exposed as ``.sequence``."""
    protein = proteins.basic.Protein("Test_Name", "AAAAA")
    assert protein.sequence == "AAAAA"
def test_proteins_find_metric_values():
    """Residues A and R map to the expected per-residue metric values."""
    protein = proteins.basic.Protein("Test_Name", "AR")
    assert protein.find_metric_values() == [1.8, -4.5]
def test_proteins_calculate_sliding_window():
    """Sliding-window means over the metric values of 'ARARA'."""
    protein = proteins.basic.Protein("Test_Name", "ARARA")
    expected = [1.8, -1.35, -0.30000000000000004, -1.35, -0.7200000000000001]
    assert protein.calculate_sliding_window() == expected
def test_proteins_create_positions():
    """Positions are a 0-based index for each residue in the sequence."""
    protein = proteins.basic.Protein("Test_Name", "ARARA")
    assert protein.create_positions() == [0, 1, 2, 3, 4]
| 32.533333 | 87 | 0.715164 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 492 | 0.336066 |
2023ddc41607e5925cec13b5cb85d8acf690b482 | 2,092 | py | Python | src/treq/test/test_utils.py | chevah/treq | 2d45c8227246583bc96cb4924722d9f79e95d4d7 | [
"MIT"
] | 20 | 2015-01-06T11:13:26.000Z | 2019-12-04T02:22:03.000Z | src/treq/test/test_utils.py | chevah/treq | 2d45c8227246583bc96cb4924722d9f79e95d4d7 | [
"MIT"
] | 13 | 2015-01-02T17:46:30.000Z | 2015-03-31T12:57:14.000Z | src/treq/test/test_utils.py | chevah/treq | 2d45c8227246583bc96cb4924722d9f79e95d4d7 | [
"MIT"
] | 10 | 2015-01-02T23:17:05.000Z | 2021-06-05T12:03:25.000Z | import mock
from twisted.trial.unittest import TestCase
from treq._utils import default_reactor, default_pool, set_global_pool
class DefaultReactorTests(TestCase):
    """default_reactor() returns the reactor it is given, else the global one."""

    def test_passes_reactor(self):
        supplied = mock.Mock()
        self.assertEqual(default_reactor(supplied), supplied)

    def test_uses_default_reactor(self):
        from twisted.internet import reactor
        self.assertEqual(default_reactor(None), reactor)
class DefaultPoolTests(TestCase):
    """Tests for default_pool(): explicit pool passthrough, persistence-flag
    handling, and caching of the module-global pool."""
    def setUp(self):
        # Clear the cached module-global pool and stub HTTPConnectionPool so
        # each test observes only the constructions it triggers itself.
        set_global_pool(None)
        pool_patcher = mock.patch('treq._utils.HTTPConnectionPool')
        self.HTTPConnectionPool = pool_patcher.start()
        self.addCleanup(pool_patcher.stop)
        self.reactor = mock.Mock()
    def test_persistent_false(self):
        # persistent=False constructs a fresh non-persistent pool.
        self.assertEqual(
            default_pool(self.reactor, None, False),
            self.HTTPConnectionPool.return_value
        )
        self.HTTPConnectionPool.assert_called_once_with(
            self.reactor, persistent=False
        )
    def test_pool_none_persistent_none(self):
        # No pool and no persistence preference defaults to persistent=True.
        self.assertEqual(
            default_pool(self.reactor, None, None),
            self.HTTPConnectionPool.return_value
        )
        self.HTTPConnectionPool.assert_called_once_with(
            self.reactor, persistent=True
        )
    def test_pool_none_persistent_true(self):
        # Explicit persistent=True likewise builds a persistent pool.
        self.assertEqual(
            default_pool(self.reactor, None, True),
            self.HTTPConnectionPool.return_value
        )
        self.HTTPConnectionPool.assert_called_once_with(
            self.reactor, persistent=True
        )
    def test_cached_global_pool(self):
        # A second call reuses the cached global pool instead of constructing
        # a new one (the swapped return_value must NOT show up).
        pool1 = default_pool(self.reactor, None, None)
        self.HTTPConnectionPool.return_value = mock.Mock()
        pool2 = default_pool(self.reactor, None, True)
        self.assertEqual(pool1, pool2)
    def test_specified_pool(self):
        # A caller-supplied pool is returned untouched; nothing is constructed.
        pool = mock.Mock()
        self.assertEqual(
            default_pool(self.reactor, pool, None),
            pool
        )
        self.HTTPConnectionPool.assert_not_called()
| 26.820513 | 70 | 0.666348 | 1,957 | 0.935468 | 0 | 0 | 0 | 0 | 0 | 0 | 32 | 0.015296 |
202403eba79cfd93e6eb428ba43c48862239800d | 12,792 | py | Python | pygments_lexer_solidity/lexer.py | veox/pygments-lexer-solidity | e99ccf980337ceaad4fbc7ee11795e91d7fab0ae | [
"BSD-2-Clause"
] | 2 | 2018-05-24T14:36:59.000Z | 2019-06-29T23:50:08.000Z | pygments_lexer_solidity/lexer.py | veox/pygments-lexer-solidity | e99ccf980337ceaad4fbc7ee11795e91d7fab0ae | [
"BSD-2-Clause"
] | null | null | null | pygments_lexer_solidity/lexer.py | veox/pygments-lexer-solidity | e99ccf980337ceaad4fbc7ee11795e91d7fab0ae | [
"BSD-2-Clause"
] | 1 | 2019-11-11T23:24:17.000Z | 2019-11-11T23:24:17.000Z | # -*- coding: utf-8 -*-
"""
pygments.lexers.solidity
~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for the Solidity language.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import (
RegexLexer,
bygroups,
combined,
default,
include,
inherit,
this,
using,
words,
)
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Other
__all__ = ['SolidityLexer', 'YulLexer']
class BaseLexer(RegexLexer):
    """Common state definitions shared by the Solidity and Yul lexers."""
    tokens = {
        'assembly': [
            include('comments'),
            include('numbers'),
            include('strings'),
            include('whitespace'),
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
            (r'[(),]', Punctuation),
            (r':=|=:', Operator),
            (r'(let)(\s*)(\w*\b)',
             bygroups(Operator.Word, Text, Name.Variable)),
            # evm instructions; ordered by opcode
            (r'(stop|add|mul|sub|div|sdiv|mod|smod|addmod|mulmod|exp|'
             r'signextend|'
             r'lt|gt|slt|sgt|eq|iszero|and|or|xor|not|byte|shl|shr|sar|'
             r'keccak256|'
             r'address|balance|origin|caller|'
             r'callvalue|calldataload|calldatasize|calldatacopy|'
             r'codesize|codecopy|gasprice|extcodesize|extcodecopy|'
             r'returndatasize|returndatacopy|extcodehash|'
             r'blockhash|coinbase|timestamp|number|difficulty|gaslimit|'
             r'chainid|selfbalance|'
             r'pop|mload|mstore|mstore8|sload|sstore|'
             r'pc|msize|gas|'
             r'log0|log1|log2|log3|log4|'
             r'create|call|callcode|return|delegatecall|create2|'
             r'staticcall|revert|'
             r'invalid|selfdestruct)\b',
             Name.Function),
            # obsolete aliases for keccak256 and selfdestruct
            (r'(sha3|suicide)\b', Name.Function),
            (r'(case|default|for|if|switch)\b', Keyword),
            # everything else is either a local/external var, or label
            # FIX: raw string — '\w' in a non-raw literal is an invalid escape
            # (SyntaxWarning on modern Python).
            (r'[a-zA-Z_]\w*', Name)
        ],
        'comment-parse-single': [
            include('natspec'),
            include('spdx'),
            (r'\n', Comment.Single, '#pop'),
            (r'[^\n]', Comment.Single),
        ],
        'comment-parse-multi': [
            include('natspec'),
            include('spdx'),
            (r'[^*/]', Comment.Multiline),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline),
        ],
        'comments': [
            (r'//', Comment.Single, 'comment-parse-single'),
            (r'/[*]', Comment.Multiline, 'comment-parse-multi'),
        ],
        'natspec': [
            (r'@(author|dev|inheritdoc|notice|param|return|title|'
             r'custom:[a-z][a-z-]*)\b',
             Comment.Special),
        ],
        'spdx': [
            (r'SPDX-License-Identifier:',
             Comment.Special),
        ],
        'numbers': [
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?', Number.Float),
            (r'[0-9]+([eE][0-9]+)?', Number.Integer),
        ],
        'string-parse-common': [
            # escapes
            (r'\\(u[0-9a-fA-F]{4}|x..|[^x])', String.Escape),
            # almost everything else is plain characters
            (r'[^\\"\'\n]+', String),
            # line continuation
            (r'\\\n', String),
            # stray backslash
            (r'\\', String)
        ],
        'string-parse-double': [
            (r'"', String, '#pop'),
            (r"'", String)
        ],
        'string-parse-single': [
            (r"'", String, '#pop'),
            (r'"', String)
        ],
        'strings': [
            # hexadecimal string literals
            (r"hex'[0-9a-fA-F]+'", String),
            (r'hex"[0-9a-fA-F]+"', String),
            # unicode string literals
            (r'unicode"', String, combined('string-parse-common',
                                           'string-parse-double')),
            (r"unicode'", String, combined('string-parse-common',
                                           'string-parse-single')),
            # usual strings
            (r'"', String, combined('string-parse-common',
                                    'string-parse-double')),
            (r"'", String, combined('string-parse-common',
                                    'string-parse-single'))
        ],
        'whitespace': [
            (r'\s+', Text)
        ],
    } # tokens
class SolidityLexer(BaseLexer):
    """For Solidity source code."""
    name = 'Solidity'
    aliases = ['sol', 'solidity']
    filenames = ['*.sol', '*.solidity']
    mimetypes = ['text/x-solidity']
    flags = re.DOTALL | re.UNICODE | re.MULTILINE
    def type_names(prefix, sizerange):
        """
        Helper for type name generation, like: bytes1 .. bytes32
        Assumes that the size range passed in is valid.
        """
        namelist = []
        for i in sizerange: namelist.append(prefix + str(i))
        return tuple(namelist)
    def type_names_mn(prefix, sizerangem, sizerangen):
        """
        Helper for type name generation, like: fixed8x0 .. fixed26x80
        Assumes that the size range passed in is valid.
        """
        lm = []
        ln = []
        namelist = []
        # construct lists out of ranges
        for i in sizerangem: lm.append(i)
        for i in sizerangen: ln.append(i)
        validpairs = [tuple([m,n]) for m in lm for n in ln]
        for i in validpairs:
            namelist.append(prefix + str(i[0]) + 'x' + str(i[1]))
        return tuple(namelist)
    tokens = {
        'assembly': [
            # labels
            (r'(\w*\b)(\:[^=])',
             bygroups(Name.Label, Punctuation)),
            # evm instructions present in `assembly` but not Yul
            (r'(jump|jumpi|jumpdest)\b', Name.Function),
            # TODO: rename helper: instructions, not types
            (words(type_names('dup', range(1, 16+1)),
                   suffix=r'\b'), Keyword.Function),
            (words(type_names('swap', range(1, 16+1)),
                   suffix=r'\b'), Keyword.Function),
            (words(type_names('push', range(1, 32+1)),
                   suffix=r'\b'), Keyword.Function),
            inherit,
        ],
        'keywords-builtins': [
            # compiler built-ins
            (r'(balance|now)\b', Name.Builtin),
            (r'selector\b', Name.Builtin),
            (r'(super|this)\b', Name.Builtin),
        ],
        'keywords-functions': [
            # receive/fallback functions
            (r'(receive|fallback)\b', Keyword.Function),
            # like block.hash and msg.gas in `keywords-nested`
            (r'(blockhash|gasleft)\b', Name.Function),
            # single-instruction yet function syntax
            (r'(selfdestruct|suicide)\b', Name.Function),
            # processed into many-instructions
            (r'(send|transfer|call|callcode|delegatecall)\b',
             Name.Function),
            (r'(assert|revert|require)\b', Name.Function),
            (r'push\b', Name.Function),
            # built-in functions and/or precompiles
            (words(('addmod', 'ecrecover', 'keccak256', 'mulmod',
                    'sha256', 'sha3', 'ripemd160'),
                   suffix=r'\b'), Name.Function),
        ],
        'keywords-types': [
            (words(('address', 'bool', 'byte', 'bytes', 'int', 'string',
                    'uint'),
                   suffix=r'\b'), Keyword.Type),
            (words(type_names('bytes', range(1, 32+1)),
                   suffix=r'\b'), Keyword.Type),
            (words(type_names('int', range(8, 256+1, 8)),
                   suffix=r'\b'), Keyword.Type),
            (words(type_names('uint', range(8, 256+1, 8)),
                   suffix=r'\b'), Keyword.Type),
            # not yet fully implemented, therefore reserved types
            (words(('fixed', 'ufixed'), suffix=r'\b'), Keyword.Reserved),
            (words(type_names_mn('fixed',
                                 range(8, 256+1, 8),
                                 range(0, 80+1, 1)),
                   suffix=r'\b'), Keyword.Reserved),
            (words(type_names_mn('ufixed',
                                 range(8, 256+1, 8),
                                 range(0, 80+1, 1)),
                   suffix=r'\b'), Keyword.Reserved),
        ],
        'keywords-nested': [
            (r'abi\.encode(|Packed|WithSelector|WithSignature)\b',
             Name.Builtin),
            (r'block\.(blockhash|chainid|coinbase|difficulty|gaslimit|hash|'
             r'number|timestamp)\b', Name.Builtin),
            (r'msg\.(data|gas|sender|value)\b', Name.Builtin),
            (r'tx\.(gasprice|origin)\b', Name.Builtin),
        ],
        'keywords-other': [
            (words(('for', 'in', 'while', 'do', 'break', 'return',
                    'returns', 'continue', 'if', 'else', 'throw',
                    'new', 'delete', 'try', 'catch'),
                   suffix=r'\b'), Keyword),
            (r'assembly\b', Keyword, 'assembly'),
            (words(('contract', 'interface', 'enum', 'event', 'function',
                    'constructor', 'library', 'mapping', 'modifier',
                    'struct', 'var'),
                   suffix=r'\b'), Keyword.Declaration),
            (r'(import|using)\b', Keyword.Namespace),
            # pragmas are not pragmatic in their formatting :/
            (r'pragma( experimental| solidity| abicoder|)\b', Keyword),
            # misc keywords
            (r'(_|as|constant|from|is)\b', Keyword),
            (r'emit\b', Keyword),
            # built-in modifier
            (r'payable\b', Keyword),
            # variable location specifiers
            (r'(calldata|memory|storage)\b', Keyword),
            # method visibility specifiers
            (r'(external|internal|private|public)\b', Keyword),
            # event parameter specifiers
            (r'(anonymous|indexed)\b', Keyword),
            # added in solc v0.4.0, not covered elsewhere
            (r'(abstract|pure|static|view)\b', Keyword),
            # added in solc v0.6.0, not covered elsewhere
            (r'(override|virtual)\b', Keyword),
            # added in solc v0.8.0
            (r'(unchecked)\b', Keyword),
            # type information
            (r'type\(.*\)\.(min|max|interfaceId|creationCode|runtimeCode|name)\b', Keyword),
            # reserved for future use since don't-remember-when
            (words(('after', 'case', 'default', 'final', 'in', 'inline',
                    'let', 'match', 'null', 'of', 'relocatable', 'static',
                    'switch', 'typeof'),
                   suffix=r'\b'), Keyword.Reserved),
            # reserved for future use since solc v0.5.0
            (words(('alias', 'apply', 'auto', 'copyof', 'define',
                    'immutable', 'implements', 'macro', 'mutable',
                    'partial', 'promise', 'reference',
                    'sealed', 'sizeof', 'supports', 'typedef'),
                   suffix=r'\b'), Keyword.Reserved),
            # built-in constants
            (r'(true|false)\b', Keyword.Constant),
            (r'(wei|finney|szabo|ether)\b', Keyword.Constant),
            (r'(seconds|minutes|hours|days|weeks|years)\b',
             Keyword.Constant),
        ],
        'root': [
            inherit,
            include('comments'),
            include('keywords-builtins'),
            include('keywords-functions'),
            include('keywords-types'),
            include('keywords-nested'),
            include('keywords-other'),
            include('numbers'),
            include('strings'),
            include('whitespace'),
            (r'\+\+|--|\*\*|\?|:|~|&&|\|\||=>|==?|!=?|'
             r'(<<|>>>?|[-<>+*%&|^/])=?', Operator),
            (r'[{(\[;,]', Punctuation),
            (r'[})\].]', Punctuation),
            # everything else is a var/function name
            # FIX: raw string — '\w' in a non-raw literal is an invalid escape
            # (SyntaxWarning on modern Python).
            (r'[a-zA-Z$_]\w*', Name),
        ],
    } # tokens
class YulLexer(BaseLexer):
    """For Yul stand-alone source code."""
    name = 'Yul'
    aliases = ['yul']
    filenames = ['*.yul']
    mimetypes = ['text/x-yul']
    # Same regex flags as the base lexer so the inherited rules match identically.
    flags = re.DOTALL | re.UNICODE | re.MULTILINE
    tokens = {
        'root': [
            inherit,
            # Yul-specific
            (r'(code|data|function|object)\b', Keyword),
            # datacopy/dataoffset/datasize are Yul object built-in functions.
            (r'data(copy|offset|size)\b', Keyword.Function),
            (r'->', Operator),
            include('assembly'),
            # ':' - variable declaration type hint - catch it last
            (':', Punctuation),
        ],
    } # tokens
| 34.95082 | 92 | 0.482489 | 12,232 | 0.956223 | 0 | 0 | 0 | 0 | 0 | 0 | 5,579 | 0.436132 |
2024077dd459a76b5147e2c0b8b573e999001432 | 2,305 | py | Python | challenge 10/Calculator.py | caroldf07/100daysofcode-python | 508ad5464904193b0bd58fc73d26b21d8adc011a | [
"Apache-2.0"
] | 1 | 2021-11-02T22:24:13.000Z | 2021-11-02T22:24:13.000Z | challenge 10/Calculator.py | caroldf07/100challengesofcode-python | 508ad5464904193b0bd58fc73d26b21d8adc011a | [
"Apache-2.0"
] | null | null | null | challenge 10/Calculator.py | caroldf07/100challengesofcode-python | 508ad5464904193b0bd58fc73d26b21d8adc011a | [
"Apache-2.0"
] | 1 | 2021-10-15T23:46:34.000Z | 2021-10-15T23:46:34.000Z | logo = """
_____________________
| _________________ |
| | Pythonista 0. | | .----------------. .----------------. .----------------. .----------------.
| |_________________| | | .--------------. || .--------------. || .--------------. || .--------------. |
| ___ ___ ___ ___ | | | ______ | || | __ | || | _____ | || | ______ | |
| | 7 | 8 | 9 | | + | | | | .' ___ | | || | / \ | || | |_ _| | || | .' ___ | | |
| |___|___|___| |___| | | | / .' \_| | || | / /\ \ | || | | | | || | / .' \_| | |
| | 4 | 5 | 6 | | - | | | | | | | || | / ____ \ | || | | | _ | || | | | | |
| |___|___|___| |___| | | | \ `.___.'\ | || | _/ / \ \_ | || | _| |__/ | | || | \ `.___.'\ | |
| | 1 | 2 | 3 | | x | | | | `._____.' | || ||____| |____|| || | |________| | || | `._____.' | |
| |___|___|___| |___| | | | | || | | || | | || | | |
| | . | 0 | = | | / | | | '--------------' || '--------------' || '--------------' || '--------------' |
| |___|___|___| |___| | '----------------' '----------------' '----------------' '----------------'
|_____________________|
"""
print(logo)
def add(x, y):
    """Return the sum of x and y."""
    total = x + y
    return total
def subtract(x, y):
    """Return x minus y."""
    difference = x - y
    return difference
def division(x, y):
    """Return x divided by y.

    Division by zero is undefined, so in that case a warning is printed and
    None is returned (no ZeroDivisionError is raised); callers should treat
    the operation as having produced no usable result.
    """
    if y == 0:
        # Guard clause; also fixes the original message, which read
        # "It can't divide per 0'" with a stray trailing quote.
        print("Cannot divide by 0")
        return None
    return x / y
def multiply(x, y):
    """Return the product of x and y."""
    product = x * y
    return product
# Dispatch table mapping the operator symbol typed by the user to the
# function implementing it.
operators = {
    "+": add,
    "-": subtract,
    "/": division,
    "*": multiply
}
def calculator():
    """Interactive calculator loop: read numbers and an operator, print the result."""
    x = float(input("Please enter a number: "))
    keep_going = "y"
    while(keep_going == "y"):
        operator_chosen = input("Please enter the desired operation: ")
        y = float(input("Please enter a number: "))
        # Look up the handler in the `operators` dispatch table.
        calculation = operators[operator_chosen]
        answer = calculation(x, y)
        print(f"{x} {operator_chosen} {y} = {answer}")
        keep_going = input(
            f"Type 'y' to continue calculating with {answer} or type 'n' to exit: ").lower()
        if(keep_going == "y"):
            # Carry the previous answer forward as the next left operand.
            x = answer
            """Here we have a recursion when the user does not want to use the same answer before and it want to start a new iteration"""
        else:
            # NOTE(review): typing 'n' restarts the calculator via recursion
            # instead of exiting, despite what the prompt says — confirm this
            # is the intended tutorial behaviour.
            calculator()
calculator()
| 33.897059 | 137 | 0.358351 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,601 | 0.694577 |
20267ec9911e5557412955f8827404aed1e89346 | 2,863 | py | Python | env/lib/python3.7/site-packages/materialx/emoji.py | ritchadh/docs-like-code-demo | 23d189e074b9ecf136b7b91df3826bcfa51cd124 | [
"BSD-3-Clause"
] | null | null | null | env/lib/python3.7/site-packages/materialx/emoji.py | ritchadh/docs-like-code-demo | 23d189e074b9ecf136b7b91df3826bcfa51cd124 | [
"BSD-3-Clause"
] | null | null | null | env/lib/python3.7/site-packages/materialx/emoji.py | ritchadh/docs-like-code-demo | 23d189e074b9ecf136b7b91df3826bcfa51cd124 | [
"BSD-3-Clause"
] | null | null | null | """
Emoji extras for Material.
Override the indexes with an extended version that includes short names for Material icons, FontAwesome, etc.
"""
import os
import glob
import copy
import codecs
import inspect
import material
import pymdownx
from pymdownx.emoji import TWEMOJI_SVG_CDN, add_attriubtes
import xml.etree.ElementTree as etree # noqa: N813
OPTION_SUPPORT = pymdownx.__version_info__ >= (7, 1, 0)
RESOURCES = os.path.dirname(inspect.getfile(material))
def _patch_index(options):
    """Patch the given index.

    Builds a Twemoji-style emoji index extended with one entry per ``*.svg``
    icon found under the icon locations: any ``custom_icons`` folders from
    *options* plus Material's bundled ``.icons`` directory.
    """
    import pymdownx.twemoji_db as twemoji_db
    # Copy the Twemoji index
    # deep-copied on pymdownx < 7.1 (OPTION_SUPPORT is False) — presumably
    # because older versions share a global database object; TODO confirm.
    index = {
        "name": 'twemoji',
        "emoji": copy.deepcopy(twemoji_db.emoji) if not OPTION_SUPPORT else twemoji_db.emoji,
        "aliases": copy.deepcopy(twemoji_db.aliases) if not OPTION_SUPPORT else twemoji_db.aliases
    }
    icon_locations = options.get('custom_icons', [])
    icon_locations.append(os.path.join(RESOURCES, '.icons'))
    # Find our icons
    for icon_path in icon_locations:
        norm_base = icon_path.replace('\\', '/') + '/'
        for result in glob.glob(icon_path.replace('\\', '/') + '/**/*.svg', recursive=True):
            # Turn the relative path into a ':dir-subdir-name:' shortname.
            name = ':{}:'.format(result.replace('\\', '/').replace(norm_base, '', 1).replace('/', '-').lstrip('.')[:-4])
            if name not in index['emoji'] and name not in index['aliases']:
                # Easiest to just store the path and pull it out from the index
                index["emoji"][name] = {'name': name, 'path': result}
    return index
# pymdownx >= 7.1 passes (options, md) to emoji index factories; older
# versions call the factory with no arguments — hence the two signatures.
if OPTION_SUPPORT:  # pragma: no cover
    def twemoji(options, md):
        """Provide a copied Twemoji index with additional codes for Material included icons."""
        return _patch_index(options)
else:  # pragma: no cover
    def twemoji():
        """Provide a copied Twemoji index with additional codes for Material included icons."""
        return _patch_index({})
def to_svg(index, shortname, alias, uc, alt, title, category, options, md):
    """Return SVG element.

    Produces an ``<img>`` element pointing at the Twemoji SVG CDN for real
    Unicode emoji, or an inline ``<span>`` wrapping the raw SVG file contents
    for Material's bundled icons (which have no Unicode code point).
    """
    is_unicode = uc is not None
    if is_unicode:
        # Handle Twemoji emoji.
        svg_path = TWEMOJI_SVG_CDN
        attributes = {
            "class": options.get('classes', index),
            "alt": alt,
            "src": "%s%s.svg" % (
                options.get('image_path', svg_path),
                uc
            )
        }
        if title:
            attributes['title'] = title
        add_attriubtes(options, attributes)
        return etree.Element("img", attributes)
    else:
        # Handle Material SVG assets.
        # The file path was stashed in the index by _patch_index(); inline the
        # SVG text and protect it from further Markdown processing via the stash.
        el = etree.Element('span', {"class": options.get('classes', index)})
        svg_path = md.inlinePatterns['emoji'].emoji_index['emoji'][shortname]['path']
        with codecs.open(svg_path, 'r', encoding='utf-8') as f:
            el.text = md.htmlStash.store(f.read())
        return el
| 31.811111 | 120 | 0.624869 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 827 | 0.288858 |
20299f247043cda7e41287f499114f04606c876f | 86 | py | Python | 2rd.py | chidanandpujar/Python_scripts | 0ee70e07ef4ab4d8c04955466ea9b305bdac0a53 | [
"Unlicense"
] | null | null | null | 2rd.py | chidanandpujar/Python_scripts | 0ee70e07ef4ab4d8c04955466ea9b305bdac0a53 | [
"Unlicense"
] | null | null | null | 2rd.py | chidanandpujar/Python_scripts | 0ee70e07ef4ab4d8c04955466ea9b305bdac0a53 | [
"Unlicense"
] | null | null | null | fd = open('f',"r")
buffer = fd.read(2)  # read the first two characters from the file opened above
print("first 2 chars in f:",buffer)
fd.close()  # release the file handle
| 17.2 | 35 | 0.616279 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 27 | 0.313953 |
202b728e02702f8a2d89aa679eb963120719716a | 1,388 | py | Python | availability/__init__.py | Leader0721/ManyIP | 2964c213b67d3cde7d72f75caa4f79b2b0b8b777 | [
"MIT"
] | 629 | 2017-09-04T04:03:08.000Z | 2022-03-27T19:49:47.000Z | availability/__init__.py | Leader0721/ManyIP | 2964c213b67d3cde7d72f75caa4f79b2b0b8b777 | [
"MIT"
] | 7 | 2017-09-04T11:19:17.000Z | 2021-04-10T02:43:33.000Z | availability/__init__.py | Leader0721/ManyIP | 2964c213b67d3cde7d72f75caa4f79b2b0b8b777 | [
"MIT"
] | 119 | 2017-09-04T04:03:11.000Z | 2021-12-20T07:58:22.000Z | # -*- coding: UTF-8 -*-
import config
import gevent
import availability.check
from persistence import persister
import time
def crawl_worker(queue_verification, queue_persistence):
    """
    Worker process that checks the availability of freshly crawled proxies.
    :param queue_verification: queue of proxies waiting to be verified
    :param queue_persistence: queue of verified proxies waiting to be persisted
    :return:
    """
    while True:
        spawns = list()
        # Verify a batch of COROUTINE_NUM proxies, each over both http and https.
        for i in range(config.COROUTINE_NUM):
            proxy = queue_verification.get()
            spawns.append(gevent.spawn(availability.check.crawl_handle, 'http', proxy, queue_persistence))
            spawns.append(gevent.spawn(availability.check.crawl_handle, 'https', proxy, queue_persistence))
        gevent.joinall(spawns)
def store_worker():
    """
    Worker process that periodically re-verifies the availability of stored proxies.
    """
    while True:
        all_proxies = persister.list(count='all', columns='all')
        spawns = list()
        for proxy in all_proxies:
            if proxy['protocol'] == 'http':
                spawns.append(gevent.spawn(availability.check.store_handle, 'http', proxy, persister))
            else:
                spawns.append(gevent.spawn(availability.check.store_handle, 'https', proxy, persister))
            # Flush the batch once COROUTINE_NUM checks are in flight.
            if len(spawns) == config.COROUTINE_NUM:
                gevent.joinall(spawns)
                spawns.clear()
        # Wait for any remaining checks of a partial batch before sleeping.
        gevent.joinall(spawns)
        spawns.clear()
        time.sleep(config.PROXY_STORE_CHECK_SEC)
| 31.545455 | 107 | 0.64121 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 345 | 0.230615 |
202fea4fea9302a54ade62e00c206ce92370b06d | 401 | py | Python | tests/heuristics/test_dataset_id_heuristic.py | HPI-Information-Systems/TimeEval | 9b2717b89decd57dd09e04ad94c120f13132d7b8 | [
"MIT"
] | 2 | 2022-01-29T03:46:31.000Z | 2022-02-14T14:06:35.000Z | tests/heuristics/test_dataset_id_heuristic.py | HPI-Information-Systems/TimeEval | 9b2717b89decd57dd09e04ad94c120f13132d7b8 | [
"MIT"
] | null | null | null | tests/heuristics/test_dataset_id_heuristic.py | HPI-Information-Systems/TimeEval | 9b2717b89decd57dd09e04ad94c120f13132d7b8 | [
"MIT"
] | null | null | null | import unittest
import tests.fixtures.heuristics_fixtures as fixtures
from timeeval.heuristics import DatasetIdHeuristic
class TestDatasetIdHeuristic(unittest.TestCase):
    """Unit test: DatasetIdHeuristic should resolve to the dataset's declared ID."""
    def test_heuristic(self):
        heuristic = DatasetIdHeuristic()
        value = heuristic(fixtures.algorithm, fixtures.dataset, fixtures.real_test_dataset_path)
        # The heuristic is expected to return the dataset's own datasetId.
        self.assertEqual(value, fixtures.dataset.datasetId)
| 33.416667 | 96 | 0.795511 | 276 | 0.688279 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
2030a605cd752d14f209c7587eb4ae5e80ff522c | 551 | py | Python | src/grokcore/component/tests/adapter/importedmodel.py | zopefoundation/grokcore.component | ae027df4c0bccf59ab8358b46495456682158837 | [
"ZPL-2.1"
] | 1 | 2018-03-19T01:53:45.000Z | 2018-03-19T01:53:45.000Z | src/grokcore/component/tests/adapter/importedmodel.py | zopefoundation/grokcore.component | ae027df4c0bccf59ab8358b46495456682158837 | [
"ZPL-2.1"
] | 6 | 2015-04-21T13:26:52.000Z | 2020-11-24T07:03:27.000Z | src/grokcore/component/tests/adapter/importedmodel.py | zopefoundation/grokcore.component | ae027df4c0bccf59ab8358b46495456682158837 | [
"ZPL-2.1"
] | 4 | 2015-04-03T04:48:13.000Z | 2018-01-12T06:50:02.000Z | """
Imported model and adapter won't be grokked:
>>> import grokcore.component as grok
>>> grok.testing.grok(__name__)
>>> from grokcore.component.tests.adapter.adapter import IHome
>>> cave = Cave()
>>> home = IHome(cave)
Traceback (most recent call last):
...
TypeError: ('Could not adapt', <grokcore.component.tests.adapter.adapter.Cave object at ...>, <InterfaceClass grokcore.component.tests.adapter.adapter.IHome>)
""" # noqa: E501 line too long
from grokcore.component.tests.adapter.adapter import Cave, Home # noqa: F401
| 36.733333 | 160 | 0.713249 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 482 | 0.874773 |
20318ce6f98ad7d15054368aa42e0b37d5feb5f0 | 862 | py | Python | tools/intogen/runtime/pyenv/lib/python2.7/site-packages/wok/core/utils/proxies.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | 1 | 2021-02-05T13:19:58.000Z | 2021-02-05T13:19:58.000Z | chapter2/wok/master/wok/core/utils/proxies.py | chris-zen/phd-thesis | 1eefdff8e7ca1910304e27ae42551dc64496b101 | [
"Unlicense"
] | null | null | null | chapter2/wok/master/wok/core/utils/proxies.py | chris-zen/phd-thesis | 1eefdff8e7ca1910304e27ae42551dc64496b101 | [
"Unlicense"
] | null | null | null | # from http://www.voidspace.org.uk/python/weblog/arch_d7_2007_03_17.shtml#e664
def ReadOnlyProxy(obj):
    """Wrap *obj* in a proxy that forwards all reads but rejects attribute writes.

    Regular attribute access is forwarded via __getattr__; dunder methods are
    copied onto the proxy class explicitly because Python looks them up on the
    type, bypassing __getattr__.
    """
    class _ReadOnlyProxy(object):
        def __getattr__(self, attr):
            return getattr(obj, attr)
        def __setattr__(self, attr, val):
            raise AttributeError("Attributes can't be set on this object")
    skip = ('__new__', '__init__', '__class__', '__bases__')
    for attr in dir(obj):
        # Only forward callable dunder methods, minus the construction-related ones.
        is_dunder = attr.startswith('__') and attr.endswith('__')
        if not is_dunder or attr in skip:
            continue
        target = getattr(obj, attr)
        if not callable(target):
            continue
        def _make_forwarder(attr):
            def _forward(self, *args, **kwargs):
                return getattr(obj, attr)(*args, **kwargs)
            return _forward
        setattr(_ReadOnlyProxy, attr, _make_forwarder(attr))
    return _ReadOnlyProxy()
return _ReadOnlyProxy() | 33.153846 | 78 | 0.603248 | 224 | 0.259861 | 0 | 0 | 0 | 0 | 0 | 0 | 163 | 0.189095 |
2032029ba048fe481ac31525f79cc7152354c67a | 2,834 | py | Python | donkeycar/parts/pigpio_enc.py | asepnh/donkeycar | af3883c6ea5462878de7b9f01482816ee10c20b2 | [
"MIT"
] | null | null | null | donkeycar/parts/pigpio_enc.py | asepnh/donkeycar | af3883c6ea5462878de7b9f01482816ee10c20b2 | [
"MIT"
] | null | null | null | donkeycar/parts/pigpio_enc.py | asepnh/donkeycar | af3883c6ea5462878de7b9f01482816ee10c20b2 | [
"MIT"
] | 1 | 2021-05-24T23:47:20.000Z | 2021-05-24T23:47:20.000Z | import pigpio
import time
class OdomDist(object):
    """
    Take a tick input from odometry and compute the distance travelled,
    plus a moving-average velocity estimate.
    """
    WINDOW = 10  # samples in the velocity moving average (~0.5 s of a 20 Hz feed)

    def __init__(self, mm_per_tick, debug=False):
        self.mm_per_tick = mm_per_tick
        self.m_per_tick = mm_per_tick / 1000.0
        self.meters = 0
        self.last_time = time.time()
        self.meters_per_second = 0
        self.debug = debug
        self.prev_ticks = 0
        self.distance = 0
        self.prev_distance = 0
        self.ave_velocity = [0] * self.WINDOW

    def run(self, ticks, throttle):
        """
        inputs => total ticks since start
        inputs => throttle, currently unused (reserved for sign disambiguation)
        return => total dist (m), smoothed vel (m/s), cumulative dist (m)
        """
        # Save off the last time interval and reset the timer.
        start_time = self.last_time
        end_time = time.time()
        self.last_time = end_time
        seconds = end_time - start_time
        if seconds <= 0:
            # Guard against a zero/negative interval (clock resolution); the
            # original code would raise ZeroDivisionError here.
            seconds = 1e-6

        self.distance = ticks * self.m_per_tick  # converted to meters here
        delta_distance = self.distance - self.prev_distance
        instant_velocity = delta_distance / seconds

        # Moving average: drop the oldest reading, prepend the newest
        # (replaces the original element-by-element shifting loop).
        self.ave_velocity = [instant_velocity] + self.ave_velocity[:-1]
        velocity = sum(self.ave_velocity) / self.WINDOW

        # Update the odometer values.
        self.meters += delta_distance
        self.meters_per_second = velocity
        self.prev_distance = self.distance

        # Console output for debugging.
        if self.debug:
            print('distance (m):', round(self.meters, 3))
            print('velocity (m/s):', round(self.meters_per_second, 3))

        return self.meters, self.meters_per_second, self.distance
class PiPGIOEncoder():
    """Tick counter driven by pigpio falling-edge callbacks on a single GPIO pin."""
    def __init__(self, pin, pi):
        self.pin = pin
        self.pi = pi
        self.pi.set_mode(pin, pigpio.INPUT)
        self.pi.set_pull_up_down(pin, pigpio.PUD_UP)
        # Invoke _cb on every falling edge of the encoder signal.
        self.cb = pi.callback(self.pin, pigpio.FALLING_EDGE, self._cb)
        self.count = 0
    def _cb(self, pin, level, tick):
        self.count += 1
    def run(self):
        # Total ticks observed so far.
        return self.count
    def shutdown(self):
        # Cancel the callback and stop the pigpio connection on first shutdown;
        # subsequent calls fall through without side effects.
        if self.cb != None:
            self.cb.cancel()
            self.cb = None
            self.pi.stop()
if __name__ == "__main__":
    # Manual smoke test: count encoder ticks on GPIO 4, polling at 10 Hz.
    pi = pigpio.pi()
    e = PiPGIOEncoder(4, pi)
    while True:
        time.sleep(0.1)
        e.run()
| 31.488889 | 103 | 0.60374 | 2,666 | 0.94072 | 0 | 0 | 0 | 0 | 0 | 0 | 756 | 0.266761 |
20328fd176b3f935bb3bf0db906c7f944bf98a06 | 6,370 | py | Python | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/action_smn_forwarding.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | 1 | 2021-04-16T07:59:28.000Z | 2021-04-16T07:59:28.000Z | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/action_smn_forwarding.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | null | null | null | huaweicloud-sdk-iotda/huaweicloudsdkiotda/v5/model/action_smn_forwarding.py | wuchen-huawei/huaweicloud-sdk-python-v3 | 3683d703f4320edb2b8516f36f16d485cff08fc2 | [
"Apache-2.0"
] | 1 | 2022-01-17T02:24:18.000Z | 2022-01-17T02:24:18.000Z | # coding: utf-8
import pprint
import re
import six
class ActionSmnForwarding:
    """SMN (Simple Message Notification) forwarding action model.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []
    # OpenAPI type of each model attribute.
    openapi_types = {
        'region_name': 'str',
        'project_id': 'str',
        'theme_name': 'str',
        'topic_urn': 'str',
        'message_content': 'str',
        'message_title': 'str'
    }
    # JSON key used for each attribute when (de)serializing.
    attribute_map = {
        'region_name': 'region_name',
        'project_id': 'project_id',
        'theme_name': 'theme_name',
        'topic_urn': 'topic_urn',
        'message_content': 'message_content',
        'message_title': 'message_title'
    }
    def __init__(self, region_name=None, project_id=None, theme_name=None, topic_urn=None, message_content=None, message_title=None):
        """ActionSmnForwarding - a model defined in huaweicloud sdk"""
        # Backing fields for the properties defined below.
        self._region_name = None
        self._project_id = None
        self._theme_name = None
        self._topic_urn = None
        self._message_content = None
        self._message_title = None
        self.discriminator = None
        # Assign through the property setters.
        self.region_name = region_name
        self.project_id = project_id
        self.theme_name = theme_name
        self.topic_urn = topic_urn
        self.message_content = message_content
        self.message_title = message_title
    @property
    def region_name(self):
        """Gets the region_name of this ActionSmnForwarding.
        Region of the SMN service.
        :return: The region_name of this ActionSmnForwarding.
        :rtype: str
        """
        return self._region_name
    @region_name.setter
    def region_name(self, region_name):
        """Sets the region_name of this ActionSmnForwarding.
        Region of the SMN service.
        :param region_name: The region_name of this ActionSmnForwarding.
        :type: str
        """
        self._region_name = region_name
    @property
    def project_id(self):
        """Gets the project_id of this ActionSmnForwarding.
        Project ID of the SMN service.
        :return: The project_id of this ActionSmnForwarding.
        :rtype: str
        """
        return self._project_id
    @project_id.setter
    def project_id(self, project_id):
        """Sets the project_id of this ActionSmnForwarding.
        Project ID of the SMN service.
        :param project_id: The project_id of this ActionSmnForwarding.
        :type: str
        """
        self._project_id = project_id
    @property
    def theme_name(self):
        """Gets the theme_name of this ActionSmnForwarding.
        Topic (theme) name of the SMN service.
        :return: The theme_name of this ActionSmnForwarding.
        :rtype: str
        """
        return self._theme_name
    @theme_name.setter
    def theme_name(self, theme_name):
        """Sets the theme_name of this ActionSmnForwarding.
        Topic (theme) name of the SMN service.
        :param theme_name: The theme_name of this ActionSmnForwarding.
        :type: str
        """
        self._theme_name = theme_name
    @property
    def topic_urn(self):
        """Gets the topic_urn of this ActionSmnForwarding.
        URN of the SMN topic.
        :return: The topic_urn of this ActionSmnForwarding.
        :rtype: str
        """
        return self._topic_urn
    @topic_urn.setter
    def topic_urn(self, topic_urn):
        """Sets the topic_urn of this ActionSmnForwarding.
        URN of the SMN topic.
        :param topic_urn: The topic_urn of this ActionSmnForwarding.
        :type: str
        """
        self._topic_urn = topic_urn
    @property
    def message_content(self):
        """Gets the message_content of this ActionSmnForwarding.
        Content of the SMS or email message.
        :return: The message_content of this ActionSmnForwarding.
        :rtype: str
        """
        return self._message_content
    @message_content.setter
    def message_content(self, message_content):
        """Sets the message_content of this ActionSmnForwarding.
        Content of the SMS or email message.
        :param message_content: The message_content of this ActionSmnForwarding.
        :type: str
        """
        self._message_content = message_content
    @property
    def message_title(self):
        """Gets the message_title of this ActionSmnForwarding.
        Subject of the SMS or email message.
        :return: The message_title of this ActionSmnForwarding.
        :rtype: str
        """
        return self._message_title
    @message_title.setter
    def message_title(self, message_title):
        """Sets the message_title of this ActionSmnForwarding.
        Subject of the SMS or email message.
        :param message_title: The message_title of this ActionSmnForwarding.
        :type: str
        """
        self._message_title = message_title
    def to_dict(self):
        """Returns the model properties as a dict"""
        # Serialize every declared attribute; nested models are converted via
        # their own to_dict() and sensitive scalars are masked as "****".
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        # Equality is structural: same type and identical attribute dicts.
        if not isinstance(other, ActionSmnForwarding):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 26.541667 | 133 | 0.595447 | 6,507 | 0.991014 | 0 | 0 | 3,448 | 0.525129 | 0 | 0 | 3,181 | 0.484465 |
2037cfe78c4cb57f5b145bb1327426566cfe164f | 8,190 | py | Python | slack_sdk/scim/v1/user.py | priya1puresoftware/python-slack-sdk | 3503182feaaf4d41b57fd8bf10038ebc99f1f3c7 | [
"MIT"
] | 2,486 | 2016-11-03T14:31:43.000Z | 2020-10-26T23:07:44.000Z | slack_sdk/scim/v1/user.py | priya1puresoftware/python-slack-sdk | 3503182feaaf4d41b57fd8bf10038ebc99f1f3c7 | [
"MIT"
] | 721 | 2016-11-03T21:26:56.000Z | 2020-10-26T12:41:29.000Z | slack_sdk/scim/v1/user.py | priya1puresoftware/python-slack-sdk | 3503182feaaf4d41b57fd8bf10038ebc99f1f3c7 | [
"MIT"
] | 627 | 2016-11-02T19:04:19.000Z | 2020-10-25T19:21:13.000Z | from typing import Optional, Any, List, Dict, Union
from .default_arg import DefaultArg, NotGiven
from .internal_utils import _to_dict_without_not_given, _is_iterable
from .types import TypeAndValue
class UserAddress:
    """A user's physical address in the SCIM v1 schema.

    Fields left as ``NotGiven`` are omitted from :meth:`to_dict`; unrecognized
    keyword arguments are collected in ``unknown_fields``.
    """
    country: Union[Optional[str], DefaultArg]
    locality: Union[Optional[str], DefaultArg]
    postal_code: Union[Optional[str], DefaultArg]
    primary: Union[Optional[bool], DefaultArg]
    region: Union[Optional[str], DefaultArg]
    street_address: Union[Optional[str], DefaultArg]
    unknown_fields: Dict[str, Any]
    def __init__(
        self,
        *,
        country: Union[Optional[str], DefaultArg] = NotGiven,
        locality: Union[Optional[str], DefaultArg] = NotGiven,
        postal_code: Union[Optional[str], DefaultArg] = NotGiven,
        primary: Union[Optional[bool], DefaultArg] = NotGiven,
        region: Union[Optional[str], DefaultArg] = NotGiven,
        street_address: Union[Optional[str], DefaultArg] = NotGiven,
        **kwargs,
    ) -> None:
        self.country = country
        self.locality = locality
        self.postal_code = postal_code
        self.primary = primary
        self.region = region
        self.street_address = street_address
        self.unknown_fields = kwargs
    def to_dict(self) -> dict:
        """Serialize to a dict, dropping attributes that were not given."""
        return _to_dict_without_not_given(self)
class UserEmail(TypeAndValue):
    """A user's email address as a SCIM type/value pair."""
    pass
class UserPhoneNumber(TypeAndValue):
    """A user's phone number as a SCIM type/value pair."""
    pass
class UserRole(TypeAndValue):
    """A user's role as a SCIM type/value pair."""
    pass
class UserGroup:
    """A group membership entry of a SCIM user (display name + group id)."""
    display: Union[Optional[str], DefaultArg]
    value: Union[Optional[str], DefaultArg]
    unknown_fields: Dict[str, Any]
    def __init__(
        self,
        *,
        display: Union[Optional[str], DefaultArg] = NotGiven,
        value: Union[Optional[str], DefaultArg] = NotGiven,
        **kwargs,
    ) -> None:
        self.display = display
        self.value = value
        self.unknown_fields = kwargs
    def to_dict(self) -> dict:
        """Serialize to a dict, dropping attributes that were not given."""
        return _to_dict_without_not_given(self)
class UserMeta:
    """SCIM resource metadata (creation timestamp and resource location)."""
    created: Union[Optional[str], DefaultArg]
    location: Union[Optional[str], DefaultArg]
    unknown_fields: Dict[str, Any]
    def __init__(
        self,
        created: Union[Optional[str], DefaultArg] = NotGiven,
        location: Union[Optional[str], DefaultArg] = NotGiven,
        **kwargs,
    ) -> None:
        self.created = created
        self.location = location
        self.unknown_fields = kwargs
    def to_dict(self) -> dict:
        """Serialize to a dict, dropping attributes that were not given."""
        return _to_dict_without_not_given(self)
class UserName:
    """The components of a SCIM user's name."""
    family_name: Union[Optional[str], DefaultArg]
    given_name: Union[Optional[str], DefaultArg]
    unknown_fields: Dict[str, Any]
    def __init__(
        self,
        family_name: Union[Optional[str], DefaultArg] = NotGiven,
        given_name: Union[Optional[str], DefaultArg] = NotGiven,
        **kwargs,
    ) -> None:
        self.family_name = family_name
        self.given_name = given_name
        self.unknown_fields = kwargs
    def to_dict(self) -> dict:
        """Serialize to a dict, dropping attributes that were not given."""
        return _to_dict_without_not_given(self)
class UserPhoto:
    """A user's photo as a SCIM type/value pair.

    NOTE(review): the ``type`` field shadows the builtin of the same name
    inside ``__init__``; kept for wire-format compatibility.
    """
    type: Union[Optional[str], DefaultArg]
    value: Union[Optional[str], DefaultArg]
    unknown_fields: Dict[str, Any]
    def __init__(
        self,
        type: Union[Optional[str], DefaultArg] = NotGiven,
        value: Union[Optional[str], DefaultArg] = NotGiven,
        **kwargs,
    ) -> None:
        self.type = type
        self.value = value
        self.unknown_fields = kwargs
    def to_dict(self) -> dict:
        """Serialize to a dict, dropping attributes that were not given."""
        return _to_dict_without_not_given(self)
class User:
    """A SCIM v1 user record.

    ``__init__`` accepts either typed helper objects (UserAddress, UserGroup,
    UserMeta, ...) or plain dicts, coercing dicts into the typed wrappers.
    Fields left as ``NotGiven`` are omitted from :meth:`to_dict`; unrecognized
    keyword arguments are collected in ``unknown_fields``.
    """
    active: Union[Optional[bool], DefaultArg]
    addresses: Union[Optional[List[UserAddress]], DefaultArg]
    display_name: Union[Optional[str], DefaultArg]
    emails: Union[Optional[List[TypeAndValue]], DefaultArg]
    external_id: Union[Optional[str], DefaultArg]
    groups: Union[Optional[List[UserGroup]], DefaultArg]
    id: Union[Optional[str], DefaultArg]
    meta: Union[Optional[UserMeta], DefaultArg]
    name: Union[Optional[UserName], DefaultArg]
    nick_name: Union[Optional[str], DefaultArg]
    phone_numbers: Union[Optional[List[TypeAndValue]], DefaultArg]
    photos: Union[Optional[List[UserPhoto]], DefaultArg]
    profile_url: Union[Optional[str], DefaultArg]
    roles: Union[Optional[List[TypeAndValue]], DefaultArg]
    schemas: Union[Optional[List[str]], DefaultArg]
    timezone: Union[Optional[str], DefaultArg]
    title: Union[Optional[str], DefaultArg]
    user_name: Union[Optional[str], DefaultArg]
    unknown_fields: Dict[str, Any]
    def __init__(
        self,
        *,
        active: Union[Optional[bool], DefaultArg] = NotGiven,
        addresses: Union[
            Optional[List[Union[UserAddress, Dict[str, Any]]]], DefaultArg
        ] = NotGiven,
        display_name: Union[Optional[str], DefaultArg] = NotGiven,
        emails: Union[
            Optional[List[Union[TypeAndValue, Dict[str, Any]]]], DefaultArg
        ] = NotGiven,
        external_id: Union[Optional[str], DefaultArg] = NotGiven,
        groups: Union[
            Optional[List[Union[UserGroup, Dict[str, Any]]]], DefaultArg
        ] = NotGiven,
        id: Union[Optional[str], DefaultArg] = NotGiven,
        meta: Union[Optional[Union[UserMeta, Dict[str, Any]]], DefaultArg] = NotGiven,
        name: Union[Optional[Union[UserName, Dict[str, Any]]], DefaultArg] = NotGiven,
        nick_name: Union[Optional[str], DefaultArg] = NotGiven,
        phone_numbers: Union[
            Optional[List[Union[TypeAndValue, Dict[str, Any]]]], DefaultArg
        ] = NotGiven,
        photos: Union[
            Optional[List[Union[UserPhoto, Dict[str, Any]]]], DefaultArg
        ] = NotGiven,
        profile_url: Union[Optional[str], DefaultArg] = NotGiven,
        roles: Union[
            Optional[List[Union[TypeAndValue, Dict[str, Any]]]], DefaultArg
        ] = NotGiven,
        schemas: Union[Optional[List[str]], DefaultArg] = NotGiven,
        timezone: Union[Optional[str], DefaultArg] = NotGiven,
        title: Union[Optional[str], DefaultArg] = NotGiven,
        user_name: Union[Optional[str], DefaultArg] = NotGiven,
        **kwargs,
    ) -> None:
        # Coerce plain dicts (or lists of dicts) into their typed wrappers;
        # NotGiven / None / already-typed values pass through unchanged.
        self.active = active
        self.addresses = (  # type: ignore
            [a if isinstance(a, UserAddress) else UserAddress(**a) for a in addresses]
            if _is_iterable(addresses)
            else addresses
        )
        self.display_name = display_name
        self.emails = (  # type: ignore
            [a if isinstance(a, TypeAndValue) else TypeAndValue(**a) for a in emails]
            if _is_iterable(emails)
            else emails
        )
        self.external_id = external_id
        self.groups = (  # type: ignore
            [a if isinstance(a, UserGroup) else UserGroup(**a) for a in groups]
            if _is_iterable(groups)
            else groups
        )
        self.id = id
        self.meta = (  # type: ignore
            UserMeta(**meta) if meta is not None and isinstance(meta, dict) else meta
        )
        self.name = (  # type: ignore
            UserName(**name) if name is not None and isinstance(name, dict) else name
        )
        self.nick_name = nick_name
        self.phone_numbers = (  # type: ignore
            [
                a if isinstance(a, TypeAndValue) else TypeAndValue(**a)
                for a in phone_numbers
            ]
            if _is_iterable(phone_numbers)
            else phone_numbers
        )
        self.photos = (  # type: ignore
            [a if isinstance(a, UserPhoto) else UserPhoto(**a) for a in photos]
            if _is_iterable(photos)
            else photos
        )
        self.profile_url = profile_url
        self.roles = (  # type: ignore
            [a if isinstance(a, TypeAndValue) else TypeAndValue(**a) for a in roles]
            if _is_iterable(roles)
            else roles
        )
        self.schemas = schemas
        self.timezone = timezone
        self.title = title
        self.user_name = user_name
        self.unknown_fields = kwargs
    def to_dict(self):
        # Serialize, dropping attributes that were not given.
        return _to_dict_without_not_given(self)
    def __repr__(self):
        return f"<slack_sdk.scim.{self.__class__.__name__}: {self.to_dict()}>"
| 33.842975 | 86 | 0.627961 | 7,963 | 0.972283 | 0 | 0 | 0 | 0 | 0 | 0 | 175 | 0.021368 |
20387f22863064de341858a8ef9a26184e68af97 | 4,875 | py | Python | typhoon/core/glue.py | typhoon-data-org/typhoon-orchestrator | f24c4807b0e1ee38713ba1468db761119724dcf2 | [
"Apache-2.0"
] | 21 | 2021-04-10T20:57:49.000Z | 2022-03-24T06:45:30.000Z | typhoon/core/glue.py | typhoon-data-org/typhoon-orchestrator | f24c4807b0e1ee38713ba1468db761119724dcf2 | [
"Apache-2.0"
] | 7 | 2021-11-06T16:10:41.000Z | 2021-12-12T11:41:32.000Z | typhoon/core/glue.py | typhoon-data-org/typhoon-orchestrator | f24c4807b0e1ee38713ba1468db761119724dcf2 | [
"Apache-2.0"
] | 1 | 2022-03-08T21:22:33.000Z | 2022-03-08T21:22:33.000Z | """Contains code that stitches together different parts of the library. By containing most side effects here the rest
of the code can be more deterministic and testable.
This code should not be unit tested.
"""
import os
from pathlib import Path
from typing import Union, List, Tuple, Dict, Optional
import yaml
from pydantic import ValidationError
from typhoon.core.components import Component
from typhoon.core.dags import DAGDefinitionV2, add_yaml_constructors
from typhoon.core.settings import Settings
from typing_extensions import Literal
from typhoon.introspection.introspect_extensions import get_typhoon_extensions_info
def transpile_dag_and_store(dag: dict, output_folder_path: Union[str, Path], debug_mode: bool):
    """Render the given DAG definition to Python sources in *output_folder_path*.

    Writes ``<dag_name>.py`` (the DAG entry point) and ``tasks.py``.
    """
    # Imported inside the function — presumably to avoid a circular import
    # with the transpiler modules; confirm before hoisting to module level.
    from typhoon.core.transpiler.dag_transpiler import DagFile
    from typhoon.core.transpiler.task_transpiler import TasksFile
    output_folder_path = Path(output_folder_path)
    output_folder_path.mkdir(parents=True, exist_ok=True)
    dag = DAGDefinitionV2.parse_obj(dag)
    dag_code = DagFile(dag, debug_mode=debug_mode).render()
    (output_folder_path / f'{dag.name}.py').write_text(dag_code)
    tasks_code = TasksFile(dag.tasks).render()
    (output_folder_path / 'tasks.py').write_text(tasks_code)
def load_dag_definitions(ignore_errors: bool = False) -> List[Tuple[DAGDefinitionV2, Path]]:
    """Load every DAG definition (*.yml, recursive) from the configured DAGs directory.

    :param ignore_errors: when True, files failing validation are skipped
        instead of re-raising ValidationError.
    :return: list of (parsed definition, source file path) tuples
    """
    add_yaml_constructors()
    dags = []
    for dag_file in Settings.dags_directory.rglob('*.yml'):
        try:
            dag = DAGDefinitionV2.parse_obj(
                yaml.load(dag_file.read_text(), yaml.FullLoader)
            )
        except ValidationError:
            if ignore_errors:
                continue
            else:
                raise
        dags.append((dag, dag_file))
    return dags
def load_dag_definition(dag_name: str, ignore_errors: bool = False) -> Optional[DAGDefinitionV2]:
    """Return the single DAG definition named *dag_name*, or None when absent."""
    matching_dags = [
        definition
        for definition, _path in load_dag_definitions(ignore_errors)
        if definition.name == dag_name
    ]
    assert len(matching_dags) <= 1, f'Found {len(matching_dags)} dags with name "{dag_name}"'
    if len(matching_dags) == 1:
        return matching_dags[0]
    return None
def get_dag_errors() -> Dict[str, List[dict]]:
    """Validate every DAG file and map DAG file stem -> list of pydantic error dicts.

    DAG files that parse cleanly do not appear in the result.
    """
    add_yaml_constructors()
    result = {}
    for dag_file in Settings.dags_directory.rglob('*.yml'):
        try:
            DAGDefinitionV2.parse_obj(
                yaml.load(dag_file.read_text(), yaml.FullLoader)
            )
        except ValidationError as e:
            result[dag_file.name.split('.yml')[0]] = e.errors()
    return result
def get_dags_contents(dags_directory: Union[str, Path]) -> List[str]:
    """Return the raw text of every *.yml file found recursively under *dags_directory*."""
    return [dag_file.read_text() for dag_file in Path(dags_directory).rglob('*.yml')]
def get_dag_filenames():
    """Iterate over the names of the .yml files sitting directly in the DAGs directory."""
    return (entry for entry in os.listdir(str(Settings.dags_directory)) if entry.endswith('.yml'))
def load_component(
        component_name: str,
        ignore_errors: bool = False,
        kind: Literal['typhoon', 'custom', 'all'] = 'all',
) -> Optional[Component]:
    """Return the single component named *component_name*, or None if not found."""
    assert kind in ['typhoon', 'custom', 'all'], f'Kind should be one of ["typhoon", "custom", "all"]. Found: {kind}'
    components = load_components(ignore_errors, kind)
    matching_components = [(component, code) for component, code in components if component.name == component_name]
    assert len(matching_components) <= 1, f'Found {len(matching_components)} components with name "{component_name}"'
    return matching_components[0][0] if len(matching_components) == 1 else None
def load_components(
    ignore_errors: bool = False,
    kind: Literal['typhoon', 'custom', 'all'] = 'all',
) -> List[Tuple[Component, str]]:
    """Load component definitions of the requested kind.

    For ``kind='all'`` the typhoon components come first, followed by the
    custom ones; otherwise only the requested kind is loaded.  Each entry
    is a ``(component, yaml_source)`` pair.
    """
    if kind == 'all':
        # load_component_definitions already returns lists of pairs, so the
        # original element-by-element re-wrapping was redundant; simple list
        # concatenation preserves the ordering (typhoon first, then custom).
        return (load_component_definitions(ignore_errors, kind='typhoon')
                + load_component_definitions(ignore_errors, kind='custom'))
    return load_component_definitions(ignore_errors, kind)
def load_component_definitions(
    ignore_errors: bool,
    kind: Literal['typhoon', 'custom'] = 'typhoon',
) -> List[Tuple[Component, str]]:
    """Parse component YAML files of the given kind into ``(component, source)`` pairs.

    Bug fix: the original signature read ``kind=Literal['typhoon', 'custom']``,
    which made the *default value* the typing construct itself rather than a
    string.  Since that object never equals ``'custom'``, the effective
    default was the typhoon branch — this signature now states that
    explicitly, so callers are unaffected.

    Files that fail validation raise ``ValidationError`` unless
    ``ignore_errors`` is true, in which case they are skipped.
    """
    add_yaml_constructors()
    if kind == 'custom':
        component_files = list(Settings.components_directory.rglob('*.yml'))
    else:
        component_files = [Path(x) for x in get_typhoon_extensions_info()['components'].values()]
    components = []
    for component_file in component_files:
        # Read the file once; the original read it a second time when
        # appending the source text to the result.
        source = component_file.read_text()
        try:
            comp = Component.parse_obj(yaml.load(source, yaml.FullLoader))
        except ValidationError:
            if not ignore_errors:
                raise
            continue
        components.append((comp, source))
    return components
| 37.21374 | 117 | 0.686974 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 611 | 0.125333 |
203a500dc2fb2afb73319d6d75b7c082ac926d2a | 755 | py | Python | pqu/Check/check.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 14 | 2019-08-29T23:46:24.000Z | 2022-03-21T10:16:25.000Z | pqu/Check/check.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 1 | 2020-08-04T16:14:45.000Z | 2021-12-01T01:54:34.000Z | pqu/Check/check.py | brown170/fudge | 4f818b0e0b0de52bc127dd77285b20ce3568c97a | [
"BSD-3-Clause"
] | 2 | 2022-03-03T22:41:41.000Z | 2022-03-03T22:54:43.000Z | # <<BEGIN-copyright>>
# Copyright 2021, Lawrence Livermore National Security, LLC.
# See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
# <<END-copyright>>
import sys
import glob, os, shutil, filecmp
# Run every t*.py test script with the current interpreter, capture its
# stdout under Out/, and diff each capture against the blessed copies in
# Out.checked/.
PYTHON = sys.executable
files = sorted( glob.glob( 't*.py' ) )
# Start from a clean output directory on every run.
if( os.path.exists( 'Out' ) ) : shutil.rmtree( 'Out' )
os.mkdir( 'Out' )
for file in files :
    base = file[:-3]
    # Redirect each script's stdout into Out/<name>.out; a non-zero exit
    # status flags the failing script on the console.
    status = os.system( '%s %s > Out/%s.out' % ( PYTHON, file, base ) )
    if( status ) : print( '=========== %s ===========' % file )
outs = sorted( glob.glob( 'Out/t*.out' ) )
for out in outs :
    file = os.path.basename( out )
    # Any byte-level difference from the reference output is an error.
    if( not( filecmp.cmp( os.path.join( 'Out.checked', file ), out ) ) ) : print( 'ERROR: %s' % out )
| 27.962963 | 101 | 0.598675 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 294 | 0.389404 |
203abe1bfbdd57f76bc43df415fa9c12d5619ca5 | 315 | py | Python | config.py | nicfro/brownian_motion | 03db2d9a2527b6ee9c6004960cc71da302a1fe2e | [
"MIT"
] | null | null | null | config.py | nicfro/brownian_motion | 03db2d9a2527b6ee9c6004960cc71da302a1fe2e | [
"MIT"
] | null | null | null | config.py | nicfro/brownian_motion | 03db2d9a2527b6ee9c6004960cc71da302a1fe2e | [
"MIT"
] | null | null | null | settings = {"velocity_min": 1,
"velocity_max": 3,
"x_boundary": 800,
"y_boundary": 800,
"small_particle_radius": 5,
"big_particle_radius": 10,
"number_of_particles": 500,
"density_min": 2,
"density_max": 20
} | 31.5 | 39 | 0.47619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 143 | 0.453968 |
203bff717d5abec1d2372f4548aa55e0fe774c45 | 2,791 | py | Python | hooks_plugins/hooks_mail_plugin/mail_plugin.py | crawlino/crawlino-plugins | 482d2e42a43e15a65ea4ab5ee274da887214a1b7 | [
"BSD-3-Clause"
] | 2 | 2018-03-16T10:07:02.000Z | 2020-10-20T22:41:36.000Z | crawlino/plugins/hooks_plugins/hooks_mail_plugin/mail_plugin.py | BBVA/crawlino | 685f57e6b3e9356484ead2681bb178f651d2f371 | [
"Apache-2.0"
] | null | null | null | crawlino/plugins/hooks_plugins/hooks_mail_plugin/mail_plugin.py | BBVA/crawlino | 685f57e6b3e9356484ead2681bb178f651d2f371 | [
"Apache-2.0"
] | 1 | 2020-10-20T22:41:37.000Z | 2020-10-20T22:41:37.000Z | import logging
import smtplib
from crawlino import hook_plugin, PluginReturnedData, CrawlinoValueError
# Module-level logger shared by all log calls in this plugin.
log = logging.getLogger("crawlino-plugin")
@hook_plugin
def hook_mail(prev_step: PluginReturnedData, **kwargs):
    """Send the previous step's results by e-mail.

    Expects ``kwargs['config']`` with keys: ``from``, ``to``, ``bodyField``,
    optional ``subject``, and a ``server`` dict (``smtp``/``port``/``tls``
    unless the sender is a gmail address, plus ``user``/``password``).
    One mail is sent per result item; SMTP failures are logged, not raised.
    """
    log.debug("Hooks Module :: mail plugin")
    data = prev_step.to_dict
    # -------------------------------------------------------------------------
    # Check the source of data. If data comes from step: expressions, check if
    # there're results. If not have results -> don't display nothing
    #
    # Data from STEP_EXTRACTORS have property: 'extractor_results'
    # -------------------------------------------------------------------------
    if "extractor_results" in data:
        if not data["extractor_results"]:
            # No data to display
            return
        else:
            data = data["extractor_results"]
    else:
        # NOTE(review): this wraps a plain dict, but the send loop below does
        # `d.to_dict` on each element -- that attribute access would fail on a
        # dict, so this branch looks like it only works if `prev_step.to_dict`
        # actually returns an object. Confirm against PluginReturnedData.
        data = [data]
    config = kwargs["config"]
    # -------------------------------------------------------------------------
    # Get mandatory data
    # -------------------------------------------------------------------------
    mail_from: str = config["from"]
    mail_to: str = config["to"]
    mail_body_field: str = config["bodyField"]
    # Default subject: first 100 characters of the body-field name.
    mail_subject_field: str = config.get("subject", mail_body_field[:100])
    # -------------------------------------------------------------------------
    # Server settings
    # -------------------------------------------------------------------------
    server_config: dict = config["server"]
    # Gmail senders get hard-coded server settings; everyone else must
    # configure smtp/port/tls explicitly (port 587 + TLS by default).
    if mail_from.endswith("@gmail.com"):
        server_smtp = "smtp.gmail.com"
        server_port = 587
        server_tls = True
    else:
        server_smtp = server_config["smtp"]
        server_port = int(server_config.get("port", 587))
        server_tls = bool(server_config.get("tls", True))
    server_user = server_config["user"]
    server_password = server_config["password"]
    server = smtplib.SMTP(server_smtp, server_port, timeout=2)
    if server_tls:
        server.starttls()
    log.info("Start sending mail")
    try:
        server.login(server_user, server_password)
        for d in data:
            _data = d.to_dict
            try:
                message_body = f"Subject:{mail_subject_field}\n\n" \
                               f"{_data[mail_body_field]}"
            except KeyError as e:
                # The configured bodyField is missing from this result item;
                # skip it but keep sending the rest.
                log.error(f"Error while try to sending mail. bodyField "
                          f"'{mail_body_field}' not found in response of "
                          f"previous step")
                continue
            server.sendmail(mail_from, mail_to, message_body)
        server.quit()
        log.info("Mail sent")
    except Exception as e:
        # Best-effort delivery: any SMTP/auth failure is logged, not raised.
        log.error(f"Error while try to sending mail: '{e}'")
| 33.22619 | 79 | 0.506628 | 0 | 0 | 0 | 0 | 2,638 | 0.945181 | 0 | 0 | 1,162 | 0.416338 |
203d31bf82b5024f824101b94407bc8945c5c669 | 142 | py | Python | app/model/message.py | godraadam/privy-router | 05628e8744cbe26a19cf7c45b119eacb675402d4 | [
"MIT"
] | null | null | null | app/model/message.py | godraadam/privy-router | 05628e8744cbe26a19cf7c45b119eacb675402d4 | [
"MIT"
] | null | null | null | app/model/message.py | godraadam/privy-router | 05628e8744cbe26a19cf7c45b119eacb675402d4 | [
"MIT"
] | null | null | null |
from pydantic import BaseModel
class PrivyMessage(BaseModel):
    """Payload for routing a message to a recipient identified by alias."""
    recipient_alias: str # alias of recipient
    message: str # the message
| 17.75 | 45 | 0.753521 | 107 | 0.753521 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.232394 |
203d83fb18476062a54ca0ada097b5f1e4509232 | 2,825 | py | Python | src/fedActionFromTargetRate.py | jrrpanix/ML9 | e501bb530f82620146b2d7703c3b561b49067dd7 | [
"MIT"
] | null | null | null | src/fedActionFromTargetRate.py | jrrpanix/ML9 | e501bb530f82620146b2d7703c3b561b49067dd7 | [
"MIT"
] | null | null | null | src/fedActionFromTargetRate.py | jrrpanix/ML9 | e501bb530f82620146b2d7703c3b561b49067dd7 | [
"MIT"
] | null | null | null | import os
import csv
import glob
import numpy as np
import pandas as pd
import nltk
import string
import re
from numpy import genfromtxt
from nltk import *
from nltk.corpus.reader.plaintext import PlaintextCorpusReader
from nltk import word_tokenize
from nltk.util import ngrams
from collections import Counter
def statementDate(elem):
    """Sort key: the date stored at position 0 of ``elem``."""
    date, *_rest = elem
    return date
def createRateMoves(pathToStatements,pathToMinutes,pathToCSV):
    """Build ../text/history/RatesDecision.csv from the daily target-rate CSV.

    For every FOMC statement date (taken from the *.txt filenames under
    pathToStatements) the output records the prior rate, new rate, move
    direction, an action flag, the change, and the release date of the
    matching minutes (from filenames under pathToMinutes).
    NOTE(review): the filename parsing relies on '/'-separated paths with a
    fixed depth (split('/')[3]) -- confirm this holds on the target OS.
    """
    actionDF = pd.DataFrame()
    targetRateHistDF = pd.DataFrame()
    dailyRates = pd.read_csv(pathToCSV,dtype=object)
    priorRate = 0
    actionFlag = 0
    previousDayValue = 0
    direction = 'unchg'
    for index,row in dailyRates.iterrows():
        # After 2017-01-01 the effective rate is taken from the NEXT day's row.
        if(row['date'] > '20170101' and index < (len(dailyRates)-1)):
            row['DFEDTAR'] = dailyRates.iloc[index+1,1]
            #row['DFEDTAR'] = dailyRates[dailyRates['DFEDTAR']][index+1]
        chg = float(row['DFEDTAR']) - float(priorRate)
        if(chg>0):
            direction='raise'
            actionFlag = 1
        elif(chg<0):
            direction='lower'
            actionFlag=1
        else:
            direction='unchg'
            actionFlag=0
        targetRateHistDF = targetRateHistDF.append({"Date":row['date'],"MinutesRelease":"","PriorRate": priorRate,"Rate":row['DFEDTAR'],"Direction":direction,"ActionFlag":int(actionFlag),"Change":chg},ignore_index=True)
        priorRate = row['DFEDTAR']
    # Statement filenames encode the meeting dates.
    for file in list(glob.glob(pathToStatements+'*.txt')):
        actionDF = actionDF.append({"Date":str(file).split('/')[3].split('.')[0]},ignore_index=True)
    targetRateHistDF = targetRateHistDF[['Date','MinutesRelease','PriorRate','Rate','Direction','ActionFlag','Change']]
    #print(actionDF.loc[actionDF["Date"] == "20010103","Rate"])
    actionDF = actionDF.sort_values(by=['Date'])
    actionDF.index = pd.RangeIndex(len(actionDF.index))
    # Keep only the daily-rate rows that correspond to a statement date.
    targetRateHistDF = targetRateHistDF[targetRateHistDF['Date'].isin(actionDF['Date'].tolist())]
    targetRateHistDF.index = pd.RangeIndex(len(targetRateHistDF.index))
    # print(targetRateHistDF)
    # Minutes filenames: [meeting_date, ..., release_date] separated by '_'.
    dateArray = []
    for file in list(glob.glob(pathToMinutes+'*.txt')):
        fileString = str(file).split('/')[3].split('.')[0].split('_')
        dateArray.append([fileString[0],fileString[3]])
    dateArray.sort(key=statementDate,reverse=True)
    # print(dateArray)
    # For each meeting, find the release date of the most recent minutes
    # published before the meeting (dateArray is sorted descending).
    for i in range(len(targetRateHistDF)):
        meetingDate = targetRateHistDF.iloc[i,0]
        for j in range(len(dateArray)):
            if(meetingDate>dateArray[j][0]):
                targetRateHistDF.iloc[i,1] = dateArray[j-1][1]
                break
    # Hard-coded release date for the earliest meeting in the sample.
    targetRateHistDF.iloc[0,1] = '20000323'
    # print(targetRateHistDF)
    targetRateHistDF.to_csv('../text/history/RatesDecision.csv',header=False, index=False, sep=',')
def main():
    """Build the rate-decision CSV from statements, minutes and daily rates."""
    daily_rates_csv = '../text/history/dailyRateHistory.csv'
    statements_dir = '../text/statements/'
    minutes_dir = '../text/minutes/'
    createRateMoves(statements_dir, minutes_dir, daily_rates_csv)
if __name__ == '__main__':
    main()
| 34.45122 | 215 | 0.699115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 609 | 0.215575 |
203e507cc3f602e592e2839827155a78ab5500d6 | 47 | py | Python | wk/cv/utils/__init__.py | Peiiii/wk | dcf948c1cb36c1eec9b2a554ea0296c6d3dbbdc4 | [
"MIT"
] | null | null | null | wk/cv/utils/__init__.py | Peiiii/wk | dcf948c1cb36c1eec9b2a554ea0296c6d3dbbdc4 | [
"MIT"
] | null | null | null | wk/cv/utils/__init__.py | Peiiii/wk | dcf948c1cb36c1eec9b2a554ea0296c6d3dbbdc4 | [
"MIT"
] | null | null | null | from .imutils import *
from .boxutils import *
| 15.666667 | 23 | 0.744681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
203f5085c6e6bdb4474b040f35e294865c742113 | 4,256 | py | Python | PDF-Tools/main.py | Aayush-hub/Amazing-Python-Scripts | 5488454b16fa969d32ad7a56618e62e64291c052 | [
"MIT"
] | 3 | 2021-01-14T13:54:22.000Z | 2021-11-15T11:26:51.000Z | PDF-Tools/main.py | Aayush-hub/Amazing-Python-Scripts | 5488454b16fa969d32ad7a56618e62e64291c052 | [
"MIT"
] | 1 | 2021-02-24T02:06:21.000Z | 2021-02-24T02:06:21.000Z | PDF-Tools/main.py | Aayush-hub/Amazing-Python-Scripts | 5488454b16fa969d32ad7a56618e62e64291c052 | [
"MIT"
] | 1 | 2021-02-22T18:47:39.000Z | 2021-02-22T18:47:39.000Z | import os
from PyPDF2 import PdfFileReader, PdfFileWriter
def merge_pdfs():
    ''' Merge multiple PDF's into one combined PDF '''
    # Paths come from stdin as a single comma-separated string.
    input_paths = input(r"Enter comma separated list of paths to the PDFs ")
    writer = PdfFileWriter()
    # Append every page of every input file, in the order given.
    for path in input_paths.split(','):
        reader = PdfFileReader(path)
        for page_index in range(reader.getNumPages()):
            writer.addPage(reader.getPage(page_index))
    # Write the combined document.
    with open('merged.pdf', 'wb') as out:
        writer.write(out)
def split_pdfs():
    '''Split PDF to multiple PDF's of 1 Page each'''
    input_pdf = input(r"Enter I/P PDF path ")
    reader = PdfFileReader(input_pdf)
    for page_index in range(reader.getNumPages()):
        # One writer (and one output file) per page.
        writer = PdfFileWriter()
        writer.addPage(reader.getPage(page_index))
        with open('split{page}.pdf'.format(page=page_index), 'wb') as output_pdf:
            writer.write(output_pdf)
def add_watermark():
    ''' Adds watermark to given PDF.
    Note: The watermark PDF should be a image with transparent background '''
    input_pdf = input(r"Enter I/P PDF path ")
    watermark = input(r"Enter watermark PDF path ")
    # The first page of the watermark PDF is stamped onto every input page.
    watermark_obj = PdfFileReader(watermark)
    watermark_page = watermark_obj.getPage(0)
    pdf_file_reader = PdfFileReader(input_pdf)
    pdf_file_writer = PdfFileWriter()
    # Watermark all the pages
    for page_num in range(pdf_file_reader.getNumPages()):
        page = pdf_file_reader.getPage(page_num)
        page.mergePage(watermark_page)
        pdf_file_writer.addPage(page)
    with open('watermarked-pdf.pdf', 'wb') as out:
        pdf_file_writer.write(out)
def add_encryption():
    ''' Encrypts the given PDF with the provided password '''
    input_pdf = input(r"Enter I/P PDF path ")
    password = input(r"Enter password ")
    pdf_file_writer = PdfFileWriter()
    pdf_file_reader = PdfFileReader(input_pdf)
    # Copy every page, then encrypt the whole output document.
    for page_num in range(pdf_file_reader.getNumPages()):
        pdf_file_writer.addPage(pdf_file_reader.getPage(page_num))
    # Encrypt using the password
    # (owner_pwd=None: PyPDF2 uses the user password as the owner password)
    pdf_file_writer.encrypt(user_pwd=password, owner_pwd=None,
                            use_128bit=True)
    with open('encrypted.pdf', 'wb') as fh:
        pdf_file_writer.write(fh)
def rotate_pages():
    '''Rotate the given PDF left or right by 90 degrees.'''
    input_pdf = input(r"Enter I/P PDF path ")
    pdf_file_writer = PdfFileWriter()
    pdf_file_reader = PdfFileReader(input_pdf)
    orient = input("Specify orientation: clockwise or counterclockwise ")
    # Single loop with the direction chosen per page, instead of two
    # duplicated loops.  Any other answer writes an empty rotated.pdf,
    # matching the original behaviour.
    if orient in ("clockwise", "counterclockwise"):
        for page_num in range(pdf_file_reader.getNumPages()):
            page = pdf_file_reader.getPage(page_num)
            if orient == "clockwise":
                rot_page = page.rotateClockwise(90)
            else:
                rot_page = page.rotateCounterClockwise(90)
            pdf_file_writer.addPage(rot_page)
    with open('rotated.pdf', 'wb') as fh:
        pdf_file_writer.write(fh)
def menu():
    '''Menu for the various functionalities offered.

    Re-prompts on invalid input instead of recursing, and no longer crashes
    with ValueError when the input is not an integer.
    '''
    # Change Current working directory to where the script is located.
    # This is done to enable use of relative paths from base folder.
    abspath = os.path.abspath(__file__)
    dname = os.path.dirname(abspath)
    os.chdir(dname)
    # Dispatch table replaces the if/elif chain.
    actions = {1: merge_pdfs, 2: split_pdfs, 3: rotate_pages,
               4: add_watermark, 5: add_encryption}
    while True:
        print("\n Welcome to PDF-Tools \n Store the PDF's in the folder of the script \n Choose from the given options\n")
        print(" 1.Merge PDF\n 2.Split PDF\n 3.Rotate PDF\n 4.Add Watermark\n 5.Encrypt PDF\n")
        try:
            z = int(input())
        except ValueError:
            # Non-numeric input previously crashed; treat it as invalid.
            print("Please select valid choice\n")
            continue
        action = actions.get(z)
        if action is not None:
            action()
            return
        print("Please select valid choice\n")
if __name__ == '__main__':
    menu()
| 32.48855 | 118 | 0.666588 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,376 | 0.323308 |
20405da01c0697c18d446756c010ffeb88012a42 | 6,060 | py | Python | tech.py | ajul/galciv3wikiscripts | 337fc27286e5d3117c83f3171e32be8941e9af4a | [
"MIT"
] | null | null | null | tech.py | ajul/galciv3wikiscripts | 337fc27286e5d3117c83f3171e32be8941e9af4a | [
"MIT"
] | 1 | 2016-03-06T11:57:38.000Z | 2016-04-02T02:13:41.000Z | tech.py | ajul/galciv3wikiscripts | 337fc27286e5d3117c83f3171e32be8941e9af4a | [
"MIT"
] | null | null | null | import xml.etree.ElementTree as ET
import re
import os
import loc
# Root of the local Galactic Civilizations III installation's data folder;
# the tech/unlock XML definitions live under its Game/ subdirectory.
datadir = 'D:/Steam/steamapps/common/Galactic Civilizations III/data'
gamedatadir = os.path.join(datadir, 'Game')
def processTechTree(techList, filename):
    """Render one TechList XML tree into MediaWiki table markup.

    NOTE(review): assumes the tree contains a tech with RootNode == 'true';
    otherwise `rootTech` is unbound when building the output — confirm the
    game data guarantees this.
    """
    filebase, _ = os.path.splitext(filename)
    # The matching specialization definitions live in a sibling file,
    # e.g. FooDefs.xml -> FooSpecializationDefs.xml.
    techSpecializationList = ET.parse(os.path.join(gamedatadir,
        filename.replace('Defs', 'SpecializationDefs'))).getroot()
    locSources = ['%sText.xml' % filebase]
    # item : [leads to, ...]
    prereqs = {}
    # specialization : techs
    specializations = {}
    # techs: unlocks
    unlocks = {}
    # returns the tech itself if not specialization, otherwise returns the corresponding specialization
    def getItem(tech):
        specialization = tech.findtext('Specialization')
        if specialization is None:
            return tech
        else:
            return techSpecializationList.find("TechSpecialization[InternalName='%s']" % specialization)
    def getTechByGenericName(genericName):
        return techList.find("Tech[GenericName='%s']" % genericName)
    # gets the tech or specialization corresponding to a given name
    def getItemByGenericName(genericName):
        tech = getTechByGenericName(genericName)
        if tech is None: return None
        return getItem(tech)
    # compute specializations
    for tech in techList.findall('Tech'):
        if tech.findtext('RootNode') == 'true':
            rootTech = tech
        item = getItem(tech)
        if item != tech:
            # is a specialization
            if item not in specializations:
                specializations[item] = []
            specializations[item].append(tech)
        prereqs[item] = []
        unlocks[tech] = []
    # compute prereqs
    for tech in techList.findall('Tech'):
        item = getItem(tech)
        for prereqTechName in tech.findall('Prerequ/Techs/Option'):
            prereqItem = getItemByGenericName(prereqTechName.text)
            if prereqItem is None:
                print('Tree has no prereq tech %s' % prereqTechName.text)
                continue
            if item not in prereqs[prereqItem]:
                prereqs[prereqItem].append(item)
    # compute unlocks
    # Each (definition file prefix, human-readable label) pair below is
    # scanned for entries whose prerequisites reference a tech in this tree.
    unlockSources = [
        ('Improvement', 'Improvement'),
        ('InvasionStyle', 'Invasion Tactic'),
        ('PlanetaryProject', 'Planetary Project'),
        ('ShipComponent', 'Ship Component'),
        ('SpecialShipComponent', 'Ship Component'),
        ('ShipHullStat', 'Hull Size'),
        ('StarbaseModule', 'Starbase Module'),
        ('UPResolution', 'United Planets Resolution'),
    ]
    for unlockType, unlockTypeName in unlockSources:
        unlockRoot = ET.parse(os.path.join(gamedatadir, '%sDefs.xml' % unlockType))
        unlockLocSources = ['%sText.xml' % unlockType]
        for unlockable in unlockRoot.findall('*'):
            unlockName = loc.english(unlockable.findtext('DisplayName'), unlockLocSources) or '???'
            unlockText = '%s: %s' % (unlockTypeName, unlockName)
            for techName in unlockable.findall('Prerequ/Techs/Option'):
                tech = getTechByGenericName(techName.text)
                if tech is None:
                    # print('Tree has no unlock tech %s' % techName.text)
                    continue
                unlocks[tech].append(unlockText)
    # TODO: planet traits
    # output
    # Collects the display info and the bonus/unlock lines for one tech.
    def techInfo(tech):
        info = {}
        info['Name'] = loc.english(tech.findtext('DisplayName'), locSources)
        age = tech.findtext('Prerequ/TechAge/Option') or ''
        info['Age'] = age.replace('AgeOf', '')
        info['ResearchCost'] = int(tech.findtext('ResearchCost'))
        statInfo = []
        for unlock in unlocks[tech]:
            statInfo.append(unlock)
        for stats in tech.findall('Stats'):
            effectType = loc.english('STATNAME_%s' % stats.findtext('EffectType'), ['StatText.xml'])
            effectType = re.sub('\[.*\]\s*', '', effectType)
            targetType = stats.findtext('Target/TargetType')
            # NOTE(review): `value` is unbound if BonusType is neither 'Flat'
            # nor 'Multiplier' -- confirm the data only uses these two.
            if stats.findtext('BonusType') == 'Flat':
                value = '%0.1f' % float(stats.findtext('Value'))
            elif stats.findtext('BonusType') == 'Multiplier':
                value = '%+d%%' % (float(stats.findtext('Value')) * 100.0)
            statInfo.append('%s %s %s' % (targetType, effectType, value))
        return info, statInfo
    # Recursively emits wiki table rows; depth is shown as '>' markers.
    def wikiOutput(item, depth = 0):
        result = '|-\n'
        result += '| ' + '>' * depth
        if item.tag == 'Tech':
            info, statInfo = techInfo(item)
            result += ' %(Name)s || %(Age)s || %(ResearchCost)s \n' % info
            result += '| \n'
            for bonus in statInfo:
                result += '* %s \n' % bonus
        else:
            # Specialization node: header info comes from its first tech.
            info, _ = techInfo(specializations[item][0])
            result += ' %(Name)s || %(Age)s || %(ResearchCost)s \n' % info
            result += '| \n'
            for tech in specializations[item]:
                _, statInfo = techInfo(tech)
                result += '* Specialization:\n'
                for bonus in statInfo:
                    result += ':* %s \n' % bonus
        # NOTE(review): the loop variable shadows the `item` parameter; safe
        # here because the parameter is not used after this point.
        for item in prereqs[item]:
            result += wikiOutput(item, depth + 1)
        return result
    result = ''
    # One wikitable per top-level category (direct children of the root).
    for categoryTech in prereqs[rootTech]:
        result += '== %s ==\n' % loc.english(categoryTech.findtext('DisplayName'), locSources)
        result += '{|class = "wikitable"\n'
        result += '! Name !! Age !! Research cost !! Effects \n'
        result += wikiOutput(categoryTech)
        result += '|}\n'
    return result
# Generate one wiki-markup file under out/ per TechList XML in the game data.
for filename in os.listdir(gamedatadir):
    techList = ET.parse(os.path.join(gamedatadir, filename)).getroot()
    # Skip definition files that are not tech trees.
    if techList.tag != 'TechList': continue
    result = processTechTree(techList, filename)
    filebase, _ = os.path.splitext(filename)
    outfile = open(os.path.join('out', '%s.txt' % filebase), 'w')
    outfile.write(result)
    outfile.close()
| 39.350649 | 109 | 0.576733 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,520 | 0.250825 |
2040b731cc0d7545c2cc99188f7bbf870dcb905d | 13,052 | py | Python | matching/CopingWithOutlier.py | HeCraneChen/3D-Crowd-Pose-Estimation-Based-on-MVG | 0b1fc8aca27520976516012f6594a513cb1fb2f3 | [
"MIT"
] | 35 | 2020-07-14T22:42:33.000Z | 2022-01-07T06:32:26.000Z | matching/CopingWithOutlier.py | HeCraneChen/3D-Crowd-Pose-Estimation-Based-on-MVG | 0b1fc8aca27520976516012f6594a513cb1fb2f3 | [
"MIT"
] | 5 | 2020-09-14T03:19:27.000Z | 2021-05-28T19:45:29.000Z | matching/CopingWithOutlier.py | HeCraneChen/3D-Crowd-Pose-Estimation-Based-on-MVG | 0b1fc8aca27520976516012f6594a513cb1fb2f3 | [
"MIT"
] | 6 | 2020-09-15T02:28:36.000Z | 2021-07-16T06:03:36.000Z | import sys
import json
import os
import numpy as np
from scipy.optimize import linear_sum_assignment
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
import cv2
import pylab as pl
from numpy import linalg as LA
import random
import math
from mpl_toolkits.mplot3d import axes3d, Axes3D
from pose_optimize.multiview_geo import get_distance
from all_dataset_para import Get_P_from_dataset
from AccomodateDataset import accomoDataset
def CopeOutlier(pts3D, median_bones, ratio_thresh):
    """cope with outliers by two constraints:
    1) check the cluster, vote the points that are ridiculously far away as outliers
    2) check the boneL constraint
    median_bones: [nose_eye, eye_ear, LRshoulder, upperArm, lowerArm, LRhip, hip_knee, knee_ankle, ankle_heel, heel_bigtoe, heel_smalltoe, ear_ear, wrist_wrist, ankle_ankle, heel_heel, toe_toe]
    -Args:
        pts3D: 4 by n numpy array, rows 0,1,2,3 are x,y,z, row3 is always 1
        median_bones: a list of bone lengths
    -Returns:
        pts3D_inlier 4 by m numpy array, rows 0,1,2,3 are x,y,z, row3 is always 1
    """
    pts3D_new = pts3D
    # 23 joints per person; columns are grouped person by person.
    people_num = int(pts3D.shape[1]/23)
    Dist = {}
    for counter1 in range (people_num):
        adjust_result = []
        start = counter1 * 23
        end = start + 23
        person = pts3D[:,start:end]
        # Flatten to [x1,y1,z1,1, x2,y2,z2,1, ...] as expected by adjustbone.
        person_list = person.T.reshape((1,-1))[0]
        # Total distance of each joint to all joints of this person.
        for counter2 in range(23):
            Dist[counter2] = getdistance(person[:,counter2], person)
        sorted_Dict = sorted(Dist.items(), key=lambda item: item[1])
        # A large jump in the sorted total-distance sequence marks an outlier.
        for counter3 in range(1,len(sorted_Dict)):
            distance = sorted_Dict[counter3][1]
            ind_i = sorted_Dict[counter3][0]
            ratio = sorted_Dict[counter3][1] / sorted_Dict[counter3 - 1][1]
            if ratio > ratio_thresh:
                x = person[0,ind_i]
                y = person[1,ind_i]
                z = person[2,ind_i]
                outlier_coor = [x, y, z]
                outlier_index = sorted_Dict[counter3][0]
                # Pull the outlier back toward its skeleton neighbours using
                # the median bone lengths.
                inlier_from_outlier = adjustbone(outlier_index, outlier_coor, median_bones, person_list)
                adjust_result.append((outlier_index,inlier_from_outlier))
        # NOTE(review): person_new aliases `person` (a view into pts3D), so the
        # assignments below also mutate the input array in place -- confirm
        # callers do not rely on pts3D being unmodified.
        person_new = person # 4 by 23 numpy array
        for counter4 in range (len(adjust_result)):
            temp = np.asarray(adjust_result[counter4][1]).reshape(3,1)
            test = person_new[:,0]
            # Re-append the homogeneous 1 and write the corrected column back.
            person_new[:,adjust_result[counter4][0]] = np.concatenate((temp, np.array([[1]])),axis = 0).reshape(4)
        if counter1 == 0:
            pts3D_new = person_new
        else:
            pts3D_new = np.concatenate((pts3D_new, person_new), axis = 1)
    return pts3D_new
def getdistance(pt, pts):
    """Sum of Euclidean distances from ``pt`` to every column of ``pts``.

    -Args:
        pt: 3 by 1 numpy array
        pts: 3 by n numpy array
    """
    return sum(LA.norm(pts[:, column] - pt) for column in range(pts.shape[1]))
def adjustbone(outlier_index, outlier_coor, median_bones, person):
    """adjust the outlier, and convert it to inlier based on bone length
    -Args:
        outlier_index: int, joint index in [0, 22] (OpenPose BODY_25-style order
            used by this project: nose, eyes, ears, shoulders, elbows, wrists,
            hips, knees, ankles, toes, heels)
        outlier_coor: a list [x,y,z]
        person: a list [x1,y1,z1,1, x2,y2,z2,1, ...] -- 4 values per joint
        median_bones: [nose_eye, eye_ear, LRshoulder, upperArm, lowerArm, LRhip, hip_knee, knee_ankle, ankle_heel, heel_bigtoe, heel_smalltoe, ear_ear, wrist_wrist, ankle_ankle, heel_heel, toe_toe]
    -Returns:
        inlier_from_outlier: in the form of a list [x,y,z]

    The original 23-way if-chain is replaced by a lookup table mapping each
    joint to its two skeleton neighbours and the corresponding median bone
    lengths; the correction itself is unchanged (CalcInlier_two).
    Note: an index outside [0, 22] now raises KeyError (previously it raised
    an UnboundLocalError); callers only pass valid joint indices.
    """
    def joint(index):
        # Each joint occupies 4 consecutive values (x, y, z, 1) in `person`.
        start = 4 * index
        return person[start:start + 3]

    def foot_center(indices):
        # Average position of one foot (heel, big toe, small toe, ankle).
        return (sum(np.asarray(joint(i)) for i in indices) / len(indices)).tolist()

    b = median_bones
    # Mean of the ankle-ankle, heel-heel and toe-toe lengths ("stride").
    stride = (b[13] + b[14] + b[15]) / 3
    ave_lfoot = foot_center((19, 17, 18, 15))  # Lheel, Lbigtoe, Lsmalltoe, Lankle
    ave_rfoot = foot_center((22, 20, 21, 16))  # Rheel, Rbigtoe, Rsmalltoe, Rankle
    # joint index -> (neighbor A, neighbor B, bone length to A, bone length to B)
    neighbors = {
        0: (joint(1), joint(2), b[0], b[0]),        # nose: Leye, Reye
        1: (joint(0), joint(3), b[0], b[1]),        # Leye: nose, Lear
        2: (joint(0), joint(4), b[0], b[1]),        # Reye: nose, Rear
        3: (joint(1), joint(4), b[1], b[11]),       # Lear: Leye, Rear
        4: (joint(2), joint(3), b[1], b[11]),       # Rear: Reye, Lear
        5: (joint(6), joint(7), b[2], b[3]),        # Lshoulder: Rshoulder, Lelbow
        6: (joint(5), joint(8), b[2], b[3]),        # Rshoulder: Lshoulder, Relbow
        7: (joint(5), joint(9), b[3], b[4]),        # Lelbow: Lshoulder, Lwrist
        8: (joint(6), joint(10), b[3], b[4]),       # Relbow: Rshoulder, Rwrist
        9: (joint(7), joint(10), b[4], b[12]),      # Lwrist: Lelbow, Rwrist
        10: (joint(8), joint(9), b[4], b[12]),      # Rwrist: Relbow, Lwrist
        11: (joint(12), joint(13), b[5], b[6]),     # Lhip: Rhip, Lknee
        12: (joint(11), joint(14), b[5], b[6]),     # Rhip: Lhip, Rknee
        13: (joint(11), joint(15), b[6], b[7]),     # Lknee: Lhip, Lankle
        14: (joint(12), joint(16), b[6], b[7]),     # Rknee: Rhip, Rankle
        15: (joint(19), joint(13), b[8], b[7]),     # Lankle: Lheel, Lknee
        16: (joint(22), joint(14), b[8], b[7]),     # Rankle: Rheel, Rknee
        17: (joint(19), ave_rfoot, b[9], stride),   # Lbigtoe: Lheel, other foot
        18: (joint(19), ave_rfoot, b[10], stride),  # Lsmalltoe: Lheel, other foot
        19: (joint(17), joint(15), b[9], b[8]),     # Lheel: Lbigtoe, Lankle
        20: (joint(22), ave_lfoot, b[9], stride),   # Rbigtoe: Rheel, other foot
        21: (joint(22), ave_lfoot, b[10], stride),  # Rsmalltoe: Rheel, other foot
        22: (joint(20), joint(16), b[9], b[8]),     # Rheel: Rbigtoe, Rankle
    }
    connect1, connect2, normal_bone1, normal_bone2 = neighbors[outlier_index]
    return CalcInlier_two(outlier_coor, connect1, connect2, normal_bone1, normal_bone2)
def CalcInlier_one(outlier_coor, connect1, normal_bone1):
    """calculate the coor of inlier based on one connected point
    -Args:
        outlier_coor, connect1: a list in the form of [x,y,z]
        normal_bone1: constants
    -Returns:
        inlier_from_outlier: a list in the form of [x,y,z]
    """
    minimal_distance = 0.5
    anchor = np.asarray(connect1)
    outlier = np.asarray(outlier_coor)
    separation = LA.norm(anchor - outlier)
    # Degenerate case: the two points (nearly) coincide, so the direction
    # from the anchor to the outlier is undefined -- keep the outlier as is.
    if separation < minimal_distance:
        return outlier_coor
    fraction = normal_bone1 / separation
    corrected = anchor * (1 - fraction) + outlier * fraction
    return corrected.tolist()
def CalcInlier_two(outlier_coor, connect1, connect2, normal_bone1, normal_bone2):
    """calculate the coor of inlier based on two connected points
    -Args:
        outlier_coor, connect1, connect2: a list in the form of [x,y,z]
        normal_bone1, normal_bone2: constants
    -Returns:
        inlier_from_outlier: a list in the form of [x,y,z]
    """
    minimal_distance = 0.5
    outlier = np.asarray(outlier_coor)
    anchor1 = np.asarray(connect1)
    anchor2 = np.asarray(connect2)
    separation1 = LA.norm(anchor1 - outlier)
    separation2 = LA.norm(anchor2 - outlier)
    # If either anchor (nearly) coincides with the outlier the correction
    # direction is ill-defined -- keep the original point.
    if separation1 < minimal_distance or separation2 < minimal_distance:
        return outlier_coor
    fraction1 = normal_bone1 / separation1
    fraction2 = normal_bone2 / separation2
    candidate1 = anchor1 * (1 - fraction1) + outlier * fraction1
    candidate2 = anchor2 * (1 - fraction2) + outlier * fraction2
    # Final estimate: midpoint of the two per-anchor corrections.
    return (0.5 * (candidate1 + candidate2)).tolist()
20415742b9197ff7961e1d8e40fda991f5be725a | 4,887 | py | Python | tests/test_workspace.py | rossumai/rossumctl | 647c2ba9c2b7eb44759b3857b93276368e3d14c8 | [
"MIT"
] | null | null | null | tests/test_workspace.py | rossumai/rossumctl | 647c2ba9c2b7eb44759b3857b93276368e3d14c8 | [
"MIT"
] | 10 | 2020-08-24T09:20:38.000Z | 2020-09-29T08:03:57.000Z | tests/test_workspace.py | rossumai/rossumctl | 647c2ba9c2b7eb44759b3857b93276368e3d14c8 | [
"MIT"
] | null | null | null | import re
from functools import partial
from traceback import print_tb
import pytest
from more_itertools import ilen
from rossum.workspace import create_command, list_command, delete_command, change_command
from tests.conftest import (
TOKEN,
match_uploaded_json,
ORGANIZATIONS_URL,
WORKSPACES_URL,
DOCUMENTS_URL,
QUEUES_URL,
ANNOTATIONS_URL,
)
ORGANIZATION_ID = "1"
ORGANIZATION_URL = f"{ORGANIZATIONS_URL}/{ORGANIZATION_ID}"
@pytest.mark.usefixtures("mock_login_request", "mock_organization_urls", "rossum_credentials")
class TestCreate:
    def test_success(self, requests_mock, cli_runner):
        """`create` POSTs the name + organization URL and echoes the new id."""
        workspace_name = "TestName"
        created_id = "2"
        expected_payload = {"name": workspace_name, "organization": ORGANIZATION_URL}
        requests_mock.post(
            WORKSPACES_URL,
            additional_matcher=partial(match_uploaded_json, expected_payload),
            request_headers={"Authorization": f"Token {TOKEN}"},
            status_code=201,
            json={"id": created_id},
        )
        result = cli_runner.invoke(create_command, [workspace_name])
        assert not result.exit_code, print_tb(result.exc_info[2])
        assert result.output == f"{created_id}\n"
@pytest.mark.usefixtures("mock_login_request", "rossum_credentials")
class TestList:
    def test_success(self, requests_mock, cli_runner):
        """The list command renders one table row per workspace with queue ids."""
        workspace_id = 1
        name = "test@example.com"
        queue_id = 1
        queue_url = f"{QUEUES_URL}/{queue_id}"
        # One workspace that references one queue by URL.
        requests_mock.get(
            WORKSPACES_URL,
            json={
                "pagination": {"total": 1, "next": None},
                "results": [{"id": workspace_id, "queues": [queue_url], "name": name}],
            },
        )
        # Queue listing used by the CLI to resolve queue URLs back to ids.
        requests_mock.get(
            QUEUES_URL,
            json={
                "pagination": {"total": 1, "next": None},
                "results": [{"id": queue_id, "url": queue_url}],
            },
        )
        result = cli_runner.invoke(list_command)
        assert not result.exit_code, print_tb(result.exc_info[2])
        # NOTE(review): the column widths below must match the CLI's table
        # formatter output exactly — adjust together with the formatter.
        expected_table = f"""\
  id  name              queues
----  ----------------  --------
   {workspace_id}  {name}  {queue_id}
"""
        assert result.output == expected_table
@pytest.mark.usefixtures("mock_login_request", "rossum_credentials")
class TestDelete:
    def test_success(self, requests_mock, cli_runner):
        """Deleting a workspace also deletes each of its documents.

        Expects one DELETE for the workspace plus one per document.
        """
        workspace_id = "1"
        queue_id = "1"
        workspace_url = f"{WORKSPACES_URL}/{workspace_id}"
        n_documents = 2
        # The workspace itself.
        requests_mock.get(
            workspace_url,
            request_headers={"Authorization": f"Token {TOKEN}"},
            json={"id": workspace_id, "url": workspace_url},
        )
        # Its single queue (complete_qs pins the exact query string).
        requests_mock.get(
            f"{QUEUES_URL}?workspace={workspace_id}",
            complete_qs=True,
            request_headers={"Authorization": f"Token {TOKEN}"},
            json={"pagination": {"next": None, "total": 1}, "results": [{"id": queue_id}]},
        )
        # Annotations with sideloaded documents — one annotation per document.
        requests_mock.get(
            f"{ANNOTATIONS_URL}?queue={queue_id}&page_size=50&sideload=documents",
            complete_qs=True,
            request_headers={"Authorization": f"Token {TOKEN}"},
            json={
                "pagination": {"next": None, "total": 1},
                "results": [
                    {"id": i, "url": rf"{ANNOTATIONS_URL}/{i}", "document": rf"{DOCUMENTS_URL}/{i}"}
                    for i in range(n_documents)
                ],
                "documents": [
                    {"id": i, "url": rf"{DOCUMENTS_URL}/{i}"} for i in range(n_documents)
                ],
            },
        )
        requests_mock.delete(
            workspace_url, request_headers={"Authorization": f"Token {TOKEN}"}, status_code=204
        )
        # Any document URL may be deleted, hence the regex matcher.
        requests_mock.delete(
            re.compile(rf"{DOCUMENTS_URL}/\d+"),
            request_headers={"Authorization": f"Token {TOKEN}"},
            status_code=204,
        )
        result = cli_runner.invoke(delete_command, [workspace_id, "--yes"])
        assert not result.exit_code, print_tb(result.exc_info[2])
        assert not result.output
        # n_documents document deletions + 1 workspace deletion.
        assert (
            ilen(r for r in requests_mock.request_history if r.method == "DELETE")
            == n_documents + 1
        )
@pytest.mark.usefixtures("mock_login_request", "rossum_credentials")
class TestChange:
    def test_success(self, requests_mock, cli_runner):
        """`change -n` PATCHes the new name and prints nothing on success."""
        new_name = "TestName"
        workspace_id = "1"
        requests_mock.patch(
            f"{WORKSPACES_URL}/{workspace_id}",
            additional_matcher=partial(match_uploaded_json, {"name": new_name}),
            request_headers={"Authorization": f"Token {TOKEN}"},
            status_code=200,
        )
        result = cli_runner.invoke(change_command, [workspace_id, "-n", new_name])
        assert not result.exit_code, print_tb(result.exc_info[2])
        assert not result.output
| 33.244898 | 100 | 0.580929 | 4,115 | 0.84203 | 0 | 0 | 4,417 | 0.903826 | 0 | 0 | 1,188 | 0.243094 |
2041841ab1ef90eb30625e075209aee43a02bb15 | 9,457 | py | Python | mesonconf.py | objectx/meson | c0f097c0c74551972f7ec2203cd960824984f058 | [
"Apache-2.0"
] | null | null | null | mesonconf.py | objectx/meson | c0f097c0c74551972f7ec2203cd960824984f058 | [
"Apache-2.0"
] | null | null | null | mesonconf.py | objectx/meson | c0f097c0c74551972f7ec2203cd960824984f058 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2014 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os
import pickle
import argparse
import coredata, optinterpreter
from meson import build_types
# Command-line interface; consumed by the __main__ block at the bottom.
parser = argparse.ArgumentParser()
parser.add_argument('-D', action='append', default=[], dest='sets',
                    help='Set an option to the given value.')
parser.add_argument('directory', nargs='+')
class ConfException(Exception):
    """User-facing configuration error (bad build directory, bad option value).

    The explicit ``__init__`` that merely forwarded to ``super().__init__``
    was redundant — ``Exception`` already accepts arbitrary args.
    """
class Conf:
    """Read, display and modify the persisted configuration of a Meson build dir.

    Wraps the pickled coredata (user-visible options) and build data stored
    under ``<build_dir>/meson-private``.
    """

    def __init__(self, build_dir):
        """Load coredata/build data from *build_dir*.

        Raises ConfException if the directory is not a Meson build directory
        or was configured by a different Meson version.
        """
        self.build_dir = build_dir
        self.coredata_file = os.path.join(build_dir, 'meson-private/coredata.dat')
        self.build_file = os.path.join(build_dir, 'meson-private/build.dat')
        if not os.path.isfile(self.coredata_file) or not os.path.isfile(self.build_file):
            raise ConfException('Directory %s does not seem to be a Meson build directory.' % build_dir)
        self.coredata = pickle.load(open(self.coredata_file, 'rb'))
        self.build = pickle.load(open(self.build_file, 'rb'))
        if self.coredata.version != coredata.version:
            raise ConfException('Version mismatch (%s vs %s)' %
                                (coredata.version, self.coredata.version))

    def save(self):
        """Persist the (possibly modified) coredata back to disk."""
        # Only called if something has changed so overwrite unconditionally.
        pickle.dump(self.coredata, open(self.coredata_file, 'wb'))
        # We don't write the build file because any changes to it
        # are erased when Meson is executed the next time, i.e. the next
        # time Ninja is run.

    def print_aligned(self, arr):
        """Print a list of [name, description, value] triples in aligned columns."""
        if len(arr) == 0:
            return
        longest_name = max(len(x[0]) for x in arr)
        longest_descr = max(len(x[1]) for x in arr)
        for name, descr, value in arr:
            namepad = ' ' * (longest_name - len(name))
            descrpad = ' ' * (longest_descr - len(descr))
            print('%s%s %s%s' % (name, namepad, descr, descrpad), value)

    def tobool(self, thing):
        """Parse a case-insensitive 'true'/'false' string into a bool.

        Raises ConfException for any other value.
        """
        if thing.lower() == 'true':
            return True
        if thing.lower() == 'false':
            return False
        raise ConfException('Value %s is not boolean (true or false).' % thing)

    def set_options(self, options):
        """Apply a list of 'key=value' strings to coredata.

        Raises ConfException for malformed entries, unknown keys and
        invalid values.
        """
        for o in options:
            if '=' not in o:
                raise ConfException('Value "%s" not of type "a=b".' % o)
            (k, v) = o.split('=', 1)
            if k == 'type':
                if v not in build_types:
                    raise ConfException('Invalid build type %s.' % v)
                self.coredata.buildtype = v
            elif k == 'strip':
                self.coredata.strip = self.tobool(v)
            elif k == 'coverage':
                # BUGFIX: tobool() used to be applied twice here; the second
                # call received a bool (which has no .lower()) and crashed
                # with AttributeError. Convert exactly once.
                self.coredata.coverage = self.tobool(v)
            elif k == 'pch':
                self.coredata.use_pch = self.tobool(v)
            elif k == 'unity':
                self.coredata.unity = self.tobool(v)
            elif k == 'prefix':
                if not os.path.isabs(v):
                    raise ConfException('Install prefix %s is not an absolute path.' % v)
                self.coredata.prefix = v
            elif k == 'libdir':
                if os.path.isabs(v):
                    raise ConfException('Library dir %s must not be an absolute path.' % v)
                self.coredata.libdir = v
            elif k == 'bindir':
                if os.path.isabs(v):
                    raise ConfException('Binary dir %s must not be an absolute path.' % v)
                self.coredata.bindir = v
            elif k == 'includedir':
                if os.path.isabs(v):
                    raise ConfException('Include dir %s must not be an absolute path.' % v)
                self.coredata.includedir = v
            elif k == 'datadir':
                if os.path.isabs(v):
                    raise ConfException('Data dir %s must not be an absolute path.' % v)
                self.coredata.datadir = v
            elif k == 'mandir':
                if os.path.isabs(v):
                    raise ConfException('Man dir %s must not be an absolute path.' % v)
                self.coredata.mandir = v
            elif k == 'localedir':
                if os.path.isabs(v):
                    raise ConfException('Locale dir %s must not be an absolute path.' % v)
                self.coredata.localedir = v
            elif k in self.coredata.user_options:
                tgt = self.coredata.user_options[k]
                if isinstance(tgt, optinterpreter.UserBooleanOption):
                    tgt.set_value(self.tobool(v))
                elif isinstance(tgt, optinterpreter.UserComboOption):
                    try:
                        tgt.set_value(v)
                    except optinterpreter.OptionException:
                        raise ConfException('Value of %s must be one of %s.' %
                                            (k, tgt.choices))
                elif isinstance(tgt, optinterpreter.UserStringOption):
                    tgt.set_value(v)
                else:
                    raise ConfException('Internal error, unknown option type.')
            elif k.endswith('linkargs'):
                lang = k[:-8]
                if lang not in self.coredata.external_link_args:
                    raise ConfException('Unknown language %s in linkargs.' % lang)
                # TODO, currently split on spaces, make it so that user
                # can pass in an array string.
                self.coredata.external_link_args[lang] = v.split()
            elif k.endswith('args'):
                lang = k[:-4]
                if lang not in self.coredata.external_args:
                    raise ConfException('Unknown language %s in compile args' % lang)
                # TODO same fix as above
                self.coredata.external_args[lang] = v.split()
            else:
                raise ConfException('Unknown option %s.' % k)

    def print_conf(self):
        """Dump the full configuration (core options, compiler/linker args,
        install directories and project options) to stdout."""
        print('Core properties\n')
        print('Source dir', self.build.environment.source_dir)
        print('Build dir ', self.build.environment.build_dir)
        print('')
        print('Core options\n')
        carr = []
        carr.append(['type', 'Build type', self.coredata.buildtype])
        carr.append(['strip', 'Strip on install', self.coredata.strip])
        carr.append(['coverage', 'Coverage report', self.coredata.coverage])
        carr.append(['pch', 'Precompiled headers', self.coredata.use_pch])
        carr.append(['unity', 'Unity build', self.coredata.unity])
        self.print_aligned(carr)
        print('')
        print('Compiler arguments\n')
        for (lang, args) in self.coredata.external_args.items():
            print(lang + 'args', str(args))
        print('')
        print('Linker args\n')
        for (lang, args) in self.coredata.external_link_args.items():
            print(lang + 'linkargs', str(args))
        print('')
        print('Directories\n')
        parr = []
        parr.append(['prefix', 'Install prefix', self.coredata.prefix])
        parr.append(['libdir', 'Library directory', self.coredata.libdir])
        parr.append(['bindir', 'Binary directory', self.coredata.bindir])
        parr.append(['includedir', 'Header directory', self.coredata.includedir])
        parr.append(['datadir', 'Data directory', self.coredata.datadir])
        parr.append(['mandir', 'Man page directory', self.coredata.mandir])
        parr.append(['localedir', 'Locale file directory', self.coredata.localedir])
        self.print_aligned(parr)
        print('')
        if len(self.coredata.user_options) == 0:
            print('This project does not have any options')
        else:
            print('Project options\n')
            options = self.coredata.user_options
            keys = sorted(options.keys())
            optarr = []
            for key in keys:
                opt = options[key]
                optarr.append([key, opt.description, opt.value])
            self.print_aligned(optarr)
if __name__ == '__main__':
    options = parser.parse_args()
    if len(options.directory) > 1:
        # BUGFIX: this branch used to do `print(args)` where `args` was never
        # defined, raising NameError instead of showing usage.
        print('%s <build directory>' % sys.argv[0])
        print('If you omit the build directory, the current directory is substituted.')
        sys.exit(1)
    # Defensive: argparse's nargs='+' already guarantees at least one entry.
    if len(options.directory) == 0:
        builddir = os.getcwd()
    else:
        builddir = options.directory[0]
    try:
        c = Conf(builddir)
        if len(options.sets) > 0:
            # -D options given: apply them and persist.
            c.set_options(options.sets)
            c.save()
        else:
            # No options: just display the current configuration.
            c.print_conf()
    except ConfException as e:
        print('Meson configurator encountered an error:\n')
        print(e)
| 42.791855 | 104 | 0.562863 | 7,850 | 0.830073 | 0 | 0 | 0 | 0 | 0 | 0 | 2,507 | 0.265095 |
204335bc3b7927613614917f0d97975baa214832 | 481 | py | Python | code/plot_harmonic_number.py | nagaokayuji/workshop-complexity | 404681fa235c1e551695bfc1481a445c268dff8e | [
"MIT"
] | null | null | null | code/plot_harmonic_number.py | nagaokayuji/workshop-complexity | 404681fa235c1e551695bfc1481a445c268dff8e | [
"MIT"
] | null | null | null | code/plot_harmonic_number.py | nagaokayuji/workshop-complexity | 404681fa235c1e551695bfc1481a445c268dff8e | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
def count_harmonic_numbers(n: int):
    """Count all pairs (i, m) with 1 <= i <= n and m a multiple of i up to n.

    The original version incremented a counter once per multiple
    (O(n log n) iterations); ``n // i`` is exactly how many multiples of i
    lie in [i, n], so a single O(n) pass gives the same total, which grows
    like n * H(n) ~ n * log(n).
    """
    return sum(n // i for i in range(1, n + 1))
# Sample 100 integer problem sizes up to 1e5 and compare the measured
# operation count against the N*log(N) and N reference curves.
x = np.linspace(1, 10**5, 100, dtype='int')
y = list(map(lambda x: count_harmonic_numbers(x), x))
y2 = x * np.log(x)
print(y)
print(y2)
plt.plot(x, y, label="count")
plt.plot(x, y2, label="NlogN")
plt.plot(x, x, label="N")
plt.legend()
plt.show()
| 20.913043 | 53 | 0.607069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 60 | 0.120724 |
2045932168297c8faa71edacf1321431a5f3bb12 | 1,549 | py | Python | stockviewer/stockviewer/source/websource.py | vyacheslav-bezborodov/skt | 58551eed497687adec5b56336037613a78cc5b2d | [
"MIT"
] | null | null | null | stockviewer/stockviewer/source/websource.py | vyacheslav-bezborodov/skt | 58551eed497687adec5b56336037613a78cc5b2d | [
"MIT"
] | null | null | null | stockviewer/stockviewer/source/websource.py | vyacheslav-bezborodov/skt | 58551eed497687adec5b56336037613a78cc5b2d | [
"MIT"
] | null | null | null | import logging
import csv
from urllib2 import urlopen, quote
from datetime import datetime
from stockviewer.utils import make_timedelta, make_fields
class websource():
    """Fetch historical stock quotes over HTTP and parse them from CSV."""

    def __init__(self, config):
        """Read the URL template, date formats and field mapping from *config*."""
        logging.debug('Web source init: config {}'.format(config))
        self.__config = config
        self.__url = self.__config.find('url').text
        self.__date_request_format = self.__config.find('date_request_format').text
        self.__date_output_format = self.__config.find('date_output_format').text
        self.__fields = make_fields(self.__config.find('fields'))

    def get(self, symbol, begin = None, end = None):
        """Return a list of per-day quote dicts for *symbol* between *begin* and *end*."""
        if not begin:
            begin = datetime.today()
        if not end:
            end = begin + make_timedelta(self.__config.find('default_view_window'))
        request_format = self.__date_request_format
        url = self.__url.format(
            symbol=quote(symbol),
            begin=quote(datetime.strftime(begin, request_format)),
            end=quote(datetime.strftime(end, request_format)))
        logging.info('Opening url `{}`'.format(url))
        response = []
        try:
            response = urlopen(url)
        except Exception as e:
            # Network failure: log and fall through with an empty response.
            logging.warning(e)
        quotes = []
        for index, row in enumerate(csv.reader(response)):
            if index == 0:
                continue  # skip the CSV header line
            quotes.append({
                self.__fields['date']: datetime.strptime(row[0], self.__date_output_format),
                self.__fields['open']: float(row[1]),
                self.__fields['high']: float(row[2]),
                self.__fields['low']: float(row[3]),
                self.__fields['close']: float(row[4]),
                self.__fields['volume']: int(row[5]),
            })
        return quotes
| 27.175439 | 177 | 0.705617 | 1,396 | 0.901227 | 0 | 0 | 0 | 0 | 0 | 0 | 159 | 0.102647 |
2046584bfc0657c5aba655a2373df7d1dfcd4130 | 90 | py | Python | backdriveb2/api/objects/__init__.py | Joffreybvn/backdriveb2 | bf6098a45aacbdad6a0ff95c45ff141a2bcf7e1c | [
"MIT"
] | null | null | null | backdriveb2/api/objects/__init__.py | Joffreybvn/backdriveb2 | bf6098a45aacbdad6a0ff95c45ff141a2bcf7e1c | [
"MIT"
] | null | null | null | backdriveb2/api/objects/__init__.py | Joffreybvn/backdriveb2 | bf6098a45aacbdad6a0ff95c45ff141a2bcf7e1c | [
"MIT"
] | null | null | null |
from .account import Account
from .bucket import Bucket
__all__ = ["Account", "Bucket"]
| 15 | 31 | 0.733333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 17 | 0.188889 |
2046d4bae33dd0cf277dfbef2a7af67cf8aaf6c2 | 328 | py | Python | src/utilities/helpers/predict.py | szymonmaszke/UniFirstKaggle | c1718e5ad9006251e8280c65fd8651b4d4efa31e | [
"MIT"
] | 2 | 2019-04-12T12:20:08.000Z | 2019-04-12T15:42:14.000Z | src/utilities/helpers/predict.py | szymonmaszke/UniFirstKaggle | c1718e5ad9006251e8280c65fd8651b4d4efa31e | [
"MIT"
] | null | null | null | src/utilities/helpers/predict.py | szymonmaszke/UniFirstKaggle | c1718e5ad9006251e8280c65fd8651b4d4efa31e | [
"MIT"
] | null | null | null | import pathlib
import numpy as np
def create_submission(path: pathlib.Path, predictions):
    """Write *predictions* to *path* as a Kaggle-style 'id,label' CSV.

    Row ids are the 0-based positions of the predictions.
    """
    row_ids = np.arange(len(predictions))
    table = np.stack([row_ids, predictions], axis=1)
    # comments="" keeps the header free of numpy's default '#' prefix.
    np.savetxt(path, table, fmt="%d", delimiter=",", header="id,label", comments="")
| 20.5 | 79 | 0.597561 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.057927 |
204741c598e69e13e9f1cf051103af7ae120ef73 | 2,566 | py | Python | pipeline.py | hodleth/bestcondor | 167b6ca5e1f6f7256b870303ebcd051d41c9a1ec | [
"MIT"
] | 5 | 2021-02-17T16:26:22.000Z | 2021-11-24T14:25:32.000Z | pipeline.py | hodleth/bestcondor | 167b6ca5e1f6f7256b870303ebcd051d41c9a1ec | [
"MIT"
] | 1 | 2019-09-30T00:43:23.000Z | 2019-09-30T00:43:23.000Z | pipeline.py | hodleth/bestcondor | 167b6ca5e1f6f7256b870303ebcd051d41c9a1ec | [
"MIT"
] | 2 | 2018-05-18T14:42:00.000Z | 2020-05-22T23:16:50.000Z | import json
import urllib
import utils as ut
from distutils.util import strtobool
class Call(object):
    """Snapshot of one call-option contract: strike, last price, probability
    of expiring out of the money, implied volatility and ITM flag."""

    def __init__(self, currentStrike, currentPrice, currentProbOTM, currentIV, currentITM):
        # Plain value object: record every attribute on the instance as given.
        self.currentITM = currentITM
        self.currentIV = currentIV
        self.currentProbOTM = currentProbOTM
        self.currentPrice = currentPrice
        self.currentStrike = currentStrike
class Put(object):
    """Snapshot of one put-option contract: strike, last price, probability
    of expiring out of the money, implied volatility and ITM flag."""

    def __init__(self, currentStrike, currentPrice, currentProbOTM, currentIV, currentITM):
        # Plain value object: record every attribute on the instance as given.
        self.currentITM = currentITM
        self.currentIV = currentIV
        self.currentProbOTM = currentProbOTM
        self.currentPrice = currentPrice
        self.currentStrike = currentStrike
def parseOptionsChain(ticker, expDate):
    """Download and decode the Yahoo Finance option chain for *ticker*.

    Returns (optionsJson, truncatedStrikes, currentStockPrice).
    """
    url = ('https://query2.finance.yahoo.com/v7/finance/options/' + ticker
           + '?date=' + str(ut.formatExpiryURL(expDate)))
    parsed = json.loads(urllib.urlopen(url).read())
    optionsJson = parsed['optionChain']['result'][0]
    currentStockPrice = float(optionsJson['quote']['regularMarketPrice'])
    # Keep only strikes close to the current spot price.
    truncatedStrikes = ut.truncateStrikes(optionsJson['strikes'], currentStockPrice)
    return (optionsJson, truncatedStrikes, currentStockPrice)
def formatOptionChain(optionsJson, truncatedStrikes, currentStockPrice):
    """Convert the raw Yahoo option chain into Call/Put objects.

    Only contracts whose strike survived truncateStrikes() are kept.
    Returns the pair (outputCalls, outputPuts).

    The calls and puts loops were byte-for-byte duplicates; the shared logic
    now lives in _buildContracts.
    """
    chainCalls = optionsJson['options'][0]['calls']
    chainPuts = optionsJson['options'][0]['puts']
    outputCalls = _buildContracts(chainCalls, Call, truncatedStrikes, currentStockPrice)
    outputPuts = _buildContracts(chainPuts, Put, truncatedStrikes, currentStockPrice)
    return (outputCalls, outputPuts)


def _buildContracts(contracts, contractClass, truncatedStrikes, currentStockPrice):
    """Build contractClass instances for every contract with an accepted strike."""
    output = []
    for contract in contracts:
        currentStrike = contract['strike']
        if currentStrike not in truncatedStrikes:
            continue
        currentPrice = float(contract['lastPrice'])
        currentIV = float(contract['impliedVolatility'])
        # Normalise Yahoo's boolean to the canonical 'True'/'False' string
        # (round-trips through strtobool exactly as the original code did).
        currentITM = str(bool(strtobool(str(contract['inTheMoney']))))
        currentProbOTM = (1 - ut.calculateDelta(currentPrice, currentStockPrice))
        output.append(contractClass(currentStrike, currentPrice, currentProbOTM,
                                    currentIV, currentITM))
    return output
| 32.075 | 95 | 0.758379 | 612 | 0.238504 | 0 | 0 | 0 | 0 | 0 | 0 | 366 | 0.142634 |
20487ec8a56148d10391484016a8c173799c42c3 | 352 | py | Python | setup.py | rickie/hopla | 24a422194e42c03d5877dc167b2b07147326a595 | [
"Apache-2.0"
] | null | null | null | setup.py | rickie/hopla | 24a422194e42c03d5877dc167b2b07147326a595 | [
"Apache-2.0"
] | null | null | null | setup.py | rickie/hopla | 24a422194e42c03d5877dc167b2b07147326a595 | [
"Apache-2.0"
] | null | null | null | """
Module used for building hopla.
[quote](https://setuptools.readthedocs.io/en/latest/setuptools.html):
As PEP 517 is new, support is not universal, and frontends that do
support it may still have bugs. For compatibility, you may want to
put a setup.py file containing only a setuptools.setup() invocation.
"""
import setuptools
# Compatibility shim for PEP 517 frontends: all configuration lives in
# declarative files; this bare call just reads it (see module docstring).
setuptools.setup()
| 29.333333 | 69 | 0.772727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 313 | 0.889205 |
2048990e1c1f8155fa103ba38bc3687e2cba4347 | 3,545 | py | Python | pkgs/statsmodels-0.6.1-np110py27_0/lib/python2.7/site-packages/statsmodels/regression/tests/tests_predict.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2016-09-02T20:31:32.000Z | 2016-09-02T20:31:32.000Z | pkgs/statsmodels-0.6.1-np110py27_0/lib/python2.7/site-packages/statsmodels/regression/tests/tests_predict.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | pkgs/statsmodels-0.6.1-np110py27_0/lib/python2.7/site-packages/statsmodels/regression/tests/tests_predict.py | wangyum/anaconda | 6e5a0dbead3327661d73a61e85414cf92aa52be6 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-05-02T10:50:15.000Z | 2021-05-02T10:50:15.000Z | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 20 17:12:53 2014
author: Josef Perktold
"""
import numpy as np
from statsmodels.regression.linear_model import OLS, WLS
from statsmodels.sandbox.regression.predstd import wls_prediction_std
def test_predict_se():
# this test doesn't use reference values
# checks conistency across options, and compares to direct calculation
# generate dataset
nsample = 50
x1 = np.linspace(0, 20, nsample)
x = np.c_[x1, (x1 - 5)**2, np.ones(nsample)]
np.random.seed(0)#9876789) #9876543)
beta = [0.5, -0.01, 5.]
y_true2 = np.dot(x, beta)
w = np.ones(nsample)
w[nsample * 6. / 10:] = 3
sig = 0.5
y2 = y_true2 + sig * w * np.random.normal(size=nsample)
x2 = x[:,[0,2]]
# estimate OLS
res2 = OLS(y2, x2).fit()
#direct calculation
covb = res2.cov_params()
predvar = res2.mse_resid + (x2 * np.dot(covb, x2.T).T).sum(1)
predstd = np.sqrt(predvar)
prstd, iv_l, iv_u = wls_prediction_std(res2)
np.testing.assert_almost_equal(prstd, predstd, 15)
#stats.t.isf(0.05/2., 50 - 2)
q = 2.0106347546964458
ci_half = q * predstd
np.testing.assert_allclose(iv_u, res2.fittedvalues + ci_half, rtol=1e-12)
np.testing.assert_allclose(iv_l, res2.fittedvalues - ci_half, rtol=1e-12)
prstd, iv_l, iv_u = wls_prediction_std(res2, x2[:3,:])
np.testing.assert_equal(prstd, prstd[:3])
np.testing.assert_allclose(iv_u, res2.fittedvalues[:3] + ci_half[:3],
rtol=1e-12)
np.testing.assert_allclose(iv_l, res2.fittedvalues[:3] - ci_half[:3],
rtol=1e-12)
# check WLS
res3 = WLS(y2, x2, 1. / w).fit()
#direct calculation
covb = res3.cov_params()
predvar = res3.mse_resid * w + (x2 * np.dot(covb, x2.T).T).sum(1)
predstd = np.sqrt(predvar)
prstd, iv_l, iv_u = wls_prediction_std(res3)
np.testing.assert_almost_equal(prstd, predstd, 15)
#stats.t.isf(0.05/2., 50 - 2)
q = 2.0106347546964458
ci_half = q * predstd
np.testing.assert_allclose(iv_u, res3.fittedvalues + ci_half, rtol=1e-12)
np.testing.assert_allclose(iv_l, res3.fittedvalues - ci_half, rtol=1e-12)
# testing shapes of exog
prstd, iv_l, iv_u = wls_prediction_std(res3, x2[-1:,:], weights=3.)
np.testing.assert_equal(prstd, prstd[-1])
prstd, iv_l, iv_u = wls_prediction_std(res3, x2[-1,:], weights=3.)
np.testing.assert_equal(prstd, prstd[-1])
prstd, iv_l, iv_u = wls_prediction_std(res3, x2[-2:,:], weights=3.)
np.testing.assert_equal(prstd, prstd[-2:])
prstd, iv_l, iv_u = wls_prediction_std(res3, x2[-2:,:], weights=[3, 3])
np.testing.assert_equal(prstd, prstd[-2:])
prstd, iv_l, iv_u = wls_prediction_std(res3, x2[:3,:])
np.testing.assert_equal(prstd, prstd[:3])
np.testing.assert_allclose(iv_u, res3.fittedvalues[:3] + ci_half[:3],
rtol=1e-12)
np.testing.assert_allclose(iv_l, res3.fittedvalues[:3] - ci_half[:3],
rtol=1e-12)
#use wrong size for exog
#prstd, iv_l, iv_u = wls_prediction_std(res3, x2[-1,0], weights=3.)
np.testing.assert_raises(ValueError, wls_prediction_std, res3, x2[-1,0],
weights=3.)
# check some weight values
sew1 = wls_prediction_std(res3, x2[-3:,:])[0]**2
for wv in np.linspace(0.5, 3, 5):
sew = wls_prediction_std(res3, x2[-3:,:], weights=1. / wv)[0]**2
np.testing.assert_allclose(sew, sew1 + res3.scale * (wv - 1))
| 34.086538 | 77 | 0.626234 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 500 | 0.141044 |
2049233a0825d6a90f1dfa2c038366a683c0ad0c | 738 | py | Python | Lib/Similarity/Jaccard.py | allanbatista/search_engine | 478b027c64889c9e5681c7ce55a9a2276522e8fd | [
"Apache-2.0"
] | 1 | 2019-04-22T21:45:54.000Z | 2019-04-22T21:45:54.000Z | Lib/Similarity/Jaccard.py | allanbatista/search_engine | 478b027c64889c9e5681c7ce55a9a2276522e8fd | [
"Apache-2.0"
] | null | null | null | Lib/Similarity/Jaccard.py | allanbatista/search_engine | 478b027c64889c9e5681c7ce55a9a2276522e8fd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import json
from Tools.Logger import logger
from Lib.Similarity.Similarity import Similarity
class Jaccard(Similarity):
def predict(self, doc):
results = []
for index in range(self.total):
x = self.X[index]
sum_max = 0.0
sum_min = 0.0
for xi, yi in zip(x, doc):
sum_min += min(xi, yi)
sum_max += max(xi, yi)
try:
results.append([self.y[index], sum_min / sum_max])
except ZeroDivisionError:
results.append([self.y[index], 0.0])
results.sort(reverse=True, key=lambda x: x[1])
return results
| 21.085714 | 66 | 0.539295 | 574 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.059621 |
20493d5c70d5a62c1b2348d846154d6204b0d810 | 4,209 | py | Python | src/sequencer.py | Azure/DiskInfo | f41dfb2a4004c570b67f132c9dbf6cc53097a579 | [
"MIT"
] | 4 | 2019-10-08T18:37:21.000Z | 2020-06-01T14:09:33.000Z | src/sequencer.py | Azure/DiskInfo | f41dfb2a4004c570b67f132c9dbf6cc53097a579 | [
"MIT"
] | null | null | null | src/sequencer.py | Azure/DiskInfo | f41dfb2a4004c570b67f132c9dbf6cc53097a579 | [
"MIT"
] | 2 | 2021-01-02T15:12:42.000Z | 2021-12-03T06:18:43.000Z | """
Copyright (c) Microsoft Corporation
"""
import sys
import time
import logging
from argparse import ArgumentParser
from .constants import *
from .discovery import *
from .nvme import storeNVMeDevice
from .ata import storeATADevice
from .datahandle import outputData
# Output-format revision, embedded into every device dict by collectDiskInfo.
REV_MAJOR = 2
REV_MINOR = 0
# Uncomment the appropriate logging level to control output verbosity.
#logging.basicConfig(level=logging.DEBUG)
#logging.basicConfig(level=logging.INFO)
#logging.basicConfig(level=logging.WARNING)
logging.basicConfig(level=logging.ERROR)
#logging.basicConfig(level=logging.CRITICAL)
def dumpDataForDisk(diskNum, diskNumList):
    """Return True when *diskNum* should be processed.

    With no filter list every disk qualifies; otherwise only disks whose
    number appears (as a numeric string) in *diskNumList* qualify.
    """
    if diskNumList is None:
        return True
    return any(diskNum == int(candidate) for candidate in diskNumList)
def collectDiskInfo(classifier):
    """Enumerate the node's disks, classify each drive and emit its data.

    classifier(drive) returns either None (drive not recognised) or a
    callable yielding (vendor, bus, vu_log_function) — see usage below.
    Output destination/verbosity is controlled by command-line options.
    """
    # Capture start time for performance measurement debug.
    tStart = time.time()
    # Setup options and arguments.
    usage = "python runner.py outputDirectory [options]"
    parser = ArgumentParser(description=usage)
    parser.add_argument("file", default=".", nargs="?")
    parser.add_argument("-d", "--device", action="store", dest="dev", nargs="*", help="Only output data for specified disk number(s).")
    parser.add_argument("-l", "--list", action="store_true", dest="list", help="Output list of storage devices on the node.")
    parser.add_argument("-o", "--output", action="store_true", dest="output", help="Output disk data to screen only.")
    parser.add_argument("-v", "--verbose", action="store_true", dest="verbose", help="Enable verbose output.")
    options = parser.parse_args()
    if (options.output):
        # Output mode pushes only final result to stdout
        for handler in logging.root.handlers[:]:
            logging.root.removeHandler(handler)
        logging.basicConfig(level=logging.CRITICAL)
    elif (options.verbose):
        # Verbose mode: replace the module-level ERROR config with DEBUG.
        for handler in logging.root.handlers[:]:
            logging.root.removeHandler(handler)
        logging.basicConfig(level=logging.DEBUG)
    # Query the disks on the node.
    disks = get_disks()
    if (options.list):
        print ("%12s %30s %5s %30s" % ("Disk Number", "Model", "Bus", "Serial Number"))
    # Parse disk data one at a time.
    for disk in disks:
        model = disk[DISK_MODEL]
        bus = int(disk[DISK_BUS_TYPE])
        mnfgr = disk[DISK_MANUFACTURER]
        disk_number = int(disk[DISK_OSDISK])
        serialNumber = disk[DISK_SERIAL_NUMBER]
        drive = (model, bus, mnfgr)
        if (options.list):
            # List mode only prints a summary row; no data collection.
            print ("%12d %30s %5s %30s" % (disk_number, model, BUS_TYPE_NAMES[bus], serialNumber))
            continue
        if (not dumpDataForDisk(disk_number, options.dev)):
            # Disk filtered out by the -d option.
            continue
        # Classify this drive to understand vendor and available log pages.
        itsa = classifier(drive)
        logging.debug("itsa {0}".format(itsa))
        if itsa is not None:
            result = itsa()
            vendor = result[0]
            bus = result[1]  # classifier may override the bus reported by the OS
            vu_log_function = result[2]
            logging.debug("Vendor = {0}, bus = {1} = {2}".format(vendor, bus, BUS_TYPE_NAMES[bus]))
            device_dict = {}
            device_dict.update({"REV_MAJOR":REV_MAJOR})
            device_dict.update({"REV_MINOR":REV_MINOR})
            storeDiskData(disk, device_dict)
            # Collect protocol-specific data (NVMe vs SATA/ATA).
            if bus == BUS_TYPE_NVME:
                storeNVMeDevice(disk_number, model, device_dict, drive, vu_log_function)
            elif bus == BUS_TYPE_SATA:
                storeATADevice(disk_number, model, device_dict, drive, vu_log_function)
            # Output the disk data.
            outputData(device_dict, options.file, options.output)
    # Capture end time for performance measurement debug.
    tEnd = time.time()
    # Guideline is to stick within 5 seconds of processing time because this could block other services.
    logging.info("Execution time in seconds = {0}".format(float(tEnd - tStart)))
204982feeb2350d700a1865a2661d1dea99858c7 | 272 | py | Python | Ekeopara_Praise/Phase 2/STRINGS/Day32 Tasks/Task8.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 6 | 2020-05-23T19:53:25.000Z | 2021-05-08T20:21:30.000Z | Ekeopara_Praise/Phase 2/STRINGS/Day32 Tasks/Task8.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 8 | 2020-05-14T18:53:12.000Z | 2020-07-03T00:06:20.000Z | Ekeopara_Praise/Phase 2/STRINGS/Day32 Tasks/Task8.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 39 | 2020-05-10T20:55:02.000Z | 2020-09-12T17:40:59.000Z | '''8. Write a Python program to count occurrences of a substring in a string.'''
def count_word_in_string(string1, substring2):
    """Return the number of non-overlapping occurrences of substring2 in string1."""
    return string1.count(substring2)


print(count_word_in_string('The quick brown fox jumps over the lazy dog that is chasing the fox.', "fox"))
2049a81f1692b22d6927802aa4aae5e254614b90 | 664 | py | Python | minilabs/test-hypothesis-by-simulating-statistics/m7_l1_tests/q2.py | ebaccay/inferentialthinking | 9f839c76062169b9de498c1e044f668e7517ee94 | [
"MIT"
] | 1 | 2022-02-24T20:32:17.000Z | 2022-02-24T20:32:17.000Z | minilabs/test-hypothesis-by-simulating-statistics/m7_l1_tests/q2.py | ebaccay/inferentialthinking | 9f839c76062169b9de498c1e044f668e7517ee94 | [
"MIT"
] | null | null | null | minilabs/test-hypothesis-by-simulating-statistics/m7_l1_tests/q2.py | ebaccay/inferentialthinking | 9f839c76062169b9de498c1e044f668e7517ee94 | [
"MIT"
] | 3 | 2021-03-04T06:44:47.000Z | 2021-05-05T06:00:33.000Z | test = {
"name": "q2",
"points": 1,
"hidden": True,
"suites": [
{
"cases": [
{
"code": r"""
>>> sample_population(test_results).num_rows
3000
""",
"hidden": False,
"locked": False,
},
{
"code": r"""
>>> "Test Result" in sample_population(test_results).labels
True
""",
"hidden": False,
"locked": False,
},
{
"code": r"""
>>> round(apply_statistic(test_results, "Village Number", np.average), 4)
8.1307
""",
"hidden": False,
"locked": False,
},
],
"scored": False,
"setup": "",
"teardown": "",
"type": "doctest"
},
]
} | 17.025641 | 78 | 0.460843 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 416 | 0.626506 |
204a214617b545badaea0317c1a8410d659c58af | 3,144 | py | Python | yadage/handlers/predicate_handlers.py | vvolkl/yadage | bd34a5a1d7d06f7dd3917af2af8badd5af3f195d | [
"MIT"
] | null | null | null | yadage/handlers/predicate_handlers.py | vvolkl/yadage | bd34a5a1d7d06f7dd3917af2af8badd5af3f195d | [
"MIT"
] | null | null | null | yadage/handlers/predicate_handlers.py | vvolkl/yadage | bd34a5a1d7d06f7dd3917af2af8badd5af3f195d | [
"MIT"
] | null | null | null | import logging
import jsonpointer
import yadage.handlers.utils as utils
from yadage.handlers.expression_handlers import handlers as exprhandlers
# Module-level logger for this handler module.
log = logging.getLogger(__name__)

# Registry of predicate handlers plus the decorator used to register them.
handlers, predicate = utils.handler_decorator()
def checkmeta(flowview, metainfo):
    """Return True when every stage and step listed in *metainfo* is done.

    A stage is done when its rule has been applied on *flowview*; a step is
    done when its DAG node has a result.
    """
    log.debug('checking meta %s on view with offset %s',
              metainfo, flowview.offset)
    applied = {rule.identifier for rule in flowview.applied_rules}
    rules_applied = all(stage_id in applied for stage_id in metainfo['stages'])
    steps_finished = all(flowview.dag.getNode(step_id).has_result()
                         for step_id in metainfo['steps'])
    log.debug('all rules applied: %s, all steps have results: %s',
              rules_applied, steps_finished)
    return rules_applied and steps_finished
def scope_done(scope, flowview):
    '''
    walks recursively all scopes starting at some initial scope to determine if
    all steps and stages under this scope have been executed / applied. Will indicate
    that it's safe to reference any result of the workflow within that scope.

    :param scope: JSON-pointer string locating the scope in the bookkeeper.
    :param flowview: workflow view holding the bookkeeper, DAG and rules.
    :return: True when every child scope and its '_meta' entry are done.
    '''
    log.debug('checking scope %s on view with offset %s',
              scope, flowview.offset)
    result = True
    bookkeeper = jsonpointer.JsonPointer(scope).resolve(flowview.bookkeeper)
    # Bug fix: the original iterated bookkeeper.items() twice in identical
    # nested loops (copy-paste duplication), re-checking every child N times
    # per recursion level. A single pass is equivalent (boolean 'and' is
    # idempotent) and avoids the redundant recursive work.
    for k, v in bookkeeper.items():
        if k == '_meta':
            result = result and checkmeta(flowview, v)
        else:
            childscope = scope + '/{}'.format(k)
            result = result and scope_done(childscope, flowview)
    return result
@predicate('jsonpath_ready')
def jsonpath_ready(stage, depspec,stagespec):
    '''
    the main predicate for yadage. for a list of jsonpath expressions
    determine whether the stage or workflow scope is ready (i.e. has a result)
    '''
    log.debug('checking jsonpath ready predicate\n%s', depspec)
    dependencies = depspec['expressions']
    for x in dependencies:
        # Every expression must match something; no match means not ready yet.
        depmatches = stage.view.query(x, stage.view.steps)
        if not depmatches:
            log.debug('no query matches, not ready')
            return False
        # NOTE(review): a dependency is treated as a subworkflow when the first
        # matched value lacks a '_nodeid' key -- TODO confirm against view API.
        issubwork = '_nodeid' not in depmatches[0].value[0]
        if issubwork:
            log.debug('dependency is a subworkflow. determine if scope is done')
            # Every matched scope must be fully done before the stage is ready.
            if not all([scope_done(scope['_offset'], stage.view) for match in depmatches for scope in match.value]):
                return False
        else:
            # NOTE(review): the comprehension variable shadows the outer loop
            # variable 'x'; getSteps(x) receives the outer expression because
            # the iterable is evaluated in the enclosing scope. Confusing but
            # apparently intentional -- consider renaming.
            if not all([x.has_result() for x in stage.view.getSteps(x)]):
                return False
    log.debug('all checks ok, predicate is True')
    return True
@predicate('expressions_fulfilled')
def expressions_fulfilled(stage, depspec,stagespec):
    '''
    the main predicate for yadage. for a list of jsonpath expressions
    determine whether the stage or workflow scope is ready (i.e. has a result)
    '''
    log.debug('checking jsonpath ready predicate\n%s', depspec)
    # Dispatch each expression to its registered handler; every handler must
    # evaluate truthy. all() short-circuits on the first failure, matching
    # the original early-return behavior.
    return all(
        exprhandlers[expr['expression_type']](stage.view, expr)
        for expr in depspec['expressions']
    )
| 36.988235 | 116 | 0.66126 | 0 | 0 | 0 | 0 | 1,594 | 0.506997 | 0 | 0 | 1,034 | 0.32888 |
204a80e3dc5aa46fa4f6f7ef070b16b15e318756 | 877 | py | Python | spark/DataFormat/Parquet_Example.py | pradeep-charism/nus-mtech-workshops | 74fe1a137ee0c8efa4f53a7ade9e9203662c577f | [
"MIT"
] | null | null | null | spark/DataFormat/Parquet_Example.py | pradeep-charism/nus-mtech-workshops | 74fe1a137ee0c8efa4f53a7ade9e9203662c577f | [
"MIT"
] | null | null | null | spark/DataFormat/Parquet_Example.py | pradeep-charism/nus-mtech-workshops | 74fe1a137ee0c8efa4f53a7ade9e9203662c577f | [
"MIT"
] | null | null | null | from pyspark.sql import SparkSession
# Build a local Spark session; driver host and UI port are pinned so the
# example behaves predictably on developer machines.
builder = (
    SparkSession.builder
    .master("local")
    .appName('ReadParquet')
    .config("spark.driver.host", "localhost")
    .config("spark.ui.port", "4040")
)
spark = builder.getOrCreate()

# Load the JSON sample data, then persist it as Parquet. Parquet files are
# self-describing: the schema is stored alongside the data.
people_df = spark.read.json("people.json")
people_df.write.format("parquet").mode("overwrite").save("people.parquet")

# Reading the Parquet file back yields a DataFrame with the saved schema.
parquet_df = spark.read.parquet("people.parquet")

# Register the DataFrame as a temporary view so it can be queried with SQL.
parquet_df.createOrReplaceTempView("parquetFile")
teenagers = spark.sql("SELECT name FROM parquetFile WHERE age >= 13 AND age <= 19")
teenagers.show()
# spark.stop()
| 38.130435 | 116 | 0.761688 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 557 | 0.63512 |
204ae4f8d4e4ad33afe74d750b02fb1b6d933f26 | 5,832 | py | Python | async_pokepy/types/ability.py | PendragonLore/async_pokepy | 9982505fbc360eae349086bfa8f6faad0133f5fa | [
"MIT"
] | 5 | 2019-05-30T21:45:24.000Z | 2021-11-07T20:35:40.000Z | async_pokepy/types/ability.py | PendragonLore/async_pokepy | 9982505fbc360eae349086bfa8f6faad0133f5fa | [
"MIT"
] | null | null | null | async_pokepy/types/ability.py | PendragonLore/async_pokepy | 9982505fbc360eae349086bfa8f6faad0133f5fa | [
"MIT"
] | 1 | 2019-05-10T19:21:51.000Z | 2019-05-10T19:21:51.000Z | # -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2019 Lorenzo
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from .abc import BaseObject
from .common import Effect, Name, NamedAPIObject, VerboseEffect
__all__ = (
"Ability",
"AbilityEffectChange",
"AbilityPokemon",
"AbilityFlavorText",
)
class Ability(BaseObject):
    """Represents an ability object from the API.

    .. versionadded:: 0.1.2a

    .. container:: operations

        .. describe:: str(x)

            Returns the Pokémon's name.

        .. describe:: x[y]

            Returns a Pokémon's y attribute.

        .. describe:: x == y

            Check if two Pokémons are the same.

        .. describe:: x != y

            Check if two Pokémons are *not* the same.

    Attributes
    ----------
    id: :class:`int`
        The identifier for the ability.
    name: :class:`str`
        The name for the ability.
    is_main_series: :class:`bool`
        Whether or not the ability originated in the main series of the video games.
    generation: :class:`NamedAPIObject`
        The generation the ability originated in.
    names: List[:class:`Name`]
        The name of the ability listed in different languages.
    effect_entries: List[:class:`VerboseEffect`]
        The effect of the ability listed in different languages.
    effect_changes: List[:class:`AbilityEffectChange`]
        The list of previous effects the ability has had across version groups.
    flavor_text_entries: List[:class:`AbilityFlavorText`]
        The flavor text of the ability listed in different languages.
    pokemon: List[:class:`AbilityPokemon`]
        A list of Pokémon that could potentially have the ability."""

    __slots__ = (
        "is_main_series", "generation", "names", "effect_entries", "effect_changes", "flavor_text_entries", "pokemon"
    )

    def __init__(self, data: dict):
        super().__init__(data)

        # Scalar field straight from the payload; every list field wraps its
        # raw JSON entries in the corresponding model class.
        self.is_main_series = data["is_main_series"]
        self.generation = NamedAPIObject(data["generation"])
        self.names = [Name(entry) for entry in data["names"]]
        self.effect_entries = [VerboseEffect(entry) for entry in data["effect_entries"]]
        self.effect_changes = [AbilityEffectChange(entry) for entry in data["effect_changes"]]
        self.flavor_text_entries = [AbilityFlavorText(entry) for entry in data["flavor_text_entries"]]
        self.pokemon = [AbilityPokemon(entry) for entry in data["pokemon"]]
class AbilityEffectChange:
    """Represents a past change of the effect of a move in a version group.

    .. versionadded:: 0.1.2a

    Attributes
    ----------
    effect_entries: List[:class:`Effect`]
        The previous effect of the ability listed in different languages.
    version_group: :class:`NamedAPIObject`
        The version group in which the previous effect of the ability originated."""

    __slots__ = ("effect_entries", "version_group")

    def __init__(self, data: dict):
        # Each entry is a localized description of the former effect.
        self.effect_entries = list(map(Effect, data["effect_entries"]))
        self.version_group = NamedAPIObject(data["version_group"])

    def __repr__(self) -> str:
        return "<AbilityEffectChange version_group='{0.version_group}'>".format(self)
class AbilityPokemon:
    """Represents a Pokémon entry of an :class:`Ability`.

    Attributes
    ----------
    is_hidden: :class:`bool`
        Whether or not this a hidden ability for the Pokémon.
    slot: :class:`int`
        The slot of the ability for the pokemon.
    pokemon: :class:`NamedAPIObject`
        The Pokémon this ability could belong to."""

    __slots__ = ("is_hidden", "slot", "pokemon")

    def __init__(self, data: dict):
        # Scalars come straight from the payload; the Pokémon reference is
        # wrapped in the generic named-resource model.
        self.is_hidden = data["is_hidden"]
        self.slot = data["slot"]
        self.pokemon = NamedAPIObject(data["pokemon"])

    def __repr__(self) -> str:
        return "<AbilityPokemon is_hidden={0.is_hidden} slot={0.slot} pokemon='{0.pokemon}'>".format(self)
class AbilityFlavorText:
    """Represents the flavor text for a move, with a language and a version group.

    .. container:: operations

        .. describe:: str(x)

            Returns the actual flavor text.

    Attributes
    ----------
    flavor_text: :class:`str`
        The actual text.
    language: :class:`NamedAPIObject`
        The language in which the text is in.
    version_group: :class:`NamedAPIObject`
        The version group that uses this text."""

    __slots__ = ("flavor_text", "language", "version_group")

    def __init__(self, data: dict):
        self.flavor_text = data["flavor_text"]
        # Both references are wrapped in the generic named-resource model.
        self.language = NamedAPIObject(data["language"])
        self.version_group = NamedAPIObject(data["version_group"])

    def __str__(self) -> str:
        # str() yields the raw flavor text itself.
        return self.flavor_text

    def __repr__(self) -> str:
        return "<AbilityFlavorText language='{0.language}' version_group='{0.version_group}'>".format(self)
204d7e55c1632816918480daf6e565802968e210 | 1,852 | py | Python | compose/metrics/client.py | galeksandrp/compose | 73f8cf9132018429783e65bde66f8f961637e418 | [
"Apache-2.0"
] | 2 | 2020-05-04T02:09:57.000Z | 2022-02-28T22:15:51.000Z | compose/metrics/client.py | galeksandrp/compose | 73f8cf9132018429783e65bde66f8f961637e418 | [
"Apache-2.0"
] | 38 | 2021-07-19T21:08:06.000Z | 2022-03-28T21:11:05.000Z | compose/metrics/client.py | galeksandrp/compose | 73f8cf9132018429783e65bde66f8f961637e418 | [
"Apache-2.0"
] | 1 | 2021-07-30T07:35:34.000Z | 2021-07-30T07:35:34.000Z | import os
from enum import Enum
import requests
from docker import ContextAPI
from docker.transport import UnixHTTPAdapter
from compose.const import IS_WINDOWS_PLATFORM
if IS_WINDOWS_PLATFORM:
from docker.transport import NpipeHTTPAdapter
class Status(Enum):
    """Outcome of a CLI command, as reported in usage metrics."""

    SUCCESS = "success"
    FAILURE = "failure"
    CANCELED = "canceled"
class MetricsSource:
    """Identifiers reported as the source of metrics events."""

    # This client reports itself as the docker-compose CLI.
    CLI = "docker-compose"


# Endpoint of the local Docker CLI metrics listener: a named pipe on
# Windows, an HTTP-over-Unix-domain-socket URI elsewhere.
if IS_WINDOWS_PLATFORM:
    METRICS_SOCKET_FILE = 'npipe://\\\\.\\pipe\\docker_cli'
else:
    METRICS_SOCKET_FILE = 'http+unix:///var/run/docker-cli.sock'
class MetricsCommand(requests.Session):
    """
    Representation of a command in the metrics.

    An HTTP session bound to the local Docker CLI metrics socket; calling
    :meth:`send_metrics` posts a single usage event describing the command.
    """

    def __init__(self, command,
                 context_type=None, status=Status.SUCCESS,
                 source=MetricsSource.CLI, uri=None):
        super().__init__()
        # Normalize to e.g. "compose up"; a bare invocation maps to the help command.
        self.command = ("compose " + command).strip() if command else "compose --help"
        # Fall back to the current docker context's type, then to 'moby'.
        self.context = context_type or ContextAPI.get_current_context().context_type or 'moby'
        self.source = source
        self.status = status.value
        # The socket path can be overridden through the environment (tests).
        self.uri = uri or os.environ.get("METRICS_SOCKET_FILE", METRICS_SOCKET_FILE)
        # Mount the platform-appropriate transport for the http+unix scheme.
        if IS_WINDOWS_PLATFORM:
            self.mount("http+unix://", NpipeHTTPAdapter(self.uri))
        else:
            self.mount("http+unix://", UnixHTTPAdapter(self.uri))

    def send_metrics(self):
        """Post the usage event; best-effort -- returns the exception on failure."""
        try:
            return self.post("http+unix://localhost/usage",
                             json=self.to_map(),
                             timeout=.05,
                             headers={'Content-Type': 'application/json'})
        except Exception as e:
            # Metrics must never break the CLI: failures are swallowed and
            # handed back to the caller for optional inspection.
            return e

    def to_map(self):
        """Return the JSON-serializable payload for the usage event."""
        return {
            'command': self.command,
            'context': self.context,
            'source': self.source,
            'status': self.status,
        }
| 28.492308 | 94 | 0.612851 | 1,440 | 0.777538 | 0 | 0 | 0 | 0 | 0 | 0 | 350 | 0.188985 |
204e975daa460f0be111c6fc0681dc306dadfc2f | 921 | py | Python | chapter17/full_system/plot_accel_debug_pitch_and_roll.py | dannystaple/Learn-Robotics-Programming-Second-Edition | 081ed9bbab59aab57334fe8f2f06a157a8639eb4 | [
"MIT"
] | 19 | 2020-05-13T12:53:59.000Z | 2022-03-07T19:50:30.000Z | chapter17/full_system/plot_accel_debug_pitch_and_roll.py | dannystaple/Learn-Robotics-Programming-Second-Edition | 081ed9bbab59aab57334fe8f2f06a157a8639eb4 | [
"MIT"
] | 1 | 2020-11-20T16:56:24.000Z | 2020-12-01T06:24:45.000Z | chapter17/full_system/plot_accel_debug_pitch_and_roll.py | dannystaple/Learn-Robotics-Programming-Second-Edition | 081ed9bbab59aab57334fe8f2f06a157a8639eb4 | [
"MIT"
] | 12 | 2019-12-24T18:13:14.000Z | 2022-03-20T23:44:12.000Z | import vpython as vp
import logging
import time
from robot_imu import RobotImu
logging.basicConfig(level=logging.INFO)

imu = RobotImu()

# Graph window for pitch/roll derived from the accelerometer.
pr = vp.graph(xmin=0, xmax=60, scroll=True)
graph_pitch = vp.gcurve(color=vp.color.red, graph=pr)
graph_roll = vp.gcurve(color=vp.color.green, graph=pr)

# Second graph window for the raw accelerometer components.
xyz = vp.graph(xmin=0, xmax=60, scroll=True)
graph_x = vp.gcurve(color=vp.color.orange, graph=xyz)
graph_y = vp.gcurve(color=vp.color.cyan, graph=xyz)
graph_z = vp.gcurve(color=vp.color.purple, graph=xyz)

start = time.time()
while True:
    # Cap the loop at 100 iterations per second.
    vp.rate(100)
    elapsed = time.time() - start
    pitch, roll = imu.read_accelerometer_pitch_and_roll()
    raw_accel = imu.read_accelerometer()
    graph_pitch.plot(elapsed, pitch)
    graph_roll.plot(elapsed, roll)
    print(f"Pitch: {pitch:.2f}, Roll: {roll:.2f}")
    graph_x.plot(elapsed, raw_accel.x)
    graph_y.plot(elapsed, raw_accel.y)
    graph_z.plot(elapsed, raw_accel.z)
| 29.709677 | 57 | 0.724213 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.042345 |
204eaeed960b5419d169a5db59cdb9bba5ac5bed | 10,440 | py | Python | sql/sql_tuning.py | bbotte/archery-sql-platfrom | 74d314832ec40c20e5656c58ce62d7cdc9731f1e | [
"Apache-2.0"
] | 2 | 2021-05-27T04:07:25.000Z | 2021-09-03T02:56:39.000Z | sql/sql_tuning.py | bbotte/archery-sql-platfrom | 74d314832ec40c20e5656c58ce62d7cdc9731f1e | [
"Apache-2.0"
] | null | null | null | sql/sql_tuning.py | bbotte/archery-sql-platfrom | 74d314832ec40c20e5656c58ce62d7cdc9731f1e | [
"Apache-2.0"
] | 1 | 2019-05-29T11:22:40.000Z | 2019-05-29T11:22:40.000Z | # -*- coding: UTF-8 -*-
import time
import simplejson as json
from MySQLdb.connections import numeric_part
from django.contrib.auth.decorators import permission_required
from django.http import HttpResponse
from common.utils.extend_json_encoder import ExtendJSONEncoder
from common.utils.const import SQLTuning
from sql.utils.dao import Dao
import sqlparse
from sqlparse.sql import IdentifierList, Identifier
from sqlparse.tokens import Keyword, DML
@permission_required('sql.optimize_sqltuning', raise_exception=True)
def tuning(request):
    """Django view: run the requested SQL tuning diagnostics and return JSON.

    POST parameters: instance_name, db_name, sql_content and option[]
    (any of 'sys_parm', 'sql_plan', 'obj_stat', 'sql_profile'), each option
    enabling one diagnostic section in the response payload.
    """
    instance_name = request.POST.get('instance_name')
    db_name = request.POST.get('db_name')
    sqltext = request.POST.get('sql_content')
    option = request.POST.getlist('option[]')
    sql_tunning = SqlTuning(instance_name=instance_name, db_name=db_name, sqltext=sqltext)
    result = {'status': 0, 'msg': 'ok', 'data': {}}
    # Server version, filtered system variables and optimizer switches.
    if 'sys_parm' in option:
        basic_information = sql_tunning.basic_information()
        sys_parameter = sql_tunning.sys_parameter()
        optimizer_switch = sql_tunning.optimizer_switch()
        result['data']['basic_information'] = basic_information
        result['data']['sys_parameter'] = sys_parameter
        result['data']['optimizer_switch'] = optimizer_switch
    # Execution plan plus the optimizer-rewritten statement.
    if 'sql_plan' in option:
        plan, optimizer_rewrite_sql = sql_tunning.sqlplan()
        result['data']['optimizer_rewrite_sql'] = optimizer_rewrite_sql
        result['data']['plan'] = plan
    # DDL, table and index statistics for the referenced tables.
    if 'obj_stat' in option:
        object_statistics_tableistructure, object_statistics_tableinfo, object_statistics_indexinfo = sql_tunning.object_statistics()
        result['data']['object_statistics_tableistructure'] = object_statistics_tableistructure
        result['data']['object_statistics_tableinfo'] = object_statistics_tableinfo
        result['data']['object_statistics_indexinfo'] = object_statistics_indexinfo
    # Actually execute the statement with profiling enabled.
    if 'sql_profile' in option:
        session_status = sql_tunning.exec_sql()
        result['data']['session_status'] = session_status
    # close the connection
    sql_tunning.dao.close()
    result['data']['sqltext'] = sqltext
    return HttpResponse(json.dumps(result, cls=ExtendJSONEncoder, bigint_as_string=True),
                        content_type='application/json')
class SqlTuning(object):
    """Collect tuning diagnostics for a single SQL statement.

    Wraps a :class:`Dao` connection and exposes helpers that gather server
    parameters, the execution plan, table/index statistics and session
    profiling data for ``sqltext`` run against ``db_name``.
    """

    def __init__(self, instance_name, db_name, sqltext):
        self.dao = Dao(instance_name=instance_name, flag=True)
        self.db_name = db_name
        self.sqltext = sqltext
        # Filtered system variables relevant to query tuning.
        self.sql_variable = '''
    select
        lower(variable_name),
        variable_value
    from performance_schema.global_variables
    where upper(variable_name) in ('%s')
    order by variable_name;''' % ('\',\''.join(SQLTuning.SYS_PARM_FILTER))
        self.sql_optimizer_switch = '''
    select variable_value
    from performance_schema.global_variables
    where upper(variable_name) = 'OPTIMIZER_SWITCH';
    '''
        self.sql_table_info = '''
    select
        table_name,
        engine,
        row_format as format,
        table_rows,
        avg_row_length as avg_row,
        round((data_length + index_length) / 1024 / 1024, 2) as total_mb,
        round((data_length) / 1024 / 1024, 2) as data_mb,
        round((index_length) / 1024 / 1024, 2) as index_mb
    from information_schema.tables
    where table_schema = '%s' and table_name = '%s'
    '''
        self.sql_table_index = '''
    select
        table_name,
        index_name,
        non_unique,
        seq_in_index,
        column_name,
        collation,
        cardinality,
        nullable,
        index_type
    from information_schema.statistics
    where table_schema = '%s' and table_name = '%s'
    order by 1, 3;
    '''

    @staticmethod
    def __is_subselect(parsed):
        # A group token containing a SELECT DML keyword is a subquery.
        if not parsed.is_group:
            return False
        for item in parsed.tokens:
            if item.ttype is DML and item.value.upper() == 'SELECT':
                return True
        return False

    def __extract_from_part(self, parsed):
        """Yield the tokens that make up the FROM clause of *parsed*."""
        from_seen = False
        for item in parsed.tokens:
            if from_seen:
                if self.__is_subselect(item):
                    for x in self.__extract_from_part(item):
                        yield x
                elif item.ttype is Keyword:
                    # Bug fix: 'raise StopIteration' inside a generator has
                    # raised RuntimeError since Python 3.7 (PEP 479); 'return'
                    # is the correct way to end the generator at the next
                    # keyword after the FROM clause.
                    return
                else:
                    yield item
            elif item.ttype is Keyword and item.value.upper() == 'FROM':
                from_seen = True

    @staticmethod
    def __extract_table_identifiers(token_stream):
        """Yield table names from a FROM-clause token stream."""
        for item in token_stream:
            if isinstance(item, IdentifierList):
                for identifier in item.get_identifiers():
                    yield identifier.get_real_name()
            elif isinstance(item, Identifier):
                yield item.get_real_name()
            # It's a bug to check for Keyword here, but in the example
            # above some tables names are identified as keywords...
            elif item.ttype is Keyword:
                yield item.value

    def __extract_tables(self, p_sqltext):
        """Return the list of table names referenced by *p_sqltext*."""
        stream = self.__extract_from_part(sqlparse.parse(p_sqltext)[0])
        return list(self.__extract_table_identifiers(stream))

    def basic_information(self):
        """Return the MySQL server version."""
        return self.dao.mysql_query(sql="select @@version")

    def sys_parameter(self):
        """Return the filtered system variables."""
        # Pick the right metadata schema based on the MySQL version:
        # before 5.7 these tables live in information_schema.
        version = self.basic_information()['rows'][0][0]
        server_version = tuple([numeric_part(n) for n in version.split('.')[:2]])
        if server_version < (5, 7):
            sql = self.sql_variable.replace('performance_schema', 'information_schema')
        else:
            sql = self.sql_variable
        return self.dao.mysql_query(sql=sql)

    def optimizer_switch(self):
        """Return the OPTIMIZER_SWITCH flags."""
        # Same pre-5.7 schema fallback as sys_parameter().
        version = self.basic_information()['rows'][0][0]
        server_version = tuple([numeric_part(n) for n in version.split('.')[:2]])
        if server_version < (5, 7):
            sql = self.sql_optimizer_switch.replace('performance_schema', 'information_schema')
        else:
            sql = self.sql_optimizer_switch
        return self.dao.mysql_query(sql=sql)

    def sqlplan(self):
        """Return the execution plan and the optimizer-rewritten statement."""
        # NOTE(review): EXPLAIN EXTENDED is deprecated since MySQL 5.7 (plain
        # EXPLAIN includes the extended output) -- verify against the range of
        # supported server versions.
        plan = self.dao.mysql_query(self.db_name, "explain extended " + self.sqltext)
        optimizer_rewrite_sql = self.dao.mysql_query(sql="show warnings")
        return plan, optimizer_rewrite_sql

    # Known limitation: table extraction is imperfect and may only pick up a
    # single table from multi-table statements (only the last table's data is
    # returned because the loop overwrites the result each iteration).
    def object_statistics(self):
        """Return DDL, table info and index info for the referenced tables."""
        tableistructure = {'column_list': [], 'rows': []}
        tableinfo = {'column_list': [], 'rows': []}
        indexinfo = {'column_list': [], 'rows': []}
        for index, table_name in enumerate(self.__extract_tables(self.sqltext)):
            tableistructure = self.dao.mysql_query(db_name=self.db_name, sql="show create table {};".format(
                table_name.replace('`', '').lower()))
            tableinfo = self.dao.mysql_query(
                sql=self.sql_table_info % (self.db_name, table_name.replace('`', '').lower()))
            indexinfo = self.dao.mysql_query(
                sql=self.sql_table_index % (self.db_name, table_name.replace('`', '').lower()))
        return tableistructure, tableinfo, indexinfo

    def exec_sql(self):
        """Execute the statement with profiling enabled.

        Collects session status before/after execution, wall-clock time, and
        per-step profiling detail/summary rows for the executed query.
        """
        result = {"EXECUTE_TIME": 0,
                  "BEFORE_STATUS": {'column_list': [], 'rows': []},
                  "AFTER_STATUS": {'column_list': [], 'rows': []},
                  "SESSION_STATUS(DIFFERENT)": {'column_list': ['status_name', 'before', 'after', 'diff'], 'rows': []},
                  "PROFILING_DETAIL": {'column_list': [], 'rows': []},
                  "PROFILING_SUMMARY": {'column_list': [], 'rows': []}
                  }
        sql_profiling = "select concat(upper(left(variable_name,1)),substring(lower(variable_name),2,(length(variable_name)-1))) var_name,variable_value var_value from performance_schema.session_status order by 1"
        # Pre-5.7 schema fallback, as in sys_parameter().
        version = self.basic_information()['rows'][0][0]
        server_version = tuple([numeric_part(n) for n in version.split('.')[:2]])
        if server_version < (5, 7):
            sql = sql_profiling.replace('performance_schema', 'information_schema')
        else:
            sql = sql_profiling
        self.dao.mysql_query(sql="set profiling=1")
        records = self.dao.mysql_query(sql="select ifnull(max(query_id),0) from INFORMATION_SCHEMA.PROFILING")
        query_id = records['rows'][0][0] + 3  # skip next sql
        # Session status before executing the statement.
        result['BEFORE_STATUS'] = self.dao.mysql_query(sql=sql)
        # Run the statement and measure wall-clock time.
        t_start = time.time()
        self.dao.mysql_query(sql=self.sqltext)
        t_end = time.time()
        cost_time = "%5s" % "{:.4f}".format(t_end - t_start)
        result['EXECUTE_TIME'] = cost_time
        # Session status after executing the statement.
        result['AFTER_STATUS'] = self.dao.mysql_query(sql=sql)
        # Per-step profiling rows for the captured query id.
        result['PROFILING_DETAIL'] = self.dao.mysql_query(
            sql="select STATE,DURATION,CPU_USER,CPU_SYSTEM,BLOCK_OPS_IN,BLOCK_OPS_OUT ,MESSAGES_SENT ,MESSAGES_RECEIVED ,PAGE_FAULTS_MAJOR ,PAGE_FAULTS_MINOR ,SWAPS from INFORMATION_SCHEMA.PROFILING where query_id=" + str(
                query_id) + " order by seq")
        result['PROFILING_SUMMARY'] = self.dao.mysql_query(
            sql="SELECT STATE,SUM(DURATION) AS Total_R,ROUND(100*SUM(DURATION)/(SELECT SUM(DURATION) FROM INFORMATION_SCHEMA.PROFILING WHERE QUERY_ID=" + str(
                query_id) + "),2) AS Pct_R,COUNT(*) AS Calls,SUM(DURATION)/COUNT(*) AS R_Call FROM INFORMATION_SCHEMA.PROFILING WHERE QUERY_ID=" + str(
                query_id) + " GROUP BY STATE ORDER BY Total_R DESC")
        # Diff before/after status; keep only the counters that changed
        # (rows grow to 4 columns when a difference is recorded).
        before_status_rows = [list(item) for item in result['BEFORE_STATUS']['rows']]
        after_status_rows = [list(item) for item in result['AFTER_STATUS']['rows']]
        for index, item in enumerate(before_status_rows):
            if before_status_rows[index][1] != after_status_rows[index][1]:
                before_status_rows[index].append(after_status_rows[index][1])
                before_status_rows[index].append(
                    str(float(after_status_rows[index][1]) - float(before_status_rows[index][1])))
        diff_rows = [item for item in before_status_rows if len(item) == 4]
        result['SESSION_STATUS(DIFFERENT)']['rows'] = diff_rows
        return result
| 44.050633 | 222 | 0.633908 | 8,345 | 0.786967 | 1,091 | 0.102886 | 2,615 | 0.246605 | 0 | 0 | 3,519 | 0.331856 |
204f616867cf52fd8d4b52f2ac16b848490278a9 | 902 | py | Python | LeetCode/Array and Strings/20. Valid Parentheses/solution.py | Ceruleanacg/Crack-Interview | 994dc0eee2f576fc543c90b82398dc8d957cdf09 | [
"MIT"
] | 17 | 2018-09-04T15:51:30.000Z | 2021-06-04T08:47:07.000Z | LeetCode/Array and Strings/20. Valid Parentheses/solution.py | Ceruleanacg/Crack-Interview | 994dc0eee2f576fc543c90b82398dc8d957cdf09 | [
"MIT"
] | null | null | null | LeetCode/Array and Strings/20. Valid Parentheses/solution.py | Ceruleanacg/Crack-Interview | 994dc0eee2f576fc543c90b82398dc8d957cdf09 | [
"MIT"
] | 6 | 2018-11-03T09:36:25.000Z | 2020-05-27T17:51:08.000Z | class Solution:
def isValid(self, s):
"""
:type s: str
:rtype: bool
"""
if not s:
return True
stack = []
for char in s:
if char == '{' or char == '[' or char == '(':
stack.append(char)
else:
try:
c = stack.pop()
if char == '}':
if c != '{':
return False
elif char == ']':
if c != '[':
return False
elif char == ')':
if c != '(':
return False
except IndexError:
return False
if len(stack) > 0:
return False
else:
return True
print(Solution().isValid('()')) | 23.736842 | 57 | 0.301552 | 866 | 0.960089 | 0 | 0 | 0 | 0 | 0 | 0 | 88 | 0.097561 |
204fa04e0bd4b4812ebc2b27bc0f4a54565743d0 | 2,230 | py | Python | problems/g1_single/Alpha.py | cprudhom/pycsp3 | 980927188f4262c9ea48a6534795712f09d731d6 | [
"MIT"
] | 28 | 2019-12-14T09:25:52.000Z | 2022-03-24T08:15:13.000Z | problems/g1_single/Alpha.py | cprudhom/pycsp3 | 980927188f4262c9ea48a6534795712f09d731d6 | [
"MIT"
] | 7 | 2020-04-15T11:02:07.000Z | 2022-01-20T12:48:54.000Z | problems/g1_single/Alpha.py | cprudhom/pycsp3 | 980927188f4262c9ea48a6534795712f09d731d6 | [
"MIT"
] | 3 | 2020-04-15T08:23:45.000Z | 2021-12-07T14:02:28.000Z | """
Well-known crypto-arithmetic puzzle of unknown origin (e.g., a model is present in Gecode)
Examples of Execution:
python3 Alpha.py
python3 Alpha.py -variant=var
"""
from pycsp3 import *
if not variant():
def of(word):
return [x[i] for i in alphabet_positions(word)]
# x[i] is the value for the ith letter of the alphabet
x = VarArray(size=26, dom=range(1, 27))
satisfy(
# all letters must be different
AllDifferent(x),
# respecting clues
[Sum(of("ballet")) == 45,
Sum(of("cello")) == 43,
Sum(of("concert")) == 74,
Sum(of("flute")) == 30,
Sum(of("fugue")) == 50,
Sum(of("glee")) == 66,
Sum(of("jazz")) == 58,
Sum(of("lyre")) == 47,
Sum(of("oboe")) == 53,
Sum(of("opera")) == 65,
Sum(of("polka")) == 59,
Sum(of("quartet")) == 50,
Sum(of("saxophone")) == 134,
Sum(of("scale")) == 51,
Sum(of("solo")) == 37,
Sum(of("song")) == 61,
Sum(of("soprano")) == 82,
Sum(of("theme")) == 72,
Sum(of("violin")) == 100,
Sum(of("waltz")) == 34]
)
elif variant("var"):
# letters[i] is the value for the ith letter of the alphabet
a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z = letters = VarArray(size=26, dom=range(1, 27))
satisfy(
# all letters must be different
AllDifferent(letters),
# respecting clues
[Sum(b, a, l, l, e, t) == 45,
Sum(c, e, l, l, o) == 43,
Sum(c, o, n, c, e, r, t) == 74,
Sum(f, l, u, t, e) == 30,
Sum(f, u, g, u, e) == 50,
Sum(g, l, e, e) == 66,
Sum(j, a, z, z) == 58,
Sum(l, y, r, e) == 47,
Sum(o, b, o, e) == 53,
Sum(o, p, e, r, a) == 65,
Sum(p, o, l, k, a) == 59,
Sum(q, u, a, r, t, e, t) == 50,
Sum(s, a, x, o, p, h, o, n, e) == 134,
Sum(s, c, a, l, e) == 51,
Sum(s, o, l, o) == 37,
Sum(s, o, n, g) == 61,
Sum(s, o, p, r, a, n, o) == 82,
Sum(t, h, e, m, e) == 72,
Sum(v, i, o, l, i, n) == 100,
Sum(w, a, l, t, z) == 34]
)
| 29.342105 | 128 | 0.433632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 536 | 0.240359 |
204fe53dee63a8492d4a3b958826dede8b213446 | 3,030 | py | Python | 2020/day_16/day_16.py | viddrobnic/adventofcode | 8f06f4ad3ed6744d20d222b050a15b8ff0ff9c82 | [
"MIT"
] | null | null | null | 2020/day_16/day_16.py | viddrobnic/adventofcode | 8f06f4ad3ed6744d20d222b050a15b8ff0ff9c82 | [
"MIT"
] | null | null | null | 2020/day_16/day_16.py | viddrobnic/adventofcode | 8f06f4ad3ed6744d20d222b050a15b8ff0ff9c82 | [
"MIT"
] | 1 | 2020-12-01T16:49:12.000Z | 2020-12-01T16:49:12.000Z | def read_data():
rules = dict()
your_ticket = None
nearby_tickets = []
state = 0
with open('in') as f:
for line in map(lambda x: x.strip(), f.readlines()):
if line == '':
state += 1
continue
if line == 'your ticket:':
continue
if line == 'nearby tickets:':
continue
if state == 0:
parts = line.split(':')
ranges = parts[1].split('or')
rules[parts[0]] = []
for r in ranges:
nums = r.split('-')
rules[parts[0]].append((int(nums[0]), int(nums[1])))
if state == 1:
your_ticket = list(map(int, line.split(',')))
if state == 2:
nearby_tickets.append(list(map(int, line.split(','))))
return rules, your_ticket, nearby_tickets
def part_one(rules, tickets):
    """Return (indices of invalid tickets, ticket scanning error rate).

    A value is invalid when it fits no range of any rule. A ticket's index
    is appended once per invalid value it contains (duplicates possible),
    matching the downstream membership check in main().
    """
    bad_indices = []
    rate = 0
    for idx, ticket in enumerate(tickets):
        for value in ticket:
            matches_any_rule = any(
                lo <= value <= hi
                for ranges in rules.values()
                for lo, hi in ranges
            )
            if not matches_any_rule:
                rate += value
                bad_indices.append(idx)
    return bad_indices, rate
def part_two(rules, your_ticket, tickets):
    """Deduce which field sits at each position and multiply the values of
    your ticket's 'departure' fields.

    Assumes *tickets* contains only valid tickets and that elimination
    converges to exactly one field name per position.
    """
    # Start with every field possible at every position, then eliminate
    # fields whose ranges a position's value violates.
    candidates = [set(rules) for _ in rules]
    for ticket in tickets:
        for pos, value in enumerate(ticket):
            for name, ranges in rules.items():
                if not any(lo <= value <= hi for lo, hi in ranges):
                    candidates[pos].discard(name)

    # Constraint propagation: a position with a single candidate removes
    # that field from every other position, until all are singletons.
    while any(len(cand) > 1 for cand in candidates):
        for pos, cand in enumerate(candidates):
            if len(cand) != 1:
                continue
            for other_pos, other in enumerate(candidates):
                if other_pos != pos:
                    other -= cand

    product = 1
    for pos, cand in enumerate(candidates):
        if 'departure' in list(cand)[0]:
            product *= your_ticket[pos]
    return product
def main():
    """Solve both parts of the puzzle from the input file and print answers."""
    rules, your_ticket, nearby_tickets = read_data()
    invalid_tickets, error_rate = part_one(rules, nearby_tickets)
    # Part two only considers tickets that passed part one's validity scan.
    p_two = part_two(rules, your_ticket, [t for i, t in enumerate(nearby_tickets) if i not in invalid_tickets])
    print(f'Part One: {error_rate}')
    print(f'Part Two: {p_two}')


if __name__ == '__main__':
    main()
| 28.584906 | 111 | 0.488119 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.039274 |
2050374ed4f6e4f4a494cbe77f255d8b721e42b6 | 3,717 | py | Python | confdgnmi/tests/test_client_server_api.py | micnovak/ConfD-Demos | 479499e7c5339ae77b611e17196e7516d1f1a1ce | [
"Apache-2.0"
] | 11 | 2019-12-07T20:15:57.000Z | 2022-02-04T18:12:52.000Z | confdgnmi/tests/test_client_server_api.py | micnovak/ConfD-Demos | 479499e7c5339ae77b611e17196e7516d1f1a1ce | [
"Apache-2.0"
] | 2 | 2020-03-01T11:04:16.000Z | 2021-02-03T14:17:23.000Z | confdgnmi/tests/test_client_server_api.py | micnovak/ConfD-Demos | 479499e7c5339ae77b611e17196e7516d1f1a1ce | [
"Apache-2.0"
] | 6 | 2019-10-18T15:26:03.000Z | 2021-01-13T10:28:30.000Z | import socket
import threading
from time import sleep
import pytest
import gnmi_pb2
from client_server_test_base import GrpcBase
from confd_gnmi_api_adapter import GnmiConfDApiServerAdapter
from confd_gnmi_common import make_gnmi_path, make_xpath_path
from confd_gnmi_server import AdapterType
from route_status import RouteData, RouteProvider, ChangeOp
from utils.utils import log
# ConfD debug level passed to the route provider's data-provider init.
_confd_DEBUG = 1


@pytest.mark.grpc
@pytest.mark.confd
@pytest.mark.usefixtures("fix_method")
class TestGrpcApi(GrpcBase):
    """gNMI client/server tests exercising the ConfD API adapter."""

    def set_adapter_type(self):
        # Select the ConfD API adapter for the gNMI server under test.
        self.adapter_type = AdapterType.API

    @staticmethod
    def _route_change_thread(path_value, route_data, sleep_val=2):
        """Push external route changes to the adapter's change server.

        For each chunk in *path_value*, formats "op\\nxpath\\nvalue" messages
        and sends them over a fresh TCP connection to the adapter's external
        change port, after an initial delay of *sleep_val* seconds.
        """
        sleep(sleep_val)
        log.info("==> path_value=%s route_data=%s sleep_val=%s", path_value,
                 route_data, sleep_val)
        for pv_chunk in path_value:
            log.debug("pv_chunk=%s", pv_chunk)
            msgs = []
            for pv in pv_chunk:
                op = ChangeOp.MODIFIED.value
                xpath = make_xpath_path(pv[0])
                val_str = pv[1]
                msg = "{}\n{}\n{}".format(op, xpath, val_str)
                msgs.append(msg)
                # TODO update route_data
                # TODO reuse with route status
                # TODO port number
            log.debug("msgs=%s", msg)
            with socket.socket() as s:
                try:
                    s.connect(("localhost",
                               GnmiConfDApiServerAdapter.external_port))
                    log.debug("Connected to change server")
                    # Join all messages with newlines into one payload.
                    msg = ""
                    for m in msgs:
                        # log.debug("m=%s", m)
                        msg += m + '\n'
                    # remove last \n
                    msg = msg[:-1]
                    log.debug("msg=%s", msg)
                    s.sendall(msg.encode("utf-8"))
                except Exception:
                    # Best effort: the change server may not be up yet.
                    log.debug("Cannot connect to change server!")
        log.info("<==")

    @pytest.mark.long
    @pytest.mark.confd
    def test_subscribe_stream_on_change_api_state(self, request):
        """STREAM/on-change subscription must deliver externally pushed
        route-status changes as gNMI updates."""
        log.info("testing test_subscribe_stream_on_change_api_state")
        GnmiConfDApiServerAdapter.monitor_external_changes = True
        # Scripted change sequence; "send" markers delimit chunks that are
        # delivered in one connection to the change server.
        changes_list = [
            ("/route-status[route=rt5]/leaf1", "1010"),
            ("/route-status[route=rt6]/leaf1", "1020"),
            "send",
            ("/route-status[route=rt6]/leaf1", "1030"),
            "send",
        ]
        path_value = [[]]  # empty element means no check
        path_value.extend(self._changes_list_to_pv(changes_list))
        prefix_str = ""
        prefix = make_gnmi_path(prefix_str)
        paths = [make_gnmi_path("route-status")]
        kwargs = {"assert_fun": GrpcBase.assert_in_updates}
        kwargs["prefix"] = prefix
        kwargs["paths"] = paths
        kwargs["path_value"] = path_value
        kwargs["subscription_mode"] = gnmi_pb2.SubscriptionList.STREAM
        kwargs["read_count"] = len(path_value)
        kwargs["assert_fun"] = GrpcBase.assert_in_updates
        route_data = RouteData(num=10, random=False)
        assert len(route_data.routes)
        # Run the ConfD data-provider loop and the change pusher in parallel
        # with the subscription verification.
        RouteProvider.init_dp(route_data, confd_debug_level=_confd_DEBUG)
        confd_thread = threading.Thread(target=RouteProvider.confd_loop)
        change_thread = threading.Thread(
            target=self._route_change_thread,
            args=(path_value[1:], route_data,))
        confd_thread.start()
        change_thread.start()
        self.verify_sub_sub_response_updates(**kwargs)
        sleep(1)
        change_thread.join()
        RouteProvider.stop_confd_loop()
        confd_thread.join()
        RouteProvider.close_dp()
| 34.738318 | 76 | 0.59618 | 3,236 | 0.870595 | 0 | 0 | 3,312 | 0.891041 | 0 | 0 | 606 | 0.163035 |
2050ad7391c8cda60dda86f9cfecd9f21d5079fa | 3,486 | py | Python | Final_Version/FingerFinder.py | jaronoff97/mirrorpi | cf1a6d103648164f2ae154ca0bdb795a70944df9 | [
"MIT"
] | null | null | null | Final_Version/FingerFinder.py | jaronoff97/mirrorpi | cf1a6d103648164f2ae154ca0bdb795a70944df9 | [
"MIT"
] | null | null | null | Final_Version/FingerFinder.py | jaronoff97/mirrorpi | cf1a6d103648164f2ae154ca0bdb795a70944df9 | [
"MIT"
] | null | null | null | import numpy as np
import cv2
import math
class FingerFinder(object):
    """Detect hands in two fixed regions of a camera frame and count the
    shown fingers via contour / convexity-defect analysis (OpenCV)."""
    def __init__(self, background_reduction=False):
        super(FingerFinder, self).__init__()
        # When enabled, a MOG background subtractor masks out the static
        # scene before the hand regions are analysed.
        self.bg_reduction = background_reduction
        self.kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 20))
        self.fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
    def process(self, frame):
        # Mirror the frame horizontally (selfie/mirror view).
        res = cv2.flip(frame, 1)
        if self.bg_reduction:
            res = self.apply_mask(res)
        # Two fixed regions of interest, one per hand.
        # NOTE(review): find_hand crops res[xpos:height, ypos:width] and the
        # draw_bounding calls below use a different argument order -- the
        # intended coordinate mapping should be confirmed.
        fingers1 = self.find_hand(res, 50, 50, 300, 450, 1)
        fingers2 = self.find_hand(res, 50, 590, 900, 450, 2)
        self.draw_bounding(res, 50, 50, 300, 450, fingers1)
        self.draw_bounding(res, 590, 50, 900, 450, fingers2)
    def find_border(self, contours):
        # Return the contour with the largest area (assumed to be the hand).
        areas = [cv2.contourArea(c) for c in contours]
        max_index = np.argmax(areas)
        cnt = contours[max_index]
        return cnt
    def apply_mask(self, frame):
        # Foreground mask from the background subtractor, dilated to close
        # holes, then applied to the frame.
        fgmask = self.fgbg.apply(frame)
        fgmask = cv2.dilate(fgmask, self.kernel, iterations=10)
        # fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, self.kernel)
        return cv2.bitwise_and(frame, frame, mask=fgmask)
    def make_thresh(self, res):
        # Grayscale -> Otsu inverse-binary threshold -> contour extraction.
        grey = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
        _, thresh1 = cv2.threshold(grey, 127, 255,
                                   cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)
        _, contours, hierarchy = cv2.findContours(thresh1.copy(),
                                                  cv2.RETR_TREE,
                                                  cv2.CHAIN_APPROX_NONE)
        return contours
    def count_fingers(self, cnt, img):
        # Count convexity defects of the hand contour: each narrow "valley"
        # (angle <= 90 degrees, law of cosines below) is taken as a gap
        # between two extended fingers.  Also annotates ``img``.
        hull = cv2.convexHull(cnt, returnPoints=False)
        defects = cv2.convexityDefects(cnt, hull)
        count_defects = 0
        for i in range(defects.shape[0]):
            s, e, f, d = defects[i, 0]
            start = tuple(cnt[s][0])
            end = tuple(cnt[e][0])
            far = tuple(cnt[f][0])
            # Triangle side lengths between hull start/end and defect point.
            a = math.sqrt((end[0] - start[0])**2 + (end[1] - start[1])**2)
            b = math.sqrt((far[0] - start[0])**2 + (far[1] - start[1])**2)
            c = math.sqrt((end[0] - far[0])**2 + (end[1] - far[1])**2)
            # *57 approximates the radians-to-degrees conversion (180/pi).
            angle = math.acos((b**2 + c**2 - a**2) / (2 * b * c)) * 57
            if angle <= 90:
                count_defects += 1
                cv2.circle(img, far, 1, [0, 0, 255], -1)
            # dist = cv2.pointPolygonTest(cnt,far,True)
            cv2.line(img, start, end, [0, 255, 0], 2)
            # cv2.circle(crop_img,far,5,[0,0,255],-1)
        return count_defects
    def find_hand(self, res, xpos, ypos, width, height, handnum):
        # Crop the region of interest, find the dominant contour and count
        # its fingers; draws debug output into the crop.
        crop_img = res[xpos:height, ypos:width]
        cv2.imshow('hand {0}'.format(handnum), crop_img)
        contours = self.make_thresh(crop_img)
        cnt = self.find_border(contours)
        drawing = np.zeros(crop_img.shape, np.uint8)
        cv2.drawContours(drawing, [cnt], 0, (0, 255, 0), 0)
        #
        #
        x, y, w, h = cv2.boundingRect(cnt)
        cv2.rectangle(crop_img, (x, y), (x + w, y + h), (0, 0, 255), 0)
        fingers = self.count_fingers(cnt, crop_img)
        return fingers
    def draw_bounding(self, res, xpos, ypos, width, height, fingers):
        # Draw the region rectangle and the finger count onto the full frame.
        cv2.rectangle(res, (width, height), (xpos, ypos), (0, 255, 0), 0)
        cv2.putText(res, "{0} Fingers".format(fingers), (xpos, ypos),
                    cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 255))
| 41.5 | 75 | 0.55938 | 3,441 | 0.987091 | 0 | 0 | 0 | 0 | 0 | 0 | 205 | 0.058807 |
2052abf9b427d7f9d0208d82b5b74f383c928ce5 | 455 | py | Python | inventory/admin.py | Riphiphip/website | dc5bf64f24d5cf78661686af0281705f4d1d2576 | [
"MIT"
] | null | null | null | inventory/admin.py | Riphiphip/website | dc5bf64f24d5cf78661686af0281705f4d1d2576 | [
"MIT"
] | null | null | null | inventory/admin.py | Riphiphip/website | dc5bf64f24d5cf78661686af0281705f4d1d2576 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Item
@admin.register(Item)
class ItemAdmin(admin.ModelAdmin):
    # Django admin configuration for inventory items: the editable item
    # fields and the read-mostly metadata are grouped into separate sections.
    fieldsets = [
        ('Item', {
            'fields': [
                'name',
                'stock',
                'description',
                'thumbnail'
            ]
        }),
        ('Meta', {
            'fields': [
                'views',
            ]
        }),
    ]
    # Make the admin changelist searchable by item name.
    search_fields = [
        'name',
    ]
| 18.958333 | 34 | 0.382418 | 373 | 0.81978 | 0 | 0 | 395 | 0.868132 | 0 | 0 | 78 | 0.171429 |
2052dd57ee1a745e8bb3313bc85494e39874dcfb | 8,950 | py | Python | HubblePi/Toolbox.py | scriptorron/hubblepi | 402af74b537a40d0764b11d105aa8d3b0dd242f3 | [
"MIT"
] | null | null | null | HubblePi/Toolbox.py | scriptorron/hubblepi | 402af74b537a40d0764b11d105aa8d3b0dd242f3 | [
"MIT"
] | null | null | null | HubblePi/Toolbox.py | scriptorron/hubblepi | 402af74b537a40d0764b11d105aa8d3b0dd242f3 | [
"MIT"
] | null | null | null | import numpy as np
import json
import colour_demosaicing
def LoadRaw(FN):
    """
    DEPRECATED!
    Load a packed RAW frame from a numpy file and unpack it to np.uint16.

    The camera model is inferred from the packed shape:
    (1944, 3240) -> V1, (2464, 4100) -> V2 (both 10-bit packing),
    (3040, 6084) -> HQ (12-bit packing).

    :param FN: file name of the saved numpy array
    :return: 2D np.uint16 array of unpacked pixel values
    :raises ValueError: if the shape matches no known camera
    """
    packed = np.load(FN)
    shape_to_camera = {
        (1944, 3240): 1,
        (2464, 4100): 2,
        (3040, 6084): 3,
    }
    camera = shape_to_camera.get(packed.shape)
    if camera is None:
        raise ValueError("unknown raw data")
    if camera == 3:
        # HQ camera: 12-bit packing, three bytes encode two pixels; the
        # third byte holds the low nibbles of both neighbours.
        packed = packed.astype(np.uint16)
        rows, cols = packed.shape
        unpacked = np.zeros((rows, cols // 3 * 2), dtype=np.uint16)
        unpacked[:, ::2] = (packed[:, ::3] << 4) + (packed[:, 2::3] & 0x0F)
        unpacked[:, 1::2] = (packed[:, 1::3] << 4) + ((packed[:, 2::3] >> 4) & 0x0F)
        return unpacked
    # V1/V2 cameras: 10-bit packing.  Every four bytes are the high 8 bits
    # of four values A, B, C, D; the fifth byte packs their low 2 bits:
    #
    #   byte 1   byte 2   byte 3   byte 4   byte 5
    #  AAAAAAAA BBBBBBBB CCCCCCCC DDDDDDDD AABBCCDD
    #
    # Shift everything left by two, merge in the low bits from every fifth
    # byte, then drop the columns that carried the packed bits.
    unpacked = packed.astype(np.uint16) << 2
    for byte in range(4):
        unpacked[:, byte::5] |= ((unpacked[:, 4::5] >> ((4 - byte) * 2)) & 0b11)
    return np.delete(unpacked, np.s_[4::5], 1)
def LoadInfo(FN):
    """Read capture metadata stored as JSON.

    :param FN: path of the JSON file
    :return: the deserialized object (typically a dict)
    """
    with open(FN, "r") as handle:
        return json.load(handle)
def SplitBayerChannels(data):
    """Split an unpacked Bayer frame into its four color planes.

    V1 camera frames (1944 x 2592) use a GBRG pattern; HQ camera frames
    (3040 x 4056) use BGGR.

    :param data: 2D array of unpacked raw pixel values
    :return: dict with keys "r", "g0", "g1", "b" mapping to the planes
    :raises ValueError: if the frame shape matches no known camera
    """
    if data.shape == (1944, 2592):
        # GBRG: (row offset, column offset) of each channel in the 2x2 cell.
        offsets = {"r": (1, 0), "g0": (0, 0), "g1": (1, 1), "b": (0, 1)}
    elif data.shape == (3040, 4056):
        # BGGR layout of the HQ camera.
        offsets = {"r": (1, 1), "g0": (0, 1), "g1": (1, 0), "b": (0, 0)}
    else:
        raise ValueError("unknown data shape")
    return {name: data[row::2, col::2] for name, (row, col) in offsets.items()}
def RoundToSignificantDigits(f, n_d=3):
    """Round a positive number to ``n_d`` significant digits.

    Non-positive values are returned unchanged (log10 is undefined there).
    """
    if not f > 0:
        return f
    scale = 10 ** -(np.floor(np.log10(f)) - (n_d - 1))
    return np.round(f * scale) / scale
def PostProcess(
        Raw, Bits=10,
        Black=0.0, Flat=1.0,
        Pattern=u"GBRG", Debayer="bilinear",
        RGB_gains=(1.5,1.0,1.5),
        OutOffset = 0.0,
        OutGain = 1.0
):
    """
    Process a RAW Bayer image into a white-balanced RGB image.

    Version 1 camera has GBRG pattern, HQ camera is BGGR.

    :param Raw: 2D array of raw Bayer data
    :param Bits: bit depth of the raw values, used to normalize to [0, 1]
    :param Black: black level (scalar or per-pixel array), subtracted first
    :param Flat: flat-field (scalar or per-pixel array) for vignetting correction
    :param Pattern: Bayer pattern string passed to colour_demosaicing
    :param Debayer: demosaicing algorithm name; any unrecognized value
        selects the naive 3x3 weighted-average fallback below
    :param RGB_gains: per-channel white-balance gains (R, G, B)
    :param OutOffset: offset subtracted from the output
    :param OutGain: gain applied to the output after the offset
    :return: RGB image array
    """
    # Black and Flat correction
    Img = (Raw - Black) * (np.mean(Flat) / Flat)
    # normalize scaling
    Img = Img / float(2 ** Bits)
    # debayer
    if Debayer=="bilinear":
        Img = colour_demosaicing.demosaicing_CFA_Bayer_bilinear(Img, pattern=Pattern)
    elif Debayer=="Malvar2004":
        Img = colour_demosaicing.demosaicing_CFA_Bayer_Malvar2004(Img, pattern=Pattern)
    elif Debayer=="Menon2007":
        Img = colour_demosaicing.demosaicing_CFA_Bayer_Menon2007(Img, pattern=Pattern)
    elif Debayer=="DDFAPD":
        Img = colour_demosaicing.demosaicing_CFA_Bayer_DDFAPD(Img, pattern=Pattern)
    else:
        # from picamera example
        rgb = np.zeros(Img.shape + (3,), dtype=Img.dtype)
        rgb[1::2, 0::2, 0] = Img[1::2, 0::2] # Red
        rgb[0::2, 0::2, 1] = Img[0::2, 0::2] # Green
        rgb[1::2, 1::2, 1] = Img[1::2, 1::2] # Green
        rgb[0::2, 1::2, 2] = Img[0::2, 1::2] # Blue
        # Below we present a fairly naive de-mosaic method that simply
        # calculates the weighted average of a pixel based on the pixels
        # surrounding it. The weighting is provided by a byte representation of
        # the Bayer filter which we construct first:
        bayer = np.zeros(rgb.shape, dtype=np.uint8)
        bayer[1::2, 0::2, 0] = 1 # Red
        bayer[0::2, 0::2, 1] = 1 # Green
        bayer[1::2, 1::2, 1] = 1 # Green
        bayer[0::2, 1::2, 2] = 1 # Blue
        # Allocate an array to hold our output with the same shape as the input
        # data. After this we define the size of window that will be used to
        # calculate each weighted average (3x3). Then we pad out the rgb and
        # bayer arrays, adding blank pixels at their edges to compensate for the
        # size of the window when calculating averages for edge pixels.
        output = np.empty(rgb.shape, dtype=rgb.dtype)
        window = (3, 3)
        borders = (window[0] - 1, window[1] - 1)
        border = (borders[0] // 2, borders[1] // 2)
        rgb = np.pad(rgb, [
            (border[0], border[0]),
            (border[1], border[1]),
            (0, 0),
        ], 'constant')
        bayer = np.pad(bayer, [
            (border[0], border[0]),
            (border[1], border[1]),
            (0, 0),
        ], 'constant')
        # For each plane in the RGB data, we use a nifty numpy trick
        # (as_strided) to construct a view over the plane of 3x3 matrices. We do
        # the same for the bayer array, then use Einstein summation on each
        # (np.sum is simpler, but copies the data so it's slower), and divide
        # the results to get our weighted average:
        for plane in range(3):
            p = rgb[..., plane]
            b = bayer[..., plane]
            pview = np.lib.stride_tricks.as_strided(p, shape=(
                p.shape[0] - borders[0],
                p.shape[1] - borders[1]) + window, strides=p.strides * 2)
            bview = np.lib.stride_tricks.as_strided(b, shape=(
                b.shape[0] - borders[0],
                b.shape[1] - borders[1]) + window, strides=b.strides * 2)
            psum = np.einsum('ijkl->ij', pview)
            bsum = np.einsum('ijkl->ij', bview)
            # NOTE(review): ``//`` is floor division -- on this float image it
            # truncates; the picamera original operated on integer data.
            # Confirm this is intended.
            output[..., plane] = psum // bsum
        Img = output
    # white balance
    Img[:,:,0] *= RGB_gains[0]
    Img[:,:,1] *= RGB_gains[1]
    Img[:,:,2] *= RGB_gains[2]
    # output rescaling
    Img = (Img - OutOffset) * OutGain
    # finish
    return Img
def ExtractRawFromJpgData(JpgData, CameraType = 3):
    """Extract and unpack the Bayer raw block appended to a Raspberry Pi JPEG.

    The firmware appends a fixed-size raw section (a 'BRCM' marker, a 32 KiB
    header, then the packed sensor data) after the JPEG preview.  The packed
    data is unpacked to one np.uint16 per pixel.

    :param JpgData: complete JPEG+RAW file content (bytes)
    :param CameraType: 1 = V1, 2 = V2, 3 = HQ camera
    :return: 2D np.uint16 array of unpacked raw pixel values
    :raises AssertionError: if the 'BRCM' marker is missing (note: asserts
        are stripped under ``python -O``)
    """
    frame_len = len(JpgData)
    # Size of the appended raw section for each camera model.
    offset = {
        1: 6404096,
        2: 10270208,
        3: 18711040,
    }[CameraType]
    preview_len = frame_len - offset
    JpgData = JpgData[preview_len:]
    assert JpgData[:4] == bytes('BRCM', encoding="latin2")
    # Skip the 32 KiB Broadcom header (which includes the 'BRCM' marker).
    JpgData = JpgData[32768:]
    RawData = np.frombuffer(JpgData, dtype=np.uint8)
    # (padded sensor shape, valid data shape) per camera model.
    reshape, crop = {
        1: ((1952, 3264), (1944, 3240)),
        2: ((2480, 4128), (2464, 4100)),
        3: ((3040 + 16, 6084 + 28), (3040, 6084)),
    }[CameraType]
    data = RawData.reshape(reshape)[:crop[0], :crop[1]]
    #
    if CameraType in [1, 2]:
        # 10-bit packing: every four bytes are the high 8 bits of four
        # values, and the 5th byte contains their packed low 2 bits:
        #
        #  byte 1   byte 2   byte 3   byte 4   byte 5
        # AAAAAAAA BBBBBBBB CCCCCCCC DDDDDDDD AABBCCDD
        #
        # Shift the high bits left by two, merge in the low-order bits from
        # every 5th byte, then remove the columns that held the packed bits.
        data = data.astype(np.uint16) << 2
        for byte in range(4):
            data[:, byte::5] |= ((data[:, 4::5] >> ((4 - byte) * 2)) & 0b11)
        data = np.delete(data, np.s_[4::5], 1)
    else:
        # HQ camera, 12-bit packing: three bytes encode two pixels, the
        # third byte carrying the low nibbles of both neighbours.
        # (The dead experimental unpack variants that used to sit behind
        # ``if True: ... elif False: ... else: ...`` have been removed;
        # this was the only branch that ever executed.)
        data = data.astype(np.uint16)
        shape = data.shape
        UD = np.zeros((shape[0], shape[1] // 3 * 2), dtype=np.uint16)
        UD[:, ::2] = (data[:, ::3] << 4) + (data[:, 2::3] & 0x0F)
        UD[:, 1::2] = (data[:, 1::3] << 4) + ((data[:, 2::3] >> 4) & 0x0F)
        data = UD
    return data
def LoadRawJpg(FN, CameraType=3):
    """
    Load a JPEG+RAW capture from disk and return the unpacked Bayer data.

    :param FN: file name of the JPEG containing the appended raw section
    :param CameraType: Raspberry Pi camera version (1, 2 or 3 = HQ)
    """
    with open(FN, "rb") as fh:
        return ExtractRawFromJpgData(JpgData=fh.read(), CameraType=CameraType)
| 37.291667 | 87 | 0.540894 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,852 | 0.318659 |
2053346c5cba5096c9f7d2677dff0f68577970a3 | 4,487 | py | Python | src/backend/autoencoder_evaluate.py | framtale/image-retrieval | 1e40def038e908545e3ec1a2dab75132a49bb005 | [
"MIT"
] | null | null | null | src/backend/autoencoder_evaluate.py | framtale/image-retrieval | 1e40def038e908545e3ec1a2dab75132a49bb005 | [
"MIT"
] | 1 | 2021-06-07T13:29:20.000Z | 2021-06-26T13:56:41.000Z | src/backend/autoencoder_evaluate.py | framtale/image-retrieval | 1e40def038e908545e3ec1a2dab75132a49bb005 | [
"MIT"
] | null | null | null | from numpy.core.defchararray import array
from tensorflow.keras.models import Model
from tensorflow.keras.models import load_model
from tensorflow.keras.datasets import mnist
from PIL import Image
from tqdm import tqdm
import matplotlib.pyplot as plt
import statistics
import numpy as np
import pickle
import cv2
import sys
import os
def euclidean_distance(a, b):
    """Euclidean (L2) distance between two feature vectors -- the default
    retrieval metric."""
    delta = np.asarray(a) - np.asarray(b)
    return np.sqrt(np.sum(delta * delta))
def cosine_similarity(a, b):
    """Cosine similarity between two feature vectors (1.0 = same direction);
    used for comparison against the euclidean metric."""
    norm_product = np.linalg.norm(a) * np.linalg.norm(b)
    return np.dot(a, b) / norm_product
def perform_search_euclidean(query_image, index, maxResults=10):
    """Rank all indexed feature vectors by euclidean distance to the query.

    :param query_image: feature vector of the query image
    :param index: dict whose "features" entry lists the indexed vectors
    :param maxResults: number of hits to return
    :return: list of (distance, index_position) tuples, closest first
    """
    # Pair every indexed feature vector with its distance to the query.
    scored = [
        (euclidean_distance(query_image, features), position)
        for position, features in enumerate(index["features"])
    ]
    # Smaller distance means more similar, so an ascending sort suffices.
    return sorted(scored)[:maxResults]
def perform_search_cosine(query_image, index, maxResults=10):
    """Rank all indexed feature vectors by cosine similarity to the query.

    :param query_image: feature vector of the query image
    :param index: dict whose "features" entry lists the indexed vectors
    :param maxResults: number of hits to return
    :return: list of (similarity, index_position) tuples, most similar first
    """
    results = []
    for i in range(0, len(index["features"])):
        # Compute the cosine similarity between our query features
        # and the features for the current image in our index, then
        # update our results list with a 2-tuple consisting of the
        # computed similarity and the index of the image
        distance = cosine_similarity(query_image, index["features"][i])
        results.append((distance, i))
    # BUG FIX: for cosine *similarity* the best matches are the LARGEST
    # values; the previous ascending sort returned the least similar
    # images.  Sort descending and grab the top ones.
    results = sorted(results, reverse=True)[:maxResults]
    return results
# Evaluation script: for each test image, retrieve the 1000 nearest training
# images (per metric) and count how many retrieved labels disagree with the
# query label.  Results are cached in pickle files between runs.
# Again we use the MNIST dataset as default
print("[INFO] loading MNIST dataset...")
((trainX, trainY), (testX, testY)) = mnist.load_data()
# Add a channel dimension to every image in the dataset, then scale
# the pixel intensities to the range [0, 1]
trainX = np.expand_dims(trainX, axis=-1)
testX = np.expand_dims(testX, axis=-1)
trainX = trainX.astype("float32") / 255.0
testX = testX.astype("float32") / 255.0
# We just take the first 500 entries
testX = testX[:500]
# Load the autoencoder model and index from disk
print("[INFO] loading autoencoder and index...")
autoencoder = load_model("autoencoder.h5")
# NOTE(review): the file handle opened here is never closed explicitly.
index = pickle.loads(open("feature_vectors.pickle", "rb").read())
# Create the encoder model which consists of *just* the encoder
# portion of the autoencoder
encoder = Model(inputs=autoencoder.input,
                outputs=autoencoder.get_layer("encoded").output)
# Compute the feature vector of our input image
features = encoder.predict(testX)
# Euclidean evaluation (skipped if a cached result already exists).
if not os.path.isfile("euclidean_error.pickle"):
    euclidean_error = []
    print("[INFO] Performing euclidean evaluation...")
    for i in tqdm(range(testX.shape[0])):
        queryfeatures = features[i]
        results = perform_search_euclidean(queryfeatures, index, maxResults=1000)
        label = testY[i]
        # error_rate = number of the 1000 retrieved neighbours whose
        # training label differs from the query label.
        error_rate = 0
        for result in results:
            if trainY[result[1]] != label:
                error_rate += 1
        euclidean_error.append(error_rate)
    with open("euclidean_error.pickle", "wb") as epickle:
        pickle.dump(euclidean_error, epickle)
# Cosine evaluation (same scheme, cached separately).
if not os.path.isfile("cosine_error.pickle"):
    cosine_error = []
    print("[INFO] Performing cosine evaluation...")
    for i in tqdm(range(testX.shape[0])):
        queryfeatures = features[i]
        results = perform_search_cosine(queryfeatures, index, maxResults=1000)
        label = testY[i]
        error_rate = 0
        for result in results:
            if trainY[result[1]] != label:
                error_rate += 1
        cosine_error.append(error_rate)
    with open("cosine_error.pickle", "wb") as cpickle:
        pickle.dump(cosine_error, cpickle)
# Reload both cached error lists and report summary statistics.
with open("euclidean_error.pickle", "rb") as epickle:
    euclidean_error = pickle.load(epickle)
with open("cosine_error.pickle", "rb") as cpickle:
    cosine_error = pickle.load(cpickle)
euclidean_average = sum(euclidean_error)/len(euclidean_error)
euclidean_median = statistics.median(euclidean_error)
print("euclidean average is {}".format(euclidean_average))
print("euclidean median is {}".format(euclidean_median))
cosine_average = sum(cosine_error)/len(cosine_error)
cosine_median = statistics.median(cosine_error)
print("cosine average is {}".format(cosine_average))
print("cosine median is {}".format(cosine_median))
# Scatter plots of the per-query error counts.
# NOTE(review): the figure is not cleared between the two plots, so the
# second PNG contains both series -- confirm whether that is intended.
plt.plot(range(len(testX)), euclidean_error, ".")
plt.savefig("euclidean_error.png")
plt.plot(range(len(testX)), cosine_error, ".")
plt.savefig("cosine_error.png")
print(sum(cosine_error)/len(cosine_error))
| 34.515385 | 75 | 0.751504 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,548 | 0.344997 |
2053cab2dbdb69606035ee74c6e1b50faa72a65b | 14,806 | py | Python | a4plot/python/rooplot/stacks/stacks.py | a4/a4 | e1de89260cb3894908f1d01dfacea125abc79da9 | [
"BSL-1.0"
] | 4 | 2015-04-07T20:25:16.000Z | 2019-04-27T15:04:02.000Z | a4plot/python/rooplot/stacks/stacks.py | a4/a4 | e1de89260cb3894908f1d01dfacea125abc79da9 | [
"BSL-1.0"
] | null | null | null | a4plot/python/rooplot/stacks/stacks.py | a4/a4 | e1de89260cb3894908f1d01dfacea125abc79da9 | [
"BSL-1.0"
] | 1 | 2021-06-02T17:22:35.000Z | 2021-06-02T17:22:35.000Z | from ROOT import gROOT, gStyle, Double
from ROOT import TLegend, TLatex, TCanvas, THStack, TLine, TBox
from ROOT import kYellow, kBlack, kWhite, kRed, kWhite, kOrange
import os
import random
from colors import set_color_1D, set_color_2D, set_data_style, set_MCTotal_style, set_signal_style_1D
# Global text-layout state for the ROOT plots; stack_1D mutates the first
# three (via ``global``) depending on whether a ratio/pull sub-pad is drawn.
tsize = 0.06
tyoffset = 1.1 * 0.06 / tsize
txoffset = 2.5 * 0.06 / tsize
lmargin = 0.14
def get_legend(data, sum_mc, list_mc, signals):
    """Build a TLegend with one entry per data histogram, the summed-MC
    band (if any), each MC sample and each signal.  Labels are taken from
    the histogram titles; a trailing '.root' is stripped."""
    #legend = TLegend(0.2,0.65,0.4,0.94)
    llen = 1 + len(data) + len(list_mc) + len(signals)
    #mtop, mright, width, hinc = 0.01, 0.01, 0.38, 0.05
    #mtop, mright, width, hinc = 0.07, 0.25, 0.15, 0.01
    # Legend geometry depends on the global text size (sub-pad vs full pad).
    if tsize == 0.06:
        mtop, mright, width, hinc = 0.13, 0.07, 0.25, 0.6666*tsize
    else:
        mtop, mright, width, hinc = 0.13, 0.1, 0.3, 0.6666*tsize
    x1, y1, x2, y2 = 1.0 - mright - width, 1.0 - mtop, 1.0 - mright, 1.0 - mtop - hinc*llen
    print x1, y1, x2, y2
    legend = TLegend(x1, y1, x2, y2)
    legend.SetNColumns(2)
    legend.SetColumnSeparation(0.05)
    legend.SetBorderSize(0)
    legend.SetTextFont(42)
    legend.SetTextSize(tsize)
    legend.SetFillColor(0)
    legend.SetFillStyle(0)
    legend.SetLineColor(0)
    for d in data:
        legend.AddEntry(d, os.path.split(d.GetTitle())[1][:-5] if d.GetTitle()[-5:]=='.root' else os.path.split(d.GetTitle())[1], "p")
    if sum_mc:
        legend.AddEntry(sum_mc,"MC (stat)","flp") # <== NB: omit this entry for 2D histogram
    for h in list_mc: # sorted by initial XS
        legend.AddEntry(h, os.path.split(h.GetTitle())[1][:-5] if h.GetTitle()[-5:]=='.root' else os.path.split(h.GetTitle())[1],"f")
    for s in signals:
        legend.AddEntry(s, os.path.split(s.GetTitle())[1][:-5] if s.GetTitle()[-5:]=='.root' else os.path.split(s.GetTitle())[1],"l")
    return legend
#NB: [ATLAS Preliminary label for when plots are approved only:
def get_lumi_label(lumi="168 pb^{-1}",centermass="8", atlas=True, draft=True):
    """Draw the integrated-luminosity / sqrt(s) label and, optionally, the
    ATLAS watermark.  Returns the two TLatex objects; the second is None
    when ``atlas`` is False."""
    x, y = lmargin + 0.03, (0.75 if atlas else 0.77)
    n = TLatex()
    n.SetNDC()
    n.SetTextFont(32)
    n.SetTextColor(kBlack)
    n.SetTextSize(tsize*1.25)
    n.DrawLatex(x, y,"#intL dt = %s, #sqrt{s} = %s TeV" % (lumi,centermass))
    #x, y = 0.21, 0.65
    x, y = 0.18, 0.85
    if not atlas:
        return n, None
    l = TLatex()
    l.SetNDC()
    l.SetTextFont(42)
    l.SetTextColor(kBlack)
    if draft:
        l.DrawLatex(x,y,"#bf{#it{ATLAS work in progress}}")
    else:
        l.DrawLatex(x,y,"#bf{#it{ATLAS preliminary}}")
    return n, l
def create_mc_sum(mc_list, existing_mc_sum=None):
    """Sum the MC histograms into a total-prediction histogram.

    Negative bin contents and errors of the addends are zeroed before the
    sum, so a negative weight in one channel is not subtracted from the
    others.  Returns ``(mc_sum_line, mc_sum)``: an outline-only clone and
    the filled stat-error band, or ``(None, None)`` for an empty list.

    NOTE(review): when ``existing_mc_sum`` is supplied, ``mc_list[1:]`` is
    still added on top of it -- confirm that accumulation is intended.
    """
    if not mc_list:
        return None, None
    if existing_mc_sum:
        mc_sum = existing_mc_sum
    else:
        mc_sum = mc_list[0].Clone("mc_sum")
        mc_sum.SetDirectory(0)
        for h in mc_list[1:]:
            for b in xrange(1, h.GetXaxis().GetNbins()+1):
                # If there is negative weight in one channel, it should not
                # be subtracted from other channels
                if not (0 < h.GetBinContent(b)):
                    h.SetBinContent(b, 0.0)
                # Sometimes negative Errors occur - they play havoc with the
                # Display of error bands...
                if not (0 < h.GetBinError(b)):
                    h.SetBinError(b, 0.0)
            mc_sum.Add(h)
    # Style: hatched band for the stat error, plus a plain outline clone.
    mc_sum.SetMarkerSize(0)
    mc_sum.SetLineColor(kRed)
    mc_sum.SetFillColor(kOrange)
    mc_sum.SetFillStyle(3144)
    mc_sum_line = mc_sum.Clone("mc_sum_line")
    mc_sum_line.SetDirectory(0)
    mc_sum_line.SetFillStyle(0)
    mc_sum_line.SetFillColor(kWhite)
    #mc_sum.SetLineStyle(0)
    mc_sum.SetTitle("SM (stat)")
    return mc_sum_line, mc_sum
def create_cuts(cuts_left, cuts_right, ymin, ymax, w):
    """Draw vertical cut lines with hatched exclusion bands.

    ``cuts_left`` entries get a band hatched to the left of the line,
    ``cuts_right`` entries to the right; ``w`` is the x-axis width used to
    scale the band width.  Returns the created ROOT objects so the caller
    can keep them alive while the canvas exists.
    """
    save = []
    hashwidth = 0.01*w
    for vl in cuts_left + cuts_right:
        l = TLine(vl, ymin, vl, ymax)
        l.SetLineColor(kRed)
        l.Draw()
        save.append(l)
    #gStyle.SetHatchesSpacing(0.01)
    #gStyle.SetHatchesLineWidth(2)
    for vl in cuts_left:
        b = TBox(vl, ymin, vl - hashwidth, ymax)
        b.SetFillStyle(3345)
        b.SetFillColor(kRed)
        b.SetLineStyle(0)
        b.Draw()
        save.append(b)
    for vl in cuts_right:
        b = TBox(vl, ymin, vl + hashwidth, ymax)
        b.SetFillStyle(3354)
        b.SetFillColor(kRed)
        b.SetLineStyle(0)
        b.Draw()
        save.append(b)
    return save
#-----------------
#Axis labels:
#y-axis labels: Entries / x Units (x = bin width, Units = e.g. GeV)
#x-axis labels: Quantity [Unit] (Quantity = e.g. M_{eff}, Units = e.g. GeV)
#----------------
#Other:
#no plot titles - histogram->SetTitle("");
#to change the maximum number of digits displayed - e.g. TGaxis::SetMaxDigits(3);
#Drawing 2D plots
#- Draw("box") for first MC (dijets)
#- then Draw("boxsame") for subsequent MC (W+jets)
#- Draw("psame") for data
def set_styles(data, mcs, signals):
    """Apply the standard marker/line/fill styles to all histograms before
    drawing (data markers, signal lines, per-index MC colors)."""
    for d in data:
        set_data_style(d)
    for signal in signals:
        set_signal_style_1D(signal)
    for i, mc in enumerate(mcs):
        set_color_1D(mc,mc.GetTitle(), i)
from ROOT import gPad, kOrange, kRed
# Module-level keep-alive list: stack_1D parks its histogram clones here,
# presumably so the PyROOT objects are not garbage collected while drawn.
saver = []
def stack_1D(name, data, list_mc, signals, lumi="X",centermass="8", rebin=1, sum_mc=None, rebin_to=None, range=None, compare=False, sigma=False, log=False, prelim=False, cuts_left=(), cuts_right=()):
    """Draw the stacked data/MC comparison plot on the current pad.

    ``range`` (which shadows the builtin) restricts the x-axis; overflow
    outside the range is squashed into the edge bins.  ``compare`` adds a
    data/MC ratio sub-pad, ``sigma`` a (data-MC)/sigma pull sub-pad.
    Returns (legend, mcstack, mc_sum, mc_sum_line, save): every ROOT object
    that must stay alive while the canvas exists.
    """
    # Work on clones so the caller's histograms stay untouched.
    data = [h.Clone() for h in data]
    list_mc = [h.Clone() for h in list_mc]
    signals = [h.Clone() for h in signals]
    sum_mc = sum_mc.Clone() if sum_mc else sum_mc
    all_histos = list_mc + signals + data
    saver.extend(all_histos)
    saver.append(sum_mc)
    h = all_histos[0]
    xaxis = h.GetXaxis()
    b1, b2 = h.GetXaxis().GetFirst(), h.GetXaxis().GetLast()
    if range:
        x1, x2 = range
        x2 -= 0.000001
        range = (x1, x2)
        b1, b2 = xaxis.FindBin(x1), xaxis.FindBin(x2)
    # Optionally derive the rebin factor from a target bin count.
    if rebin_to:
        nbins = xaxis.GetNbins()
        if range:
            nbins = b2 - b1 + 1
        rebin = int(round(nbins*1.0/rebin_to))
        if rebin < 1:
            rebin = 1
    if rebin != 1:
        for histo in all_histos:
            histo.Rebin(rebin)
    if True: # squash overflow bins
        e = Double()
        # NOTE(review): this parses as ``(all_histos + [sum_mc]) if sum_mc
        # else []``, so nothing is squashed when sum_mc is None -- confirm.
        for histo in all_histos + [sum_mc] if sum_mc else []:
            c = histo.IntegralAndError(0, b1, e)
            histo.SetBinContent(b1, c)
            histo.SetBinError(b1, e)
            c = histo.IntegralAndError(b2, histo.GetNbinsX() + 1, e)
            histo.SetBinContent(b2, c)
            histo.SetBinError(b2, e)
    x1, x2 = h.GetXaxis().GetBinLowEdge(b1), h.GetXaxis().GetBinLowEdge(b2)
    # set up pads
    cpad = gPad.func()
    wh, ww = cpad.GetWh(), cpad.GetWw()  # NOTE(review): unused below
    pad_fraction = 0
    global tsize, tyoffset, txoffset
    if compare or sigma:
        # Two pads: the stack on top, the ratio/pull plot at the bottom.
        tsize = 0.06 # was 0.06
        tyoffset = 1.1 * 0.06 / tsize
        txoffset = 2.5 * 0.06 / tsize
        pad_fraction = 0.3
        cpad.Divide(1, 2, 0.01, 0.01)
        cpad.cd(1).SetPad(0, pad_fraction, 1, 1.0)
        #cpad.cd(1).SetBottomMargin(0.15)
        cpad.cd(1).SetTopMargin(0.08)
        cpad.cd(1).SetBottomMargin(0.0)
        cpad.cd(1).SetLeftMargin(lmargin)
        cpad.cd(1).SetFillStyle(4000)
        #cpad.cd(1).SetGridx()
        #cpad.cd(1).SetGridy()
        if log:
            cpad.cd(1).SetLogy()
        cpad.cd(2).SetPad(0, 0.0, 1, pad_fraction+0.1)
        cpad.cd(2).SetGridx()
        cpad.cd(2).SetGridy()
        cpad.cd(2).SetFillStyle(4000)
        cpad.cd(2).SetTopMargin(0.25)
        cpad.cd(2).SetBottomMargin(0.4)
        cpad.cd(2).SetLeftMargin(lmargin)
        cpad.cd(1)
        down_pad_fraction = pad_fraction+0.1
    else:
        # Single pad layout.
        tsize = 0.04 # was 0.06
        tyoffset = 1.1 * 0.06 / tsize
        txoffset = 2.5 * 0.06 / tsize
        cpad.SetTopMargin(0.08)
        cpad.SetBottomMargin(0.16)
        cpad.SetLeftMargin(lmargin)
        if log:
            cpad.SetLogy()
    # sort backgrounds by integral
    list_mc.sort(key=lambda h : h.Integral())
    list_mc.sort(key=lambda h : h.GetTitle() != "QCD")
    hsave, mcstack = None, None  # NOTE(review): hsave is never used
    if list_mc:
        mc_sum_line, mc_sum = create_mc_sum(list_mc, sum_mc)
        all_histos.append(mc_sum)
        all_histos.append(mc_sum_line)
        # Create MC stack
        mcstack = THStack()
        for h in list_mc:
            mcstack.Add(h)
        #all_histos.append(mcstack)
    # set range
    if range:
        h = all_histos[0]
        xa = h.GetXaxis()
        original_size = xa.GetBinLowEdge(xa.GetFirst()), xa.GetBinUpEdge(xa.GetLast())
        for histo in all_histos:
            xaxis = histo.GetXaxis()
            xaxis.SetRangeUser(*range)
    # get min/max
    ymax = (max(h.GetMaximum() for h in all_histos) + 1) * (1.5 if not log else 100)
    ymin = max(1.0 if log else 0.01, min(h.GetMinimum() for h in all_histos))
    # unset range for mc
    if range:
        for histo in list_mc:
            xaxis = histo.GetXaxis()
            xaxis.SetRangeUser(*original_size)
    # Draw everything
    # ``axis`` becomes whichever histogram defines the axes (stack, first
    # signal, or first data histogram).
    axis = None
    if list_mc:
        axis = mcstack
        mcstack.Draw("Hist")
        if range:
            mcstack.GetXaxis().SetRangeUser(*range)
        mc_sum.Draw("e2same")
        mc_sum_line.Draw("hist same")
    else:
        mc_sum = None
        mc_sum_line = None
    for signal in signals:
        if not list_mc and signal == signals[0]:
            axis = signal
            signal.Draw("hist")
        else:
            signal.Draw("hist same")
    for d in data:
        if not signals and not list_mc and d == data[0]:
            axis = d
            d.Draw("pe")
        else:
            d.Draw("pe same")
    # In compare mode the x labels live on the ratio pad, so hide them here.
    comparefactor = 1
    if compare:
        comparefactor = 0
    pad_factor = 1.0/(1 - pad_fraction)
    axis.GetYaxis().SetLabelSize(tsize * pad_factor)
    axis.GetYaxis().SetTitleSize(tsize * pad_factor)
    axis.GetYaxis().SetTitleOffset(tyoffset / pad_factor)
    axis.GetXaxis().SetLabelSize(tsize * pad_factor * comparefactor)
    axis.GetXaxis().SetTitleSize(tsize * pad_factor * comparefactor)
    axis.GetXaxis().SetTitleOffset(comparefactor * tyoffset / pad_factor)
    legend = get_legend(data,mc_sum,list(reversed(list_mc)),signals)
    legend.Draw()
    save = []
    save.extend(create_cuts(cuts_left, cuts_right, 0 if log else ymin, ymax/(1.3 if not log else 50), x2-x1))
    # Try to fix the limits...
    axis.SetMaximum(ymax)
    axis.SetMinimum(ymin)
    dhist = mcstack if mcstack else [signals + data][0]  # NOTE(review): unused
    lumiLabel, atlasLabel = get_lumi_label(lumi, centermass, atlas=prelim, draft=True)
    lumiLabel.Draw()
    if atlasLabel:
        atlasLabel.Draw()
    save.extend((atlasLabel, lumiLabel))
    # Bottom sub-pad: either the pull plot (sigma) or the data/MC ratio.
    if (compare or sigma) and mcstack:
        cpad.cd(2)
        # Create MC sum
        cdata = [d.Clone() for d in data]
        save.extend(cdata)
        for cd in cdata:
            cd.SetDirectory(0)
        cmc = mc_sum_line.Clone("mc_sum_zero")
        cmc2 = mc_sum_line.Clone("mc_sum_zero_line")
        cmc.SetFillColor(kOrange)
        cmc.SetFillStyle(2001)
        cmc2.SetLineColor(kRed)
        cmc2.SetFillStyle(0)
        cmc.SetDirectory(0)
        cmc2.SetDirectory(0)
        save.append(cmc)
        save.append(cmc2)
        Nbins = int(mcstack.GetXaxis().GetNbins())
        if sigma and cdata:
            # Pull plot: (data - MC) / sqrt(mc_err^2 + data_stat^2) per bin.
            for i in xrange(Nbins + 2):
                mc, mcerr = cmc.GetBinContent(i), cmc.GetBinError(i)
                for cd in cdata:
                    d, dstat = cd.GetBinContent(i), cd.GetBinError(i)
                    if dstat < 1:
                        dstat = 1
                    sf = (mcerr**2 + dstat**2)**0.5
                    if d > 0:
                        cd.SetBinContent(i, (d - mc)/sf)
                        cd.SetBinError(i, dstat/sf)
                    else:
                        pass # content and error are both already zero
                    # NOTE(review): these four calls run once per data
                    # histogram and use the sf of the current cd -- with
                    # several data histograms the last one wins.  Confirm.
                    cmc.SetBinContent(i, 0.0)
                    cmc.SetBinError(i, mcerr/sf)
                    cmc2.SetBinContent(i, 0.0)
                    cmc2.SetBinError(i, 0.0)
            #cmc2.GetYaxis().SetTitle("( Data - SM ) / #sigma_{stat,MC+Data} ")
            cmc2.GetYaxis().SetTitle("( Data - MC ) / #sigma_{stat}")
        else:
            # Ratio plot: data and MC error scaled by the MC prediction.
            for i in xrange(Nbins + 2):
                sf = cmc.GetBinContent(i)
                if sf > 0:
                    cmc.SetBinError(i, cmc.GetBinError(i)/sf)
                    for cd in cdata:
                        cd.SetBinContent(i, cd.GetBinContent(i)/sf)
                        cd.SetBinError(i, cd.GetBinError(i)/sf)
                else:
                    cmc.SetBinError(i, 1.0)
                    for cd in cdata:
                        cd.SetBinContent(i, 0)
                        cd.SetBinError(i, 0)
                cmc.SetBinContent(i, 1.0)
                cmc2.SetBinContent(i, 1.0)
            cmc2.GetYaxis().SetTitle("Data / MC")
        #cmc2.GetXaxis().SetTitle("")
        # Determine the y-range of the sub-pad from the data points.
        if cdata:
            mx = max(cd.GetBinContent(cd.GetMaximumBin())+0.2*cd.GetBinError(cd.GetMaximumBin()) for cd in cdata)
            #mn = min(cd.GetBinContent(cd.GetMinimumBin())-0.2*cd.GetBinError(cd.GetMinimumBin()) for cd in cdata)
            mn = mx
            for cd in cdata:
                minc, minbin = min([(cd.GetBinContent(i),i) for i in xrange(1, cd.GetNbinsX()+1) if cd.GetBinContent(i) > 0])
                mn = min(minc - 0.2*cd.GetBinError(minbin), mn)
            if compare:
                mx = min(max(1.3, mx), 2)
                mn = min(0.7, mn)
            for h in cdata + [cmc, cmc2]:
                h.SetMaximum(mx)
                h.SetMinimum(mn)
        cmc2.GetYaxis().SetNdivisions(5,0,0)
        cmc2.Draw("hist")
        cmc.Draw("e2 same")
        for cd in cdata:
            cd.Draw("pe same")
        sf = 1.0
        ysf = 0.7
        pad_factor = 1.0/down_pad_fraction
        cmc2.GetYaxis().SetLabelSize(tsize*pad_factor*sf*ysf)
        cmc2.GetYaxis().SetTitleSize(tsize*pad_factor*sf)
        cmc2.GetYaxis().SetTitleOffset(tyoffset / pad_factor / sf)
        cmc2.GetXaxis().SetLabelSize(tsize*pad_factor*sf)
        cmc2.GetXaxis().SetTitleSize(tsize*pad_factor*sf)
        cmc2.GetXaxis().SetTitleOffset(txoffset / pad_factor / sf)
        save.extend(create_cuts(cuts_left, cuts_right, mn, mx, x2-x1))
        cpad.cd()
    return legend, mcstack, mc_sum, mc_sum_line, save
def plot_1D(name, data, list_mc, signals, **kwargs):
    """Style all histograms, then draw the full stacked 1D plot; see
    stack_1D for the supported keyword arguments and the return value."""
    set_styles(data, list_mc, signals)
    return stack_1D(name, data, list_mc, signals, **kwargs)
#All MC stacked in this order:
#- ttbar 1st
#- Z+jets 2nd
#- W+jets 3rd
#- dijets last
#(i.e. inversely by cross-section)
#If a separate signal sample is drawn - it should not be added to the stack, but instead drawn as a separate line (black and SetLineWidth(4)).
#-----------------
| 33.881007 | 199 | 0.575645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,176 | 0.146967 |
2055ee4a9a03ad626c921954cbaa065f64b89465 | 32,421 | py | Python | test/pldi19/run_all.py | tjknoth/resyn | 54ff304c635f26b4b498b82be267923a65e0662d | [
"MIT"
] | 19 | 2020-04-20T02:45:21.000Z | 2021-12-12T22:04:31.000Z | test/pldi19/run_all.py | yunjeong-lee/resyn | 54ff304c635f26b4b498b82be267923a65e0662d | [
"MIT"
] | 1 | 2020-11-19T18:31:42.000Z | 2020-12-09T10:32:57.000Z | test/pldi19/run_all.py | yunjeong-lee/resyn | 54ff304c635f26b4b498b82be267923a65e0662d | [
"MIT"
] | 4 | 2020-06-01T05:43:15.000Z | 2021-02-19T04:04:49.000Z | #!/usr/bin/python3
import sys
import os, os.path
import platform
import shutil
import time
import re
import difflib
import pickle
from subprocess import run, PIPE
from colorama import init, Fore, Back, Style
from statistics import median
# Globals
# Platform-specific command configuration: POSIX systems run the locally built
# `resyn` via stack and can enforce a wall-clock timeout; Windows falls back to
# a prebuilt Resyn.exe with no timeout wrapper.
if platform.system() in ['Linux', 'Darwin']:
    SYNQUID_CMD = ['stack', 'exec', '--', 'resyn'] # Command to call Resyn
    TIMEOUT_CMD = ['timeout'] # Timeout command
    TIMEOUT = ['300'] # Timeout value (seconds)
else:
    SYNQUID_CMD = ['Resyn.exe']
    TIMEOUT_CMD = ['']
    TIMEOUT = ['']

LOGFILE = 'results.log'                          # Log file
MICRO_LOGFILE = 'micro.log'                      # Log file
DUMPFILE = 'results'                             # Result serialization file (pickle)
MICRO_DUMPFILE = 'micro'                         # Pickle dump for micro-benchmark results
CSV_FILE = 'results.csv'                         # CSV-output file
MICRO_CSV_FILE = 'micro.csv'                     # CSV-output file (micro benchmarks)
LATEX_FILE = 'results.tex'                       # Latex-output file
MICRO_LATEX_FILE = 'micro.tex'                   # Latex-output file (micro benchmarks)
ORACLE_FILE = 'solutions'                        # Solutions file (expected synthesized programs)
MICRO_ORACLE_FILE = 'micro_solutions'            # Expected solutions for the micro benchmarks
COMMON_OPTS = ['--print-stats']                  # Options to use for all benchmarks
RESOURCE_OPTS = []
RESOURCES_OFF_OPT = ['-r=false']                 # Option to disable resource analysis
FNULL = open(os.devnull, 'w')                    # Null file  (NOTE(review): never closed)
PAPER_PATH = '/home/tristan/Research/resource-paper/'  # NOTE(review): hard-coded local path
class Benchmark:
    """A single synthesis benchmark (one .sq file) plus its results-table metadata."""

    def __init__(self, name, description, components='', options=None, np='-'):
        self.name = name                # Id (source file name without the .sq extension)
        self.description = description  # Description (in the table)
        self.components = components    # Description of components used (in the table)
        # FIX: the default used to be a shared mutable `[]` (classic mutable-default
        # pitfall); use None and build a fresh list per instance instead.
        self.options = [] if options is None else options  # CLI options for individual runs
        self.num_programs = np          # Number of programs generated in enumerate-and-check

    def str(self):
        """One-line human-readable summary of the benchmark."""
        return self.name + ': ' + self.description + ' ' + str(self.options)
# Micro benchmark
class MBenchmark:
    """A micro benchmark: like Benchmark, but carries the LaTeX type signature,
    resource-bound columns, and flags for the EAC / non-incremental variants."""

    def __init__(self, name, description, signature='', components='', options=None,
                 complexity='', complexity_nr='', eac=-1, incremental=-1):
        self.name = name                    # file to test (without the .sq extension)
        self.description = description      # Description (in the table)
        self.signature = signature          # LaTeX type signature (table column)
        self.complexity = complexity        # Resource bound with resource analysis
        self.complexity_nr = complexity_nr  # Resource bound without resource analysis
        self.components = components        # Description of components used (in the table)
        # FIX: the default used to be a shared mutable `[]`; use None + fresh list.
        self.options = [] if options is None else options  # CLI options for individual runs
        self.eac = eac                      # >= 0: also run the enumerate-and-check variant
        self.incremental = incremental      # >= 0: also run the non-incremental CEGIS variant

    def str(self):
        """One-line human-readable summary of the micro benchmark."""
        return self.name + ': ' + self.description + ' ' + str(self.options)
class BenchmarkGroup:
    """A named collection of benchmarks that share default command-line options."""

    def __init__(self, name, default_options, benchmarks):
        # name:            group id shown in the results table
        # default_options: CLI options applied to every member in the common context
        # benchmarks:      list of Benchmark instances belonging to this group
        self.name, self.default_options, self.benchmarks = name, default_options, benchmarks
INSERT_TYPE = '$\\forall\\alpha .\
\\tarrow{x}{\\alpha}\
{\\tarrow{xs}{\\tilist{\\tpot{\\alpha}{1}}}\
{\\tsubset{\\tilist{\\alpha}}{\T{elems} \ \\nu = [x] \\cup \T{elems} \ xs}}}$'
INSERT_FG_TYPE = '$\\forall\\alpha .\
\\tarrow{x}{\\alpha}\
{\\tarrow{xs}{\\tilist{\\tpot{\\alpha}{\\mathsf{ite}(x > \\nu, 1, 0)}}}\
{\\tsubset{\\tilist{\\alpha}}{\T{elems} \ \\nu = [x] \\cup \T{elems} \ xs}}}$'
INSERT_MEASURE_TYPE = '$\\forall\\alpha .\
\\tarrow{x}{\\alpha}\
{\\tarrow{xs}{ \\tpot{ \\tilist{ \\alpha }}{\\mathsf{numgt}(x,\\nu)} }\
{\\tsubset{\\tilist{\\alpha}}{\T{elems} \ \\nu = [x] \\cup \T{elems} \ xs}}}$'
LEN_COMPARE_TYPE = '$\\forall\\alpha .\
\\tarrow{ys}{\\tlist{\\tpot{\\alpha}{1}}}\
{\\tarrow{zs}{\\tlist{\\alpha}}{\\tsubset{\\tbool}{\\nu = ( \T{len} \ ys = \T{len} \ zs )}}} $'
REPLICATE_TYPE = '$\\forall\\alpha .\
\\tarrow{n}{\T{Nat}}\
{\\tarrow{x}{n \\times \\tpot{\\alpha}{n}}}\
{\\tsubset{\\tlist{\\alpha}}{\T{len} \ \\nu = n}}$'
INTERSECT_TYPE = '$\\forall\\alpha .\
\\tarrow{ys}{\\tilist{\\tpot{\\alpha}{1}}}\
{\\tarrow{zs}{\\tilist{\\tpot{\\alpha}{1}}}\
{\\tsubset{\\tlist{\\alpha}}{\T{elems} \ \\nu = \T{elems} \ ys \\cap \T{elems} \ zs}}}$'
RANGE_TYPE = '$\\tarrow{lo}{\T{Int}}\
{\\tarrow{hi}{\\tsubset{\\tpot{\T{Int}}{\\nu - lo}}{\\nu \geq lo}}\
{\\tsubset{\\tilist{\\tsubset{\T{Int}}{lo \leq \\nu \leq hi}}}{\T{len} \\nu = hi - lo}}}\
{} $'
COMPRESS_TYPE = '$\\forall \\alpha .\
\\tarrow{xs}{\\tlist{\\tpot{\\alpha}{1}}}\
{\\tsubset{\\tclist{\\alpha}}{\T{elems} \ xs = \T{elems} \ \\nu}}$'
TRIPLE_TYPE = '$\\forall \\alpha .\
\\tarrow{xs}{\\tlist{\\tpot{\\alpha}{2}}}\
{\\tsubset{\\tlist{\\alpha}}{\T{len} \ \\nu = \T{len} \ xs + \T{len} \ xs + \T{len} \ xs }}$'
TRIPLE_TYPE = '$\\forall \\alpha .\
\\tarrow{xs}{\\tlist{\\tpot{\\alpha}{2}}}\
{\\tsubset{\\tlist{\\alpha}}{\T{len} \ \\nu = \T{len} \ xs + \T{len} \ xs + \T{len} \ xs }}$'
CONCAT_TYPE = '$\\forall\\alpha .\
\\tarrow{xxs}{\\tlist{\\tlist{\\tpot{\\alpha}{1}}}}\
{\\tarrow{acc}{\\tlist{\\alpha}}\
{\\tsubset{\\tlist{\\alpha}}{\T{sumLen} \ xs = \T{len} \\nu}}}$'
DIFF_TYPE = '$\\forall\\alpha .\
\\tarrow{ys}{\\tilist{\\tpot{\\alpha}{1}}}\
{\\tarrow{zs}{\\tilist{\\tpot{\\alpha}{1}}}\
{\\tsubset{\\tlist{\\alpha}}{\T{elems} \ \\nu = \T{elems} \ ys - \T{elems} \ zs}}}$'
UNION_TYPE = '$\\forall\\alpha .\
\\tarrow{xs}{\\tlist{\\alpha}}\
{\\tarrow{ys}{\\tpot{\\tlist{\\alpha}}{\\mathsf{min}(\T{len} \ xs, \T{len} \ ys)}}\
{\\tsubset{\\tlist{\\alpha}}{\\T{elems} \\nu = \\T{elems} \ xs \\cup \\T{elems} \ ys}}}$'
TAKE_TYPE = '$\\forall\\alpha .\
\\tarrow{n}{\T{Nat}}\
{\\tarrow{xs}{\\tpot{\\tsubset{\\tlist{\\alpha}}{\T{len} \\nu \\geq n}}{n}}\
{\\tsubset{\\tlist{\\alpha}}{\T{len} \\nu = n}}}$'
DROP_TYPE = '$\\forall\\alpha .\
\\tarrow{n}{\T{Nat}}\
{\\tarrow{xs}{\\tpot{\\tsubset{\\tlist{\\alpha}}{\T{len} \\nu \\geq n}}{n}}\
{\\tsubset{\\tlist{\\alpha}}{\T{len} \\nu = \T{len} xs - n}}}$'
# Micro-benchmark suite. Each entry: .sq file name, table caption, LaTeX type
# signature, components, extra solver flags, resource bound with / without
# resource analysis, and flags enabling the EAC / non-incremental variants.
MICRO_BENCHMARKS = [
    MBenchmark('List-Triple1', 'triple', TRIPLE_TYPE, 'append', ['--multiplicities=false'], '$\mid xs \mid$', '$\mid xs \mid$', 1),
    MBenchmark('List-Triple2', 'triple\'', TRIPLE_TYPE, 'append\'', ['--multiplicities=false'], '$\mid xs \mid$', '$\mid xs \mid^2$', 1),
    MBenchmark('List-Concat', 'concat list of lists', CONCAT_TYPE, 'append', [], '$\mid xxs \mid$', '$\mid xxs \mid^2$',1),
    MBenchmark('List-Compress', 'compress', COMPRESS_TYPE, '$=$,$\\neq$', [], '$\mid xs \mid$', '$2^{ \mid xs \mid }$',1),
    MBenchmark('List-Intersect', 'common', INTERSECT_TYPE, '$<$, member', ['-f=AllArguments', '-a=2', '--backtrack'], '$\mid ys \mid + \mid zs \mid$', '$\mid ys \mid \mid zs \mid$', 1),
    MBenchmark('List-Diff', 'list difference', DIFF_TYPE, '$<$, member', ['-f=AllArguments', '--backtrack'], '$\mid ys \mid + \mid zs \mid$', '$\mid ys \mid \mid zs \mid$',1),
    MBenchmark('List-Insert', 'insert', INSERT_TYPE , '$<$', ['--backtrack'], '$\mid xs \mid$', '$\mid xs \mid$'),
    MBenchmark('List-Insert-Fine', 'insert\'', INSERT_MEASURE_TYPE, '$<$', ['-a=2', '--backtrack'], '$\T{numgt}(x,xs)$', '$\mid xs \mid$',-1,1),
    MBenchmark('List-Insert-Fine-Alt', 'insert\'\'', INSERT_FG_TYPE, '$<$', [], '$\T{numgt}(x,xs)$', '$\mid xs \mid$',-1,1),
    MBenchmark('List-Replicate', 'replicate', REPLICATE_TYPE, 'zero, inc, dec', [], '$n$', '$n$',-1,1),
    MBenchmark('List-Take', 'take', TAKE_TYPE, 'zero, inc, dec', [], '$n$', '$n$',-1,1),
    MBenchmark('List-Drop', 'drop', DROP_TYPE, 'zero, inc, dec', [], '$n$', '$n$',-1,1),
    MBenchmark('List-Range', 'range', RANGE_TYPE, 'inc,dec,$\geq$', ['-f=Nonterminating'], '$hi - lo$', '-',-1,1),
    #MBenchmark('List-Union', 'union', UNION_TYPE, 'min, $\leq$', ['--explicit-match'], '$min(\mid xs \mid, \mid ys \mid )$', '$\mid xs \mid$',1,1),
    MBenchmark('List-InsertCT', 'CT insert', INSERT_TYPE, '$<$', ['--ct', '--backtrack', '-a=2'], '$\mid xs \mid$', '$\mid xs \mid$', 1),
    MBenchmark('List-LenCompareCT', 'CT compare', LEN_COMPARE_TYPE, 'true, false, and', ['-f=AllArguments', '-a=2', '--ct'], '$\mid ys \mid$', '$\mid ys \mid$', 1),
    MBenchmark('List-LenCompare', 'compare', LEN_COMPARE_TYPE, 'true, false, and', ['-f=AllArguments', '-a=2'], '$\mid ys \mid$', '$\mid ys \mid$'),
    #MBenchmark('List-Union', 'union', ''),
    #MBenchmark('List-Pairs', 'ordered pairs', 'append, attach' ),
]
# Main benchmark suite, organized into table groups. Each group supplies the
# default options used for the common-context runs; individual benchmarks may
# override them with their own `options` list.
ALL_BENCHMARKS = [
    BenchmarkGroup("List", [], [
        Benchmark('List-Null', 'is empty', 'true, false'),
        Benchmark('List-Elem', 'member', 'true, false, $=$, $\\neq$'),
        Benchmark('List-Stutter', 'duplicate each element'),
        Benchmark('List-Replicate', 'replicate', '0, inc, dec, $\\leq$, $\\neq$'),
        Benchmark('List-Append', 'append two lists', ''),
        Benchmark('List-Take', 'take first $n$ elements', '0, inc, dec, $\\leq$, $\\neq$', ['--cegis-max=50']),
        Benchmark('List-Drop', 'drop first $n$ elements', '0, inc, dec, $\\leq$, $\\neq$', ['--cegis-max=50']),
        Benchmark('List-Concat', 'concat list of lists', 'append'),
        Benchmark('List-Delete', 'delete value', '$=$, $\\neq$'),
        Benchmark('List-Zip', 'zip'),
        Benchmark('List-ZipWith', 'zip with'),
        Benchmark('List-Ith', '$i$-th element', '0, inc, dec, $\\leq$, $\\neq$'),
        Benchmark('List-ElemIndex', 'index of element', '0, inc, dec, $=$, $\\neq$'),
        Benchmark('List-Snoc', 'insert at end'),
        Benchmark('List-Split', 'balanced split', 'fst, snd, abs', ['-m=3']),
        Benchmark('List-Reverse', 'reverse', 'insert at end'),
        Benchmark('IncList-Insert', 'insert (sorted)', '$\\leq$, $\\neq$'),
        Benchmark('List-ExtractMin', 'extract minimum', '$\\leq$, $\\neq$', ['-a=2', '-m=3']),
        #Benchmark('List-Range', 'range', 'inc,dec,$\geq$'),
        Benchmark('List-Foldr', 'foldr'),
        Benchmark('List-Fold-Length', 'length using fold', '0, inc, dec', ['-m=0']),
        Benchmark('List-Fold-Append', 'append using fold', '', ['-m=0']),
        Benchmark('List-Map', 'map'),
        #Benchmark('List-Split', 'split list', '', ['-m=3'])
        # Try it by hand!
        #Benchmark('TripleList-Intersect', 'three-way intersection', '$<$, member', ['-f=AllArguments', '-m=3'])
    ]),
    BenchmarkGroup("Unique list", [], [
        Benchmark('UniqueList-Insert', 'insert', '$=$, $\\neq$'),
        Benchmark('UniqueList-Delete', 'delete', '$=$, $\\neq$'),
        #Benchmark('List-Nub', 'remove duplicates', 'member', []),
        Benchmark('List-Compress', 'compress', '$=$, $\\neq$', np = 3),
        Benchmark('UniqueList-Range', 'integer range', '0, inc, dec, $\\leq$, $\\neq$'),
        Benchmark('List-Partition', 'partition', '$\\leq$'),
        #Benchmark('IncList-Pivot', 'append with pivot'),
    ]),
    BenchmarkGroup("Sorted list", ['-f=AllArguments'], [
        Benchmark('StrictIncList-Insert', 'insert', '$<$'),
        Benchmark('StrictIncList-Delete', 'delete', '$<$'),
        #Benchmark('List-Diff', 'difference', 'member, $<$', ['--backtrack', '-f=AllArguments']),
        #Benchmark('TripleList-Intersect', 'three-way intersection', '$<$, member',['-f=AllArguments','--backtrack','-m=3'])
        Benchmark('StrictIncList-Intersect', 'intersect', '$<$', ['-f=AllArguments', '--backtrack']),
    ]),
    BenchmarkGroup("Tree", [], [
        Benchmark('Tree-Count', 'node count', '0, 1, +'),
        Benchmark('Tree-Flatten', 'preorder', 'append'),
        Benchmark('Tree-ToList', 'to list', 'append'),
        Benchmark('Tree-Elem', 'member', 'false, not, or, $=$', ['--multiplicities=false'] )
        #Benchmark('Tree-BalancedReplicate', 'create balanced', '0, inc, dec,
        #$\\leq$, $\\neq$' )
        #Benchmark('Tree-Count', 'size')
    ]),
    BenchmarkGroup("BST", [], [
        Benchmark('BST-Member', 'member', 'true, false, $\\leq$, $\\neq$'),
        Benchmark('BST-Insert', 'insert', '$\\leq$, $\\neq$'),
        Benchmark('BST-Delete', 'delete', '$\\leq$, $\\neq$'),
        Benchmark('BST-Sort', 'BST sort', '$\\leq$, $\\neq$')
    ]),
    BenchmarkGroup("AVL", ['-a=2'], [
        Benchmark('AVL-RotateL', 'rotate left', 'inc', ['-a 2', '-u']),
        Benchmark('AVL-RotateR', 'rotate right', 'inc', ['-a 2', '-u']),
        Benchmark('AVL-Balance', 'balance', 'rotate, nodeHeight, isSkewed, isLHeavy, isRHeavy', ['-a 2', '-e']),
        Benchmark('AVL-Insert', 'insert', 'balance, $<$', ['-a 2']),
        Benchmark('AVL-ExtractMin', 'extract minimum', '$<$', ['-a 2']),
        Benchmark('AVL-Delete', 'delete', 'extract minimum, balance, $<$', ['-a 2', '-m 1']),
    ]),
    BenchmarkGroup("RBT", ['-m=1', '-a=2'], [
        Benchmark('RBT-BalanceL', 'balance left', '', ['-m=1', '-a=2']),
        Benchmark('RBT-BalanceR', 'balance right', '', ['-m=1', '-a=2']),
        Benchmark('RBT-Insert', 'insert', 'balance left, right, $\\leq$, $\\neq$', ['-m=1', '-a=2'])
    ]),
    BenchmarkGroup("User", [], [
        Benchmark('Evaluator', 'desugar AST', '0, 1, 2'),
        Benchmark('AddressBook-Make', 'make address book', 'is private', ['-a=2']),
        #Benchmark('AddressBook-Merge', 'merge address books', 'append', ['-a=2'])
    ]),
    BenchmarkGroup("Binary Heap", [], [
        Benchmark('BinHeap-Insert', 'insert', '$\\leq$, $\\neq$'),
        Benchmark('BinHeap-Member', 'member', 'false, not, or, $\leq$, $\\neq$', ['--multiplicities=false']),
        Benchmark('BinHeap-Singleton', '1-element constructor', '$\\leq$, $\\neq$'),
        Benchmark('BinHeap-Doubleton', '2-element constructor', '$\\leq$, $\\neq$'),
        Benchmark('BinHeap-Tripleton', '3-element constructor', '$\\leq$, $\\neq$')
    ])
]
class SynthesisResult:
    """Metrics collected from one Resyn run, plus slots filled in later by the
    variant runs (no-resources, enumerate-and-check, non-incremental CEGIS)."""

    def __init__(self, name, time, goal_count, code_size, spec_size, measure_count, num_constraints):
        self.name = name                        # Benchmark name
        self.time = time                        # Synthesis time (seconds)
        self.goal_count = goal_count            # Number of synthesis goals (string)
        self.code_size = code_size              # Cumulative synthesized code size (AST nodes)
        self.spec_size = spec_size              # Cumulative specification size (AST nodes)
        self.measure_count = measure_count      # Number of measures defined
        self.num_constraints = num_constraints  # Number of resource constraints
        # Variant-run slots (negative time = not yet run / failed):
        self.optimized = False       # did resource analysis change the solution?
        self.nres_code_size = '-'    # solution size without resource analysis
        self.nres_time = -3.0        # time without resource analysis
        self.eac_time = -3.0         # enumerate-and-check time
        self.incremental_time = -3.0 # non-incremental CEGIS time
        self.pct_slowdown = 0.0      # ratio of resource time to no-resource time

    def str(self):
        """Comma-separated one-line summary (time rendered to two decimals)."""
        fields = [self.name, '{0:0.2f}'.format(self.time), self.goal_count,
                  self.code_size, self.spec_size, self.measure_count]
        return ', '.join(fields)
def run_benchmark(name, opts, default_opts):
    '''Run benchmark name with command-line options opts (use default_opts with running the common context variant); record results in the results dictionary'''
    # NOTE(review): default_opts is currently unused in the body -- confirm intent.
    # Side effects: appends to LOGFILE, mutates the module-level `results` dict,
    # and prints a progress line (time + OK/FAIL) to stdout.
    with open(LOGFILE, 'a+') as logfile:
        start = time.time()
        logfile.write(name + '\n')
        logfile.seek(0, os.SEEK_END)
        # Run Synquid on the benchmark:
        synthesis_res = run(TIMEOUT_CMD + TIMEOUT + SYNQUID_CMD + COMMON_OPTS + RESOURCE_OPTS + opts + [name + '.sq'], stdout=PIPE, stderr=PIPE, universal_newlines=True)
        end = time.time()
        print('{0:0.2f}'.format(end - start), end = ' ')
        if synthesis_res.returncode: # Synthesis failed
            print(Back.RED + Fore.RED + Style.BRIGHT + 'FAIL' + Style.RESET_ALL, end = ' ')
            synthesis_output = ''
            # Placeholder '-' strings mark metrics that were never produced.
            results [name] = SynthesisResult(name, (end - start), '-', '-', '-', '-', '-')
        else: # Synthesis succeeded: code metrics from the output and record synthesis time
            logfile.write(synthesis_res.stdout)
            # The stats lines are the last 6 lines of stdout (--print-stats format).
            lastLines = synthesis_res.stdout.split('\n')[-6:]
            synthesis_output = synthesis_res.stdout.split('\n')[:-6]
            goal_count = re.match("\(Goals: (\d+)\).*$", lastLines[0]).group(1)
            measure_count = re.match("\(Measures: (\d+)\).*$", lastLines[1]).group(1)
            spec_size = re.match("\(Spec size: (\d+)\).*$", lastLines[2]).group(1)
            solution_size = re.match("\(Solution size: (\d+)\).*$", lastLines[3]).group(1)
            num_constraints = re.match("\(Number of resource constraints: (\d+)\).*$", lastLines[4]).group(1)
            results [name] = SynthesisResult(name, (end - start), goal_count, solution_size, spec_size, measure_count, num_constraints)
            print(Back.GREEN + Fore.GREEN + Style.BRIGHT + 'OK' + Style.RESET_ALL, end = ' ')
        variant_options = [ # Command-line options to use for each variant of Synquid
            ('nres', opts + RESOURCES_OFF_OPT),
        ]
        # Run each variant: (now there's only one, should probably change this...)
        # NOTE: synthesis_output (a list) is stringified here and diffed by run_version.
        for (variant_id, opts) in variant_options:
            run_version(name, variant_id, opts, logfile, str(synthesis_output), results)
        print()
def run_micro_benchmark(name, opts, default_opts, eac, incremental):
    '''Run benchmark name with command-line options opts (use default_opts with running the common context variant); record results in the results dictionary'''
    # NOTE(review): near-duplicate of run_benchmark, but logs to MICRO_LOGFILE,
    # records into `micro_results`, and additionally runs the EAC and
    # non-incremental variants when eac/incremental >= 0. default_opts unused.
    with open(MICRO_LOGFILE, 'a+') as logfile:
        start = time.time()
        logfile.write(name + '\n')
        logfile.seek(0, os.SEEK_END)
        # Run Synquid on the benchmark:
        synthesis_res = run(TIMEOUT_CMD + TIMEOUT + SYNQUID_CMD + COMMON_OPTS + RESOURCE_OPTS + opts + [name + '.sq'], stdout=PIPE, stderr=PIPE, universal_newlines=True)
        end = time.time()
        print('{0:0.2f}'.format(end - start), end = ' ')
        if synthesis_res.returncode: # Synthesis failed
            print(Back.RED + Fore.RED + Style.BRIGHT + 'FAIL' + Style.RESET_ALL, end = ' ')
            synthesis_output = ''
            micro_results [name] = SynthesisResult(name, (end - start), '-', '-', '-', '-', '-')
        else: # Synthesis succeeded: code metrics from the output and record synthesis time
            logfile.write(synthesis_res.stdout)
            # Stats are the final 6 lines of stdout (--print-stats format).
            lastLines = synthesis_res.stdout.split('\n')[-6:]
            synthesis_output = synthesis_res.stdout.split('\n')[:-6]
            goal_count = re.match("\(Goals: (\d+)\).*$", lastLines[0]).group(1)
            measure_count = re.match("\(Measures: (\d+)\).*$", lastLines[1]).group(1)
            spec_size = re.match("\(Spec size: (\d+)\).*$", lastLines[2]).group(1)
            solution_size = re.match("\(Solution size: (\d+)\).*$", lastLines[3]).group(1)
            num_constraints = re.match("\(Number of resource constraints: (\d+)\).*$", lastLines[4]).group(1)
            micro_results [name] = SynthesisResult(name, (end - start), goal_count, solution_size, spec_size, measure_count, num_constraints)
            print(Back.GREEN + Fore.GREEN + Style.BRIGHT + 'OK' + Style.RESET_ALL, end = ' ')
        variant_options = [ # Command-line options to use for each variant of Synquid
            ('nres', opts + RESOURCES_OFF_OPT),
        ]
        # Run each variant: (now there's only one, should probably change this...)
        for (variant_id, opts) in variant_options:
            run_version(name, variant_id, opts, logfile, str(synthesis_output), micro_results)
        # Optional enumerate-and-check variant; '{-}' renders as '-' in LaTeX.
        eac_opts = ['--eac', '--backtrack']
        if eac < 0:
            micro_results[name].eac_time = '{-}'
        else:
            run_micro_version(name, logfile, 'EAC', eac_opts, lambda t: set_eac_time(micro_results, name, t))
        # Optional non-incremental CEGIS variant.
        incremental_opts = ['--inc-cegis=false']
        if incremental < 0:
            micro_results[name].incremental_time = '{-}'
        else:
            run_micro_version(name, logfile, 'NONINCREMENTAL', incremental_opts, lambda t: set_inc_time(micro_results, name, t))
        print()
def set_eac_time(res, name, t):
    """Record the enumerate-and-check running time for benchmark `name` in `res`."""
    entry = res[name]
    entry.eac_time = t
def set_inc_time(res, name, t):
    """Record the non-incremental CEGIS running time for benchmark `name` in `res`."""
    entry = res[name]
    entry.incremental_time = t
def run_version(name, variant_id, variant_opts, logfile, with_res, results_file):
    '''Run benchmark name using command-line options variant_opts and record it as a Synquid variant variant_id in the results dictionary'''
    # with_res is the stringified solution output of the resource-enabled run;
    # it is diffed against this variant's output to detect optimization changes.
    start = time.time()
    logfile.seek(0, os.SEEK_END)
    # Run Synquid on the benchmark, mute output:
    synthesis_res = run(TIMEOUT_CMD + TIMEOUT + SYNQUID_CMD + COMMON_OPTS +
        variant_opts + [name + '.sq'], stdout=PIPE, stderr=PIPE, universal_newlines=True)
    end = time.time()
    #results_file[name].eac_time = -1
    print('{0:0.2f}'.format(end - start), end = ' ')
    if synthesis_res.returncode == 124: # Timeout: record timeout
        print(Back.RED + Fore.RED + Style.BRIGHT + 'TIMEOUT' + Style.RESET_ALL, end = ' ')
        results_file[name].nres_time = -1
    elif synthesis_res.returncode: # Synthesis failed: record failure
        print(Back.RED + Fore.RED + Style.BRIGHT + 'FAIL' + Style.RESET_ALL, end = ' ')
        results_file[name].nres_time = -2
    else: # Synthesis succeeded: record time for variant
        lastLines = synthesis_res.stdout.split('\n')[-6:]
        solution_size = re.match("\(Solution size: (\d+)\).*$", lastLines[3]).group(1)
        results_file[name].nres_time = (end - start)
        # NOTE(review): despite the name, this is resource_time / variant_time,
        # i.e. a slowdown factor for the resource-enabled run -- confirm intent.
        pct_slower = results_file[name].time / (end - start)
        results_file[name].pct_slowdown = pct_slower
        without_res = synthesis_res.stdout.split('\n')[:-6]
        # Compare outputs to see if resources led to any optimization
        # (both sides are stringified lists, so this is a character-level diff).
        diff = difflib.unified_diff(with_res, str(without_res))
        print(Back.GREEN + Fore.GREEN + Style.BRIGHT + 'OK' + Style.RESET_ALL, end=' ')
        try:
            # next() raises StopIteration when the diff is empty (outputs equal).
            first = next(diff)
            if with_res != '': print(Back.GREEN + Fore.GREEN + Style.BRIGHT + 'OPTIMIZED' + Style.RESET_ALL, end=' ')
            results_file[name].optimized = True
            results_file[name].nres_code_size = solution_size
        except StopIteration:
            print('Unchanged', end=' ')
def run_micro_version(name, logfile, version, opts, set_time):
    '''Run benchmark using enumerate-and-check version of synquid'''
    # `version` is only a label for progress output; `set_time` is a callback
    # (e.g. set_eac_time/set_inc_time) that stores the outcome in the results dict.
    start = time.time()
    logfile.seek(0, os.SEEK_END)
    # Run Synquid on the benchmark, mute output:
    synthesis_res = run(TIMEOUT_CMD + TIMEOUT + SYNQUID_CMD + COMMON_OPTS + opts + [name + '.sq'], stdout=PIPE, stderr=PIPE, universal_newlines=True)
    end = time.time()
    print('{0:0.2f}'.format(end - start), end = ' ')
    if synthesis_res.returncode == 124: # Timeout: record timeout
        print(Back.RED + Fore.RED + Style.BRIGHT + version + 'TIMEOUT' + Style.RESET_ALL, end = ' ')
        set_time('TO')  # rendered literally in the table
        #results_file[name].field = 'TO'
        #results_file[name].eac_time = 'TO'
    elif synthesis_res.returncode: # Synthesis failed: record failure
        print(Back.RED + Fore.RED + Style.BRIGHT + version + 'FAIL' + Style.RESET_ALL, end = ' ')
        set_time(-2)  # negative sentinel -> formatted as '-'
        #results_file[name].field = -2
        #results_file[name].eac_time = -2
    else: # Synthesis succeeded: record time for variant
        set_time(end - start)
        #results_file[name].field = end - start
        #results_file[name].eac_time = (end - start)
        print(Back.GREEN + Fore.GREEN + Style.BRIGHT + version + 'OK' + Style.RESET_ALL, end=' ')
def format_time(t):
    """Render a timing table cell: strings (e.g. 'TO' or '{-}') pass through
    unchanged, negative numbers are 'not run / failed' sentinels shown as '-',
    and any other number is formatted as seconds with two decimals."""
    if isinstance(t, str):
        return t
    return '-' if t < 0 else '{0:0.2f}'.format(t)
def write_micro_csv():
    '''Generate CSV file for micro benchmark'''
    # Reads the module-level MICRO_BENCHMARKS list and micro_results dict;
    # raises KeyError if a benchmark has no recorded result.
    with open(MICRO_CSV_FILE, 'w') as outfile:
        for b in MICRO_BENCHMARKS:
            outfile.write (b.name + ',')
            result = micro_results [b.name]
            optstr = 'True' if result.optimized else '-'  # kept for the commented-out column
            outfile.write (result.spec_size + ',')
            outfile.write (result.code_size + ',')
            outfile.write (format_time(result.time) + ',')
            outfile.write (format_time(result.nres_time) + ',')
            #outfile.write (result.eac_time + ',')
            outfile.write (format_time(result.eac_time) + ',')
            outfile.write (format_time(result.incremental_time) + ',')
            outfile.write (result.nres_code_size + ',')
            #outfile.write (optstr + ',')
            outfile.write ('\n')  # NOTE: each row ends with a trailing comma
def write_csv():
    '''Generate CSV file from the results dictionary'''
    # Iterates the module-level `groups` (set in __main__) and `results` dict.
    with open(CSV_FILE, 'w') as outfile:
        for group in groups:
            for b in group.benchmarks:
                outfile.write (b.name + ',')
                result = results [b.name]
                optstr = 'True' if result.optimized else '-'  # kept for the commented-out column
                outfile.write (result.spec_size + ',')
                outfile.write (result.code_size + ',')
                outfile.write (format_time(result.time) + ',')
                outfile.write (format_time(result.nres_time) + ',')
                outfile.write (result.nres_code_size + ',')
                #outfile.write (result.eac_time + ',')
                #outfile.write (optstr + ',')
                outfile.write ('\n')  # NOTE: each row ends with a trailing comma
def write_micro_latex():
    '''Generate Latex table from the results dictionary'''
    # Emits one numbered LaTeX table row per micro benchmark into MICRO_LATEX_FILE.
    total_count = 0
    to_def = 0   # NOTE(review): never updated here -- dead counters copied from write_latex
    to_nres = 0
    with open(MICRO_LATEX_FILE, 'w') as outfile:
        rownum = 1
        for b in MICRO_BENCHMARKS:
            result = micro_results [b.name]
            optstr = 'Yes' if result.optimized else '-'  # kept for the commented-out column
            row = str(rownum) +\
                ' & ' + b.description +\
                ' & ' + b.signature + \
                ' & ' + str(b.components) + \
                ' & ' + format_time(result.time) + \
                ' & ' + format_time(result.nres_time) + \
                ' & ' + format_time(result.eac_time) + \
                ' & ' + format_time(result.incremental_time) + \
                ' & ' + b.complexity + \
                ' & ' + b.complexity_nr + ' \\\\'
                #format_time(result.eac_time) + \
                #' & ' + result.nres_code_size + ' \\\\'
                #' & ' + str(b.num_programs) + \
                #' & ' + str(result.eac_time) + ' \\\\'
                #' & ' + optstr + ' \\\\'
            outfile.write (row)
            outfile.write ('\n')
            rownum = rownum + 1
            total_count = total_count + 1
    print('Total:', total_count)
def write_latex():
    '''Generate Latex table from the results dictionary'''
    # Emits one \multirow block per benchmark group into LATEX_FILE and counts
    # how many no-resource variants timed out/failed (negative nres_time).
    total_count = 0
    to_def = 0   # NOTE(review): never updated -- dead counter
    to_nres = 0
    with open(LATEX_FILE, 'w') as outfile:
        for group in groups:
            outfile.write ('\multirow{')
            outfile.write (str(group.benchmarks.__len__()))
            outfile.write ('}{*}{\\parbox{1cm}{\\vspace{-0.85\\baselineskip}\center{')
            outfile.write (group.name)
            outfile.write ('}}}')
            for b in group.benchmarks:
                result = results [b.name]
                optstr = 'Yes' if result.optimized else '-'  # kept for the commented-out column
                row = (
                    ' & ' + b.description +
                    # ' & ' + result.goal_count + \
                    ' & ' + str(b.components) +
                    #' & ' + result.measure_count + \
                    ' & ' + result.code_size +
                    ' & ' + format_time(result.time) +
                    ' & ' + format_time(result.nres_time) + ' \\\\'
                    #' & ' + result.nres_code_size + \
                    #' & ' + str(b.num_programs) + \
                    #' & ' + format_time(result.eac_time) + ' \\\\'
                    #' & ' + optstr + ' \\\\'
                )
                outfile.write (row)
                outfile.write ('\n')
                total_count = total_count + 1
                if result.nres_time < 0.0:
                    to_nres = to_nres + 1
            outfile.write ('\\hline')
    print('Total:', total_count)
    print('TO nres:', to_nres)
def cmdline():
    """Parse the benchmark driver's command-line flags and return the namespace."""
    import argparse
    parser = argparse.ArgumentParser()
    # Boolean suite-size switches:
    for flag in ('--medium', '--small'):
        parser.add_argument(flag, action='store_true')
    # Single-benchmark reruns (each takes exactly one benchmark name):
    parser.add_argument('--rerun', nargs=1, help='Rerun given benchmark')
    parser.add_argument('--rerun-micro', nargs=1, help='Rerun given micro benchmark')
    return parser.parse_args()
if __name__ == '__main__':
    init()  # colorama: enable ANSI colors (needed on Windows)
    cl_opts = cmdline()

    # Check if there are serialized results from a previous (partial) run;
    # already-finished benchmarks are skipped below.
    if os.path.isfile(DUMPFILE):
        results = pickle.load(open(DUMPFILE, 'rb'))
    else:
        results = dict()
    if os.path.isfile(MICRO_DUMPFILE):
        micro_results = pickle.load(open(MICRO_DUMPFILE, 'rb'))
    else:
        micro_results = dict()

    # Delete old log files so this run's logs start fresh
    if os.path.isfile(LOGFILE):
        os.remove(LOGFILE)
    if os.path.isfile(MICRO_LOGFILE):
        os.remove(MICRO_LOGFILE)

    # Run experiments (--small restricts to the first group only)
    groups = ALL_BENCHMARKS[:1] if cl_opts.small else ALL_BENCHMARKS
    if cl_opts.rerun:
        # FIX: BenchmarkGroup is not iterable -- iterate its .benchmarks list.
        # The original `for b in g` raised TypeError whenever --rerun was used.
        bs = [b for g in groups for b in g.benchmarks if b.name == cl_opts.rerun[0]]
        for b in bs:
            print(b.str())
            run_benchmark(b.name, b.options, [])
            # Checkpoint after every benchmark so a crash loses at most one result
            with open(DUMPFILE, 'wb') as data_dump:
                pickle.dump(results, data_dump)
    if cl_opts.rerun_micro:
        bs = [b for b in MICRO_BENCHMARKS if b.name == cl_opts.rerun_micro[0]]
        for b in bs:
            print(b.str())
            run_micro_benchmark(b.name, b.options, [], b.eac, b.incremental)
            with open(MICRO_DUMPFILE, 'wb') as data_dump:
                pickle.dump(micro_results, data_dump)
    else:
        # Full run: every benchmark not already present in the pickled results
        for group in groups:
            for b in group.benchmarks:
                if b.name in results:
                    print(b.str() + Back.YELLOW + Fore.YELLOW + Style.BRIGHT + 'SKIPPED' + Style.RESET_ALL)
                else:
                    print(b.str())
                    run_benchmark(b.name, b.options, group.default_options)
                    with open(DUMPFILE, 'wb') as data_dump:
                        pickle.dump(results, data_dump)
        for b in MICRO_BENCHMARKS:
            if b.name in micro_results:
                print(b.str() + Back.YELLOW + Fore.YELLOW + Style.BRIGHT + 'SKIPPED' + Style.RESET_ALL)
            else:
                print(b.str())
                run_micro_benchmark(b.name, b.options, [], b.eac, b.incremental)
                with open(MICRO_DUMPFILE, 'wb') as data_dump:
                    pickle.dump(micro_results, data_dump)

    med_slowdown = median([results[b.name].pct_slowdown for g in groups for b in g.benchmarks])
    print('Median slowdown = ' + str(med_slowdown))
    # Generate CSV
    write_csv()
    # Generate Latex table
    write_latex()
    write_micro_csv()
    write_micro_latex()
    # Compare with previous solutions and print the diff
    if os.path.isfile(ORACLE_FILE) and (not cl_opts.small):
        fromlines = open(ORACLE_FILE).readlines()
        # FIX: the 'U' open mode was removed in Python 3.11; universal newlines
        # is the default for text mode anyway, so plain open() is equivalent.
        tolines = open(LOGFILE).readlines()
        diff = difflib.unified_diff(fromlines, tolines, n=0)
        print()
        sys.stdout.writelines(diff)
    if os.path.isfile(MICRO_ORACLE_FILE) and (not cl_opts.small):
        fromlines = open(MICRO_ORACLE_FILE).readlines()
        tolines = open(MICRO_LOGFILE).readlines()
        diff = difflib.unified_diff(fromlines, tolines, n=0)
        print()
        sys.stdout.writelines(diff)
    # Copy results to paper directory (NOTE: hard-coded local path in PAPER_PATH)
    shutil.copy('./' + LATEX_FILE, PAPER_PATH + LATEX_FILE)
    shutil.copy('./' + MICRO_LATEX_FILE, PAPER_PATH + MICRO_LATEX_FILE)
| 50.816614 | 185 | 0.555072 | 2,807 | 0.08658 | 0 | 0 | 0 | 0 | 0 | 0 | 13,481 | 0.415811 |
2055fa63f57d0f09cffaf4a00ba26911a5d36c1b | 977 | py | Python | plantcv/plantcv/hist_equalization.py | Howzit123/plantcv | b4ff6ad765da36353f40827ce3816b33d1d3596a | [
"MIT"
] | 2 | 2021-08-20T14:56:48.000Z | 2021-08-24T23:12:56.000Z | plantcv/plantcv/hist_equalization.py | Howzit123/plantcv | b4ff6ad765da36353f40827ce3816b33d1d3596a | [
"MIT"
] | null | null | null | plantcv/plantcv/hist_equalization.py | Howzit123/plantcv | b4ff6ad765da36353f40827ce3816b33d1d3596a | [
"MIT"
] | 1 | 2020-02-27T21:22:13.000Z | 2020-02-27T21:22:13.000Z | # Histogram equalization
import cv2
import numpy as np
import os
from plantcv.plantcv import print_image
from plantcv.plantcv import plot_image
from plantcv.plantcv import fatal_error
from plantcv.plantcv import params
def hist_equalization(gray_img):
"""Histogram equalization is a method to normalize the distribution of intensity values. If the image has low
contrast it will make it easier to threshold.
Inputs:
gray_img = Grayscale image data
Returns:
img_eh = normalized image
:param gray_img: numpy.ndarray
:return img_eh: numpy.ndarray
"""
if len(np.shape(gray_img)) == 3:
fatal_error("Input image must be gray")
img_eh = cv2.equalizeHist(gray_img)
params.device += 1
if params.debug == 'print':
print_image(img_eh, os.path.join(params.debug_outdir, str(params.device) + '_hist_equal_img.png'))
elif params.debug == 'plot':
plot_image(img_eh, cmap='gray')
return img_eh
| 26.405405 | 113 | 0.711361 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 426 | 0.436029 |
2057865edd4337026cdfafcc62a8e73e95d64e8e | 274 | py | Python | ehub/conftest.py | teofiln/ehub | 8412ac878f80bfbba2d4b2b44d895cfa07c44319 | [
"MIT"
] | null | null | null | ehub/conftest.py | teofiln/ehub | 8412ac878f80bfbba2d4b2b44d895cfa07c44319 | [
"MIT"
] | null | null | null | ehub/conftest.py | teofiln/ehub | 8412ac878f80bfbba2d4b2b44d895cfa07c44319 | [
"MIT"
] | null | null | null | import pytest
from ehub.users.models import User
from ehub.users.tests.factories import UserFactory
@pytest.fixture(autouse=True)
def media_storage(settings, tmpdir):
    # Redirect Django's MEDIA_ROOT to a per-test temporary directory so tests
    # never write uploads into the real media folder; autouse applies it to
    # every test automatically.
    settings.MEDIA_ROOT = tmpdir.strpath
@pytest.fixture
def user() -> User:
    # A fresh User built via factory_boy for any test that requests `user`.
    return UserFactory()
| 18.266667 | 50 | 0.770073 | 0 | 0 | 0 | 0 | 167 | 0.609489 | 0 | 0 | 0 | 0 |
2058cf52b519e3fe86186af663b8941208fe3b37 | 1,448 | py | Python | Sort/Merge.py | sywh/algorithms | 8df353aa6716f2ed83e9167a27ceb6d3384891da | [
"MIT"
] | null | null | null | Sort/Merge.py | sywh/algorithms | 8df353aa6716f2ed83e9167a27ceb6d3384891da | [
"MIT"
] | null | null | null | Sort/Merge.py | sywh/algorithms | 8df353aa6716f2ed83e9167a27ceb6d3384891da | [
"MIT"
] | null | null | null | from Sort.Example import Example
class Merge(Example):
    """Top-down (recursive) mergesort; comparison helper `less` comes from Example."""

    def __init__(self) -> None:
        super().__init__()

    def sort(self, a):
        """Sort list `a` in place using recursive mergesort."""
        # create aux just once
        # (a single shared scratch array avoids allocating a list per merge)
        self.aux = [None for i in range(len(a))]
        self._sort(a, 0, len(a) - 1)

    def _sort(self, a, lo, hi):
        # Sort a[lo..hi] by sorting both halves recursively, then merging them.
        if lo >= hi:
            return
        mid = lo + (hi - lo) // 2  # overflow-safe midpoint idiom
        self._sort(a, lo, mid)
        self._sort(a, mid + 1, hi)
        self.merge(a, lo, mid, hi)

    def merge(self, a, lo, mid, hi):
        # Merge the two sorted runs a[lo..mid] and a[mid+1..hi] in place,
        # copying the span into self.aux first and taking the smaller head
        # of the two runs on each step.
        # NOTE(review): on ties the RIGHT element is taken first
        # (less(aux[i], aux[j]) is False for equal keys), so this merge is
        # not stable -- the canonical stable form compares less(aux[j], aux[i]).
        for i in range(lo, hi + 1):
            self.aux[i] = a[i]
        i, j = lo, mid + 1
        for k in range(lo, hi + 1):
            if i > mid:        # left run exhausted: drain the right run
                a[k] = self.aux[j]
                j += 1
            elif j > hi:       # right run exhausted: drain the left run
                a[k] = self.aux[i]
                i += 1
            elif self.less(self.aux[i], self.aux[j]):
                a[k] = self.aux[i]
                i += 1
            else:
                a[k] = self.aux[j]
                j += 1
class MergeBU(Merge):
    """Bottom-up (iterative) mergesort: merges runs of size 1, 2, 4, ... reusing
    Merge.merge for the pairwise merges."""

    def __init__(self) -> None:
        super().__init__()

    def sort(self, a): # Great implementation !
        self.aux = [None for i in range(len(a))]
        N = len(a)
        sz = 1  # current run length
        while sz < N:
            lo = 0
            # Merge adjacent run pairs [lo..lo+sz-1] and [lo+sz..lo+2sz-1];
            # the final right run may be short, hence the min() clamp.
            while lo < N - sz: # must ensure lo, lo+sz-1, lo+2*sz-1 <= N-1
                self.merge(a, lo, lo + sz - 1, min(lo + 2 * sz - 1, N - 1))
                lo += 2 * sz
            sz += sz  # double the run length each pass
| 25.403509 | 75 | 0.410912 | 1,409 | 0.973066 | 0 | 0 | 0 | 0 | 0 | 0 | 89 | 0.061464 |
205949db776405b8c267e6e455da30481aae2d57 | 916 | py | Python | crowdsourcing/permissions/user.py | Kyeongan/crowdsource-platform | af34363158ff30ebfdade4a543648bf26a3c9698 | [
"MIT"
] | 138 | 2015-04-17T20:07:12.000Z | 2017-05-03T17:58:47.000Z | crowdsourcing/permissions/user.py | cescgie/crowdi | c16ab625f27915919e21f7eec93c45af551d9022 | [
"MIT"
] | 657 | 2015-04-19T04:54:51.000Z | 2017-06-26T18:07:42.000Z | crowdsourcing/permissions/user.py | cescgie/crowdi | c16ab625f27915919e21f7eec93c45af551d9022 | [
"MIT"
] | 311 | 2015-04-16T19:20:55.000Z | 2017-06-13T05:32:01.000Z | from rest_framework import permissions
from csp import settings
from rest_framework.exceptions import PermissionDenied
class IsWorker(permissions.BasePermission):
    """Grants view-level access only to users whose profile is flagged as a worker."""

    def has_permission(self, request, view):
        # Assumes request.user has a related `profile`; an anonymous user would
        # raise AttributeError here -- TODO confirm authentication runs first.
        return request.user.profile.is_worker
class IsRequester(permissions.BasePermission):
    """Grants object-level access only to users whose profile is flagged as a requester."""

    def has_object_permission(self, request, view, obj):
        # FIX: third parameter renamed from `object` (which shadowed the builtin)
        # to DRF's conventional `obj`; DRF calls this hook positionally, so the
        # rename is caller-compatible. The object itself is not inspected --
        # only the requesting user's role decides.
        return request.user.profile.is_requester
class CanCreateAccount(permissions.BasePermission):
    """Blocks the `create` action while registration is closed (closed beta)."""

    def has_permission(self, request, view):
        # Staff may always create accounts; everyone may once
        # settings.REGISTRATION_ALLOWED is enabled. Other actions pass through.
        if view.action == 'create' and not (request.user.is_staff or settings.REGISTRATION_ALLOWED):
            raise PermissionDenied(detail='We are currently in closed beta. '
                                          'If you\'d like an account, email support@daemo.org '
                                          'with a short description of what you\'d like to use Daemo for.')
        return True
| 39.826087 | 107 | 0.689956 | 788 | 0.860262 | 0 | 0 | 0 | 0 | 0 | 0 | 160 | 0.174672 |
205a30215e693e91361fba6a10043eebc790a8b7 | 278 | py | Python | web/setup.py | ISTU-Labs/pt-2271-2018 | 4b35f9265420604a6c0d83e5af83936674448185 | [
"Apache-2.0"
] | null | null | null | web/setup.py | ISTU-Labs/pt-2271-2018 | 4b35f9265420604a6c0d83e5af83936674448185 | [
"Apache-2.0"
] | null | null | null | web/setup.py | ISTU-Labs/pt-2271-2018 | 4b35f9265420604a6c0d83e5af83936674448185 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
requires = [
'pyramid',
'waitress',
'python-dateutil'
]
setup(name='hello',
install_requires=requires,
package_dir={'': "hello"},
entry_points="""\
[paste.app_factory]
main = hello:main
""",
)
| 16.352941 | 32 | 0.564748 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.417266 |
64458d6c26d3a77fed375b45eb8d7b157c476d26 | 1,025 | py | Python | main.py | trollerfreak331/pornhub-pluenderer | ff4260cfbe78fa36d8cd9ab8edd571fbb292e708 | [
"MIT"
] | 12 | 2018-04-12T06:54:20.000Z | 2021-10-03T10:41:38.000Z | main.py | trollerfreak331/pornhub-pluenderer | ff4260cfbe78fa36d8cd9ab8edd571fbb292e708 | [
"MIT"
] | 3 | 2017-12-01T11:47:01.000Z | 2017-12-01T15:02:53.000Z | main.py | trollerfreak331/pornhub-pluenderer | ff4260cfbe78fa36d8cd9ab8edd571fbb292e708 | [
"MIT"
] | 5 | 2018-04-14T04:02:11.000Z | 2019-12-22T07:52:50.000Z | import sys
import signal
from clint.textui import colored, puts
from downloader import Downloader
from extractor import Extractor
signal.signal(signal.SIGINT, lambda x, y: sys.exit(0))
def main():
downloader = Downloader()
extractor = Extractor()
url = "https://pornhub.com"
puts(colored.green("getting video keys."))
main_page = downloader.get(url)
view_keys = extractor.get_viewkeys(main_page)
puts(colored.green("starting to download videos."))
for key in view_keys:
puts(colored.green("getting video information."))
absolute_url = "https://pornhub.com/view_video.php?viewkey=" + key
page = downloader.get(absolute_url)
info = extractor.get_video_info(page)
if info is None:
continue
hd_quality = info['mediaDefinitions'][0]
puts(colored.green("downloading video %s." % info['video_title']))
downloader.save_file(hd_quality["videoUrl"], info['video_title'] + ".mp4")
if __name__ == "__main__":
main()
| 28.472222 | 82 | 0.675122 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 238 | 0.232195 |
6446415ff2008281daa42e848a48f3cd5ca8c4ae | 6,259 | py | Python | dankbindings.py | actualdankcoder/erindashboard | 5c1e6e99e0280899b9150d0079e2dafc4d9b3818 | [
"MIT"
] | 1 | 2021-07-10T22:22:48.000Z | 2021-07-10T22:22:48.000Z | dankbindings.py | actualdankcoder/erindashboard | 5c1e6e99e0280899b9150d0079e2dafc4d9b3818 | [
"MIT"
] | 3 | 2021-07-10T05:08:51.000Z | 2021-07-13T01:24:48.000Z | dankbindings.py | actualdankcoder/erindashboard | 5c1e6e99e0280899b9150d0079e2dafc4d9b3818 | [
"MIT"
] | null | null | null | from cryptography.fernet import Fernet
import os
import discord
import aiohttp
import secrets
from urllib.parse import quote
from dotenv import load_dotenv
load_dotenv()
class OAuth:
def __init__(self):
# User Provided Data
self.client_id = os.getenv("CID")
self.client_secret = os.getenv("CIS")
self.scope = ["identify", "guilds"]
# Generated Data
self.redirect_uri = "http://localhost:5000/callback"
self.oauth_base = "https://discord.com/api/oauth2"
self.discord_login_url = self.generate_login_url()
self.discord_token_url = self.oauth_base+"/token"
self.discord_api_url = "https://discord.com/api"
self.bot_invite_url = self.generate_bot_invite_url()
self.client = None
async def create_client(self):
self.client = aiohttp.ClientSession()
def generate_login_url(self):
# Generate a login url, used during authorization
return self.oauth_base+"/authorize?client_id={}&redirect_uri={}&response_type=code&scope={}".format(
self.client_id, quote(self.redirect_uri), quote(" ".join(self.scope)))
def generate_bot_invite_url(self):
# Generate a bot invite url, used in the guild selection page
return self.oauth_base+"/authorize?client_id={}&permissions=0&redirect_uri={}&scope=bot".format(
self.client_id, quote(self.redirect_uri))
async def getaccesstoken(self, code):
# Get a user access token required to make api calls with discord
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'authorization_code',
'code': code,
'redirect_uri': self.redirect_uri,
'scope': " ".join(self.scope)
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
token = await self.client.post(self.discord_token_url,
data=data, headers=headers)
return await token.json()
async def refreshtoken(self, refresh_token):
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
'grant_type': 'refresh_token',
'refresh_token': refresh_token
}
headers = {
'Content-Type': 'application/x-www-form-urlencoded'
}
r = await self.client.post('%s/oauth2/token' % self.discord_api_url, data=data, headers=headers)
return await r.json()
async def getuser(self, accesstoken):
# Fetch user using an access token
url = self.discord_api_url+"/users/@me"
headers = {
'Authorization': 'Bearer {}'.format(accesstoken)
}
payload = await self.client.get(url, headers=headers)
data = await payload.json()
user = User(
name=data["username"],
userid=data["id"],
discriminator=data["discriminator"],
avatar=data["avatar"]
)
return user
async def getuserguilds(self, accesstoken):
# Fetch user guilds using an access token
url = self.discord_api_url+"/users/@me/guilds"
headers = {
'Authorization': 'Bearer {}'.format(accesstoken)
}
payload = await self.client.get(url, headers=headers)
data = await payload.json()
guilds = [
Guild(
name=i["name"],
guildid=i["id"],
features=i["features"],
icon=i["icon"],
owner=i["owner"],
permissions=i["permissions"]
) for i in data
]
return guilds
# Abstract Classes
class User:
'''
Encryption System:
On First Authentication:
key, session_id
[Server] -----------------> [Client]
* Server encrypts the access token using a special key,
tells the client about the key and then forgets it.
On succeding requests:
key, session_id
[Client] -----------------> [Server]
* Client requests the corresponding session from the server,
verifies their integrity using the special key. If the key
is invalid, server resets their session in order to protect
fraudulent attempts.
'''
def __init__(self, name, discriminator, userid, avatar):
self.name = name
self.discriminator = int(discriminator)
self.id = int(userid)
self.avatar_hash = avatar
self.session_id = None
self.access_token = None
def __repr__(self):
return f"{self.name}#{self.discriminator}"
def __str__(self):
return f"{self.name}#{self.discriminator}"
def avatar_url(self, size=256):
return f"https://cdn.discordapp.com/avatars/{self.id}/{self.avatar_hash}.png?size={size}"
def retrieve_access_token(self, key):
F = Fernet(key.encode("utf-8"))
try:
return F.decrypt(self.access_token)
except:
return False
def set_access_token(self, access_token):
key = Fernet.generate_key()
F = Fernet(key)
self.access_token = F.encrypt(access_token)
return key.decode("utf-8")
def create_session(self, access_token):
self.session_id=secrets.token_urlsafe(nbytes=16)
self.set_access_token(access_token)
class Guild:
def __init__(self, name, guildid, features, icon, owner, permissions):
self.name = name
self.id = int(guildid)
self.features = features
self.icon_hash = icon
self.is_owner = owner
self.permissions = discord.Permissions(permissions=int(permissions))
def __repr__(self):
return f"{self.name}"
def __str__(self):
return f"{self.name}"
def icon_url(self, size=256):
return f"https://cdn.discordapp.com/icons/{self.id}/{self.icon_hash}.png?size={size}"
class SessionHandler:
'''
This class will handle caching and loading dashboard sessions
it will be exposed via the OAuth Object, Mongo DB is required
'''
pass | 33.470588 | 108 | 0.596741 | 6,063 | 0.968685 | 0 | 0 | 0 | 0 | 2,371 | 0.378815 | 2,102 | 0.335836 |
6447533b56e44b1bd8d4cefebf7b0330a82c8a36 | 17,428 | py | Python | scripts/search/agr_mapping.py | dougli1sqrd/SGDBackend-Nex2 | 2ecb2436db142cf08c6f2dbab6b115a394116632 | [
"MIT"
] | 5 | 2015-11-24T23:09:46.000Z | 2019-11-06T17:48:13.000Z | scripts/search/agr_mapping.py | dougli1sqrd/SGDBackend-Nex2 | 2ecb2436db142cf08c6f2dbab6b115a394116632 | [
"MIT"
] | 188 | 2017-08-28T22:39:03.000Z | 2022-03-02T14:53:46.000Z | scripts/search/agr_mapping.py | dougli1sqrd/SGDBackend-Nex2 | 2ecb2436db142cf08c6f2dbab6b115a394116632 | [
"MIT"
] | 7 | 2018-05-13T01:58:07.000Z | 2021-06-25T19:08:33.000Z | mapping = {
"settings": {
"index": {
"max_result_window": 15000,
"analysis": {
"analyzer": {
"default": {
"type": "custom",
"tokenizer": "whitespace",
"filter": ["english_stemmer", "lowercase"]
},
"autocomplete": {
"type": "custom",
"tokenizer": "whitespace",
"filter": ["lowercase", "autocomplete_filter"]
},
"symbols": {
"type": "custom",
"tokenizer": "whitespace",
"filter": ["lowercase"]
}
},
"filter": {
"english_stemmer": {
"type": "stemmer",
"language": "english"
},
"autocomplete_filter": {
"type": "edge_ngram",
"min_gram": "1",
"max_gram": "20"
}
}
},
"number_of_replicas": "1", #temporarily
"number_of_shards": "5"
}
},
"mappings": { # having a raw field means it can be a facet or sorted by
"searchable_item": {
"properties": {
"name": {
"type": "string",
"fields": {
"symbol": {
"type": "string",
"analyzer": "symbols"
},
"raw": {
"type": "string",
"index": "not_analyzed"
},
"autocomplete": {
"type": "string",
"analyzer": "autocomplete"
}
}
},
"category": {
"type": "string",
"analyzer": "symbols"
},
"href": {
"type": "string",
"analyzer": "symbols"
},
"description": {
"type": "string"
},
"first_name": {
"type": "string",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"last_name": {
"type": "string",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"institution": {
"type": "string",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"position": {
"type": "string",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"country": {
"type": "string",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"state": {
"type": "string",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"colleague_loci": {
"type": "string",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
},
"symbol": {
"type": "string",
"analyzer": "symbols"
}
}
},
"number_annotations": {
"type": "integer"
},
"feature_type": {
"type": "string",
"fields": {
"symbol": {
"type": "string",
"analyzer": "symbols"
},
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"name_description": {
"type": "string"
},
"summary": {
"type": "string"
},
"phenotypes": {
"type": "string",
"fields": {
"symbol": {
"type": "string",
"analyzer": "symbols"
}
}
},
"cellular_component": {
"type": "string",
"fields": {
"symbol": {
"type": "string",
"analyzer": "symbols"
},
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"biological_process": {
"type": "string",
"fields": {
"symbol": {
"type": "string",
"analyzer": "symbols"
},
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"molecular_function": {
"type": "string",
"fields": {
"symbol": {
"type": "string",
"analyzer": "symbols"
},
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"ec_number": {
"type": "string",
"analyzer": "symbols"
},
"protein": {
"type": "string",
"fields": {
"symbol": {
"type": "string",
"analyzer": "symbols"
}
}
},
"tc_number": {
"type": "string",
"analyzer": "symbols"
},
"secondary_sgdid": {
"type": "string",
"analyzer": "symbols"
},
"sequence_history": {
"type": "string",
"analyzer": "symbols"
},
"gene_history": {
"type": "string",
"analyzer": "symbols"
},
"bioentity_id": {
"type": "string",
"analyzer": "symbols"
},
"keys": {
"type": "string",
"analyzer": "symbols"
},
"status": {
"type": "string",
"analyzer": "symbols",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"observable": {
"type": "string",
"fields": {
"symbol": {
"type": "string",
"analyzer": "symbols"
},
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"qualifier": {
"type": "string",
"fields": {
"symbol": {
"type": "string",
"analyzer": "symbols"
},
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"references": {
"type": "string",
"analyzer": "symbols",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"phenotype_loci": {
"type": "string",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
},
"symbol": {
"type": "string",
"analyzer": "symbols"
}
}
},
"chemical": {
"type": "string",
"fields": {
"symbol": {
"type": "string",
"analyzer": "symbols"
},
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"mutant_type": {
"type": "string",
"fields": {
"symbol": {
"type": "string",
"analyzer": "symbols"
},
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"synonyms": {
"type": "string"
},
"go_id": {
"type": "string",
"analyzer": "symbols"
},
"go_loci": {
"type": "string",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
},
"symbol": {
"type": "string",
"analyzer": "symbols"
}
}
},
"author": {
"type": "string",
"analyzer": "symbols",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"journal": {
"type": "string",
"analyzer": "symbols",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"year": {
"type": "string",
"analyzer": "symbols",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"reference_loci": {
"type": "string",
"analyzer": "symbols",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
}
}
},
"aliases": {
"type": "string",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
},
"symbol": {
"type": "string",
"analyzer": "symbols"
}
}
},
"format": {
"type": "string",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
},
"symbol": {
"type": "string",
"analyzer": "symbols"
}
}
},
"keyword": {
"type": "string",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
},
"symbol": {
"type": "string",
"analyzer": "symbols"
}
}
},
"file_size": {
"type": "string",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
},
"symbol": {
"type": "string",
"analyzer": "symbols"
}
}
},
"readme_url": {
"type": "string",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
},
"symbol": {
"type": "string",
"analyzer": "symbols"
}
}
},
"topic": {
"type": "string",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
},
"symbol": {
"type": "string",
"analyzer": "symbols"
}
}
},
"data": {
"type": "string",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
},
"symbol": {
"type": "string",
"analyzer": "symbols"
}
}
},
"is_quick_flag": {
"type": "string",
"fields": {
"raw": {
"type": "string",
"index": "not_analyzed"
},
"symbol": {
"type": "string",
"analyzer": "symbols"
}
}
}
}
}
}
}
| 34.105675 | 75 | 0.204441 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,638 | 0.266123 |
6447640aa52e97e82796d679333b4a3d179ae7bb | 24,311 | py | Python | ryu/ryu/app/Ryuretic/Ryuretic_Intf_v6.py | Ryuretic/RAP | 7b0e58af7d8a932770e3c7f7620024e16992b531 | [
"Apache-2.0"
] | 2 | 2019-09-16T17:52:31.000Z | 2021-06-24T17:45:01.000Z | ryu/ryu/app/Ryuretic/Ryuretic_Intf_v6.py | Ryuretic/RAP | 7b0e58af7d8a932770e3c7f7620024e16992b531 | [
"Apache-2.0"
] | null | null | null | ryu/ryu/app/Ryuretic/Ryuretic_Intf_v6.py | Ryuretic/RAP | 7b0e58af7d8a932770e3c7f7620024e16992b531 | [
"Apache-2.0"
] | 3 | 2019-09-23T07:21:40.000Z | 2021-03-03T13:24:25.000Z | #########################################################################
# Ryuretic: A Modular Framework for RYU #
# !/ryu/ryu/app/Ryuretic/Ryuretic_Intf.py #
# Authors: #
# Jacob Cox (jcox70@gatech.edu) #
# Sean Donovan (sdonovan@gatech.edu) #
# Ryuretic_Intf.py #
# date 28 April 2016 #
#########################################################################
# Copyright (C) 2016 Jacob Cox - All Rights Reserved #
# You may use, distribute and modify this code under the #
# terms of the Ryuretic license, provided this work is cited #
# in the work for which it is used. #
# For latest updates, please visit: #
# https://github.com/Ryuretic/RAP #
#########################################################################
"""How To Run This Program
1) Ensure you have Ryu installed.
2) Save the following files to /home/ubuntu/ryu/ryu/app/Ryuretic directory
a) Ryuretic_Intf.py
b) Ryuretic.py
c) Pkt_Parse13.py
d) switch_mod13.py
3) In your controller terminal type: cd ryu
4) Enter PYTHONPATH=. ./bin/ryu-manager ryu/app/Ryuretic/Ryuretic_Intf_v1.py
"""
#########################################################################
from Ryuretic import coupler
#################1 Import Needed Libraries 1###################
#[1] Import needed libraries here #
#########################################################################
import string, random
class Ryuretic_coupler(coupler):
    def __init__(self, *args, **kwargs):
        """Initialize controller identity and the state tables shared by the
        packet handlers below (flag tables, redirect tables, TCP trackers)."""
        super(Ryuretic_coupler, self).__init__(*args, **kwargs)
        ############## 2  Add User Variables  2  ###################
        #[2] Add new global variables here.                             #
        #    Ex. ICMP_ECHO_REQUEST = 8, self.netView = {}               #
        #################################################################
        # Controller's own MAC/IP, used when crafting ARP/ICMP replies.
        self.cntrl = {'mac':'ca:ca:ca:ad:ad:ad','ip':'192.168.0.40','port':None}
        # The one NAT device whose decremented TTLs are considered legitimate
        # (see TTL_Check).
        self.validNAT = {'mac':'aa:aa:aa:aa:aa:aa','ip':'192.168.0.224'}
        self.t_agentIP = '192.168.0.1'  # IP the trusted agent contacts us from
        self.t_agent = {} #Records TA parameters from respond_to_ping ('i' init)
        self.dns_tbl = {} #(ip, port) -> original DNS destination (redirect_DNS)
        self.tcp_tbl = {} #(ip, port) -> original TCP destination (redirect_TCP)
        self.port_mac_map = {} #port -> first MAC seen (Multi_MAC_Checker)
        self.port_AV = {} #port -> running average time-to-ACK (TTA_analysis)
        self.tta = {} #TCP handshake tracker keyed by (src,srcip,srcport,dstip)
        self.tcpConnCount = 0 #Future var for tracking total TCP connections
        self.policyTbl = {} #keyID -> policy applied to a flagged port/mac/ip
        self.netView = {} #Maps switch connections by port,mac,ip
        self.portTbl, self.macTbl, self.ipTbl = {},{},{}
        self.testIP = '0.0.0.0' #'192.168.0.22'  # IP flagged by test_TCP/test_DNS
        #self.portTbl[9]='test'
        #self.macTbl['aa:aa:aa:aa:00:22'] = 'test'
        #self.ipTbl['192.168.0.22'] = 'test'
        # Next policy-table key assigned when a host is flagged (flagHost).
        self.keyID = 101
        # NOTE(review): the two names below are unused locals inside __init__,
        # not class constants as the naming suggests -- presumably intended
        # for ICMP type checks elsewhere; confirm before relying on them.
        ICMP_ECHO_REPLY = 0
        ICMP_ECHO_REQUEST = 8
################ 3 Proactive Rule Sets 3 ###################
#[3] Insert proactive rules defined below. Follow format below #
# Options include drop or redirect, fwd is the default. #
#####################################################################
def get_proactive_rules(self, dp, parser, ofproto):
return None, None
#fields, ops = self.honeypot(dp, parser, ofproto)
#return fields, ops
################# 4 Reactive Rule Sets 4 #####################
#[4] use below handles to direct packets to reactive user modules #
# defined in location #[5]. If no rule is added, then #
# the default self.default_Fields_Ops(pkt) must be used #
#####################################################################
# Determine highest priority fields and ops pair, if needed #
# xfields = [fields0, fields1, fields2] #
# xops = [ops0, ops1, ops2] #
# fields,ops = self._build_FldOps(xfields,xops) #
#####################################################################
def handle_eth(self,pkt):
print "Handle Ether: ", pkt['srcmac'],'->',pkt['dstmac']
fields, ops = self.default_Field_Ops(pkt)
self.install_field_ops(pkt,fields,ops)
#def handle_arp(self,pkt):
#print "-------------------------------------------------------------"
#print "Handle ARP: ",pkt['srcmac'],"->",pkt['dstmac']
#print "Handle ARP: ",pkt['srcip'],"->",pkt['dstip']
#fields, ops = self.respond_to_arp(pkt)
##Determin if mac or port has a status
##pkt_status = self.check_net_tbl(pkt['srcmac'],pkt['inport'])
##print pkt_status
#self.install_field_ops(pkt,fields,ops)
def handle_arp(self,pkt):
print "-------------------------------------------------------------"
print "Handle ARP: ",pkt['srcmac'],"->",pkt['dstmac']
print "Handle ARP: ",pkt['srcip'],"->",pkt['dstip']
fields, ops = self.respond_to_arp(pkt)
self.install_field_ops(pkt,fields,ops)
def handle_ip(self,pkt):
print "-------------------------------------------------------------"
print "Handle IP"
#fields, ops = self.TTL_Check(pkt) #Lab 9
fields, ops = self.default_Field_Ops(pkt)
self.install_field_ops(pkt,fields,ops)
def handle_icmp(self,pkt):
print "-------------------------------------------------------------"
print "Handle ICMP: ",pkt['srcmac'],"->",pkt['dstmac']
print "Handle ICMP: ",pkt['srcip'],"->",pkt['dstip']
fields,ops = self.respond_to_ping(pkt)
self.install_field_ops(pkt, fields, ops)
def handle_tcp(self,pkt):
#print "-------------------------------------------------------------"
#print "Handle TCP: ",pkt['srcmac'],"->",pkt['dstmac']
#print "Handle TCP: ",pkt['srcip'],"->",pkt['dstip']
#print "Handle TCP: ",pkt['srcport'],"->",pkt['dstport']
pkt_status = self.check_ip_tbl(pkt)
if pkt_status == 'test': #test src and dest
fields,ops = self.redirect_TCP(pkt)
elif pkt_status == 'deny':
fields,ops = self.redirect_TCP(pkt)
else:
#fields,ops = self.default_Field_Ops(pkt)
#fields,ops = self.test_TCP(pkt)
fields,ops = self.TTA_analysis(pkt)
self.install_field_ops(pkt, fields, ops)
def test_TCP(self,pkt):
fields,ops = self.default_Field_Ops(pkt)
if pkt['srcip'] == self.testIP:
print "IP detected: ", pkt['srcip']
self.flagHost(pkt,'test')
fields,ops=self.redirect_TCP(pkt)
return fields,ops
return fields,ops
    def redirect_TCP(self,pkt):
        """Divert TCP flows of flagged ('test'/'deny') hosts via the trusted agent.

        Outbound packets from a flagged source IP are rewritten toward the
        trusted agent and the flow's real destination is remembered in
        self.tcp_tbl keyed by (srcip, srcport).  Return traffic addressed to
        a flagged host has its source rewritten back to that remembered
        destination, keeping the diversion transparent to the host.
        """
        print "Redirect_TCP: "
        print "pkt info: ", pkt['srcmac'],' ',pkt['dstmac'],' ',pkt['srcip'],' ',pkt['dstip']
        print pkt['srcport'],' ',pkt['dstport']
        # Uses ipTbl (flag lookup), tcp_tbl (flow memory), t_agent (target).
        fields,ops = self.default_Field_Ops(pkt)
        if self.ipTbl.has_key(pkt['srcip']):
            if self.ipTbl[pkt['srcip']] in ['test','deny']:
                print "ipTbl Contents", self.ipTbl
                # Remember the real destination so replies can be rewritten
                # on the way back.
                key = (pkt['srcip'],pkt['srcport'])
                print "Key is : ", key
                self.tcp_tbl[key] = {'dstip':pkt['dstip'],'dstmac':pkt['dstmac'],
                                     'dstport':pkt['dstport']}
                fields.update({'srcmac':pkt['srcmac'],'srcip':pkt['srcip']})
                fields.update({'dstmac':self.t_agent['mac'],'dstip':self.t_agent['ip']})
                #if pkt['dstport'] == 443:
                    #fields['dstport'] = 80
                # High-priority 'mod': rewrite headers and emit on the
                # trusted agent's switch port.
                ops = {'hard_t':None, 'idle_t':None, 'priority':100,\
                       'op':'mod', 'newport':self.t_agent['port']}
                print "TCP Table: ", self.tcp_tbl[key]
        elif self.ipTbl.has_key(pkt['dstip']):
            # Return path: traffic headed back to a flagged host.
            print "Returning to ", pkt['dstip']
            if self.ipTbl[pkt['dstip']] in ['test','deny']:
                print "ipTbl Contents", self.ipTbl
                key = (pkt['dstip'],pkt['dstport'])
                # NOTE(review): raises KeyError if no outbound packet was
                # recorded for this flow (e.g. after a controller restart).
                print "Key and table: ", key, ' ', self.tcp_tbl[key]
                fields.update({'srcmac':self.tcp_tbl[key]['dstmac'],
                               'srcip':self.tcp_tbl[key]['dstip']})
                #if self.tcp_tbl[key]['dstport'] == 443:
                    #fields.update({'srcport':443})
                fields.update({'dstmac':pkt['dstmac'], 'dstip':pkt['dstip']})
                ops = {'hard_t':None, 'idle_t':None, 'priority':100,\
                       'op':'mod', 'newport':None}
                #self.tcp_tbl.pop(key)
                #print "TCP Table: ", self.tcp_tbl
        return fields, ops
# Add flag to policyTbl, macTbl, portTbl
def flagHost(self,pkt,flag):
print 'Flag Host: ', pkt['srcmac'],'->',flag
self.macTbl[pkt['srcmac']]={'stat':flag,'port':pkt['inport'],
'ip':pkt['srcip']}
self.portTbl[pkt['inport']]=flag
self.ipTbl[pkt['srcip']] = flag
if flag != 'norm':
keyID = self.keyID
self.keyID += 1
#create passkey
passkey =''.join(random.choice(string.ascii_letters) for x in range(8))
#update policy table
self.policyTbl[keyID]={'inport':pkt['inport'],'srcmac':pkt['srcmac'],
'ip':pkt['srcip'],'passkey':passkey,'stat':flag}
#Notify trusted agent of newly flagged client
self.update_TA(pkt, keyID, 'l') #load message'
def handle_udp(self,pkt):
print "-------------------------------------------------------------"
print "Handle UDP: ",pkt['srcmac'],"->",pkt['dstmac']
print "Handle UDP: ",pkt['srcip'],'->',pkt['dstip']
#Added to build MAC and port associations
pkt_status = self.check_ip_tbl(pkt)
if pkt_status == 'test': #test src and dest
fields,ops = self.redirect_DNS(pkt)
elif pkt_status == 'deny':
fields,ops = self.redirect_DNS(pkt)
else:
fields,ops = self.test_DNS(pkt)
self.install_field_ops(pkt, fields, ops)
def test_DNS(self,pkt):
print "Testing DNS"
fields,ops = self.default_Field_Ops(pkt)
if pkt['srcip'] == self.testIP:
print "IP detected: ", pkt['srcip']
self.flagHost(pkt,'test')
fields,ops=self.redirect_DNS(pkt)
return fields,ops
return fields,ops
def redirect_DNS(self,pkt):
print "Redirect_DNS: "
#Uses macTbl, dns_tbl, and t_agent
fields,ops = self.default_Field_Ops(pkt)
if self.ipTbl.has_key(pkt['srcip']):
if self.ipTbl[pkt['srcip']]== 'test':
key = (pkt['srcip'],pkt['srcport'])
print key
self.dns_tbl[key] = {'dstip':pkt['dstip'],'dstmac':pkt['dstmac']}
fields.update({'dstmac':self.t_agent['mac'],
'dstip':self.t_agent['ip']})
fields.update({'srcmac':pkt['srcmac'],'srcip':pkt['srcip']})
ops = {'hard_t':None, 'idle_t':None, 'priority':100,\
'op':'mod', 'newport':self.t_agent['port']}
elif self.ipTbl.has_key(pkt['dstip']):
if self.ipTbl[pkt['dstip']]== 'test':
key = (pkt['dstip'],pkt['dstport'])
print key
fields.update({'srcmac':self.dns_tbl[key]['dstmac'],
'srcip':self.dns_tbl[key]['dstip']})
fields.update({'dstmac':pkt['dstmac'], 'dstip':pkt['dstip']})
ops = {'hard_t':None, 'idle_t':None, 'priority':100,\
'op':'mod', 'newport':None}
#self.dns_tbl.pop(key)
#print "DNS Table: ", self.dns_tbl
return fields, ops
#Check status of port and mac.
def check_ip_tbl(self,pkt):
#print "Check_ip_tbl:"
srcip,dstip = pkt['srcip'],pkt['dstip']
if self.ipTbl.has_key(srcip):
#print "Found: ", srcip,'->', self.ipTbl[srcip]
return self.ipTbl[srcip]
elif self.ipTbl.has_key(dstip):
#print "Found: ", dstip,'->', self.ipTbl[dstip]
return self.ipTbl[dstip]
else:
#print "Not Found: ", srcip, ', ', dstip
return 'No_Flag'
# All packets not defined above are handled here.
def handle_unk(self,pkt):
print "-------------------------------------------------------------"
print "Handle Uknown"
fields, ops = self.default_Field_Ops(pkt)
self.install_field_ops(pkt, fields, ops)
######################################################################
# The following are from the old NFG file.
def default_Field_Ops(self,pkt):
def _loadFields(pkt):
#keys specifies match fields for action. Default is
#inport and srcmac. ptype used for craft icmp, udp, etc.
fields = {'keys':['inport','srcmac'],'ptype':[], 'dp':pkt['dp'],
'ofproto':pkt['ofproto'], 'msg':pkt['msg'],
'inport':pkt['inport'], 'srcmac':pkt['srcmac'],
'ethtype':pkt['ethtype'], 'dstmac':None, 'srcip':None,
'proto':None, 'dstip':None, 'srcport':None, 'dstport':None,
'com':None, 'id':0}
return fields
def _loadOps():
#print "Loading ops"
#Specifies the timeouts, priority, operation and outport
#options for op: 'fwd','drop', 'mir', 'redir', 'craft'
ops = {'hard_t':None, 'idle_t':None, 'priority':10, \
'op':'fwd', 'newport':None}
return ops
#print "default Field_Ops called"
fields = _loadFields(pkt)
ops = _loadOps()
return fields, ops
######################################################################
############ 5 Ryuretic Network Application Modules 5 ##############
#[5] Add user created methods below. Examples are provided to assist #
# the user with basic python, dictionary, list, and function calls #
######################################################################
# Confirm mac has been seen before and no issues are recorded
def TTL_Check(self, pkt):
#initialize fields and ops with default settings
fields, ops = self.default_Field_Ops(pkt)
if pkt['srcmac'] != self.validNAT['mac']:
if pkt['ttl']==63 or pkt['ttl']==127:
print 'TTL Decrement Detected on ',pkt['srcmac'],' TTL is :',pkt['ttl']
fields, ops = self.add_drop_params(pkt,fields,ops)
else:
ops['idle_t'] = 5
print "Packet TTL: ", pkt['ttl'], ' ', pkt['srcip'],' ', \
pkt['inport'],' ', pkt['srcmac']
else:
ops['idle_t'] = 20
priority = 10
return fields, ops
def Multi_MAC_Checker(self, pkt):
fields, ops = self.default_Field_Ops(pkt)
print "*** Checking MAC ***"
#self.port_mac_map = {}
if self.port_mac_map.has_key(pkt['inport']):
if pkt['srcmac'] != self.port_mac_map[pkt['inport']]:
print " Multi-mac port detected "
fields, ops = self.add_drop_params(pkt,fields,ops)
else:
fields, ops = self.fwd_persist(pkt,fields,ops)
else:
self.port_mac_map[pkt['inport']] = pkt['srcmac']
return fields, ops
#change name to monitor_TCP for RAP
def TTA_analysis(self,pkt):
fields, ops = self.default_Field_Ops(pkt)
bits = pkt['bits']
dst, dstip, dstport = pkt['dstmac'], pkt['dstip'], pkt['dstport']
src, srcip, srcport = pkt['srcmac'], pkt['srcip'], pkt['srcport']
inport = pkt['inport']
send = (src,srcip,srcport,dstip)
arrive = (dst,dstip,dstport,srcip)
t_in = pkt['t_in']
#print"*****\n"+self.tta+"/n******/n"+self.port_AV+"/n*****"
if bits == 20:
if self.tta.has_key(send):
self.tta[send]['stage'] = 0
elif self.tta.has_key(arrive):
#print pkt
self.tta[arrive]['stage'] = 0
return fields, ops
if bits == 2:
if self.tta.has_key(send):
self.tta[send].update({'inport':inport,'stage':1})
else:
self.tta.update({send:{'inport':inport,'stage':1}})
return fields, ops
if bits == 18:
if self.tta.has_key(arrive):
if self.tta[arrive]['stage']==1:
self.tta[arrive].update({'syn':t_in,'stage':2})
return fields,ops
if bits == 16:
if self.tta.has_key(send):
if self.tta[send]['stage']==2:
tta = t_in - self.tta[send]['syn']
self.tta[send].update({'stage':3, 'ack':t_in, 'tta':tta})
#print '** Calc TTA :', tta
if self.port_AV.has_key(self.tta[send]['inport']):
portAV = ((self.port_AV[self.tta[send]['inport']] * \
9) + tta)/10
self.port_AV[self.tta[send]['inport']] = portAV
else:
portAV = ((0.001*9)+tta)/10
self.port_AV.update({self.tta[send]['inport']:portAV})
#print "****"
#print "Port and TTA: ", inport, self.tta[send]['tta']
print '****\nPort Averages: ', self.port_AV, '\n****'
#print "****"
del self.tta[send]
return fields, ops
#print "Persist"
fields, ops = self.tcp_persist(pkt,fields,ops)
return fields, ops
if bits == 24:
#print "HTTP Push"
return fields, ops
if bits == 17:
print 'Port Averages: ', self.port_AV
if self.tta.has_key(send):
del self.tta[send]
elif self.tta.has_key(arrive):
del self.tta[arrive]
return fields, ops
print "Packet not addressed", bits, inport, src, dstip
return fields, ops
# Call to temporarily install drop parameter for a packet to switch
def add_drop_params(self, pkt, fields, ops):
#may need to include priority
fields['keys'] = ['inport']
fields['inport'] = pkt['inport']
ops['priority'] = 100
ops['idle_t'] = 60
ops['op']='drop'
return fields, ops
# Call to temporarily install TCP flow connection on switch
def tcp_persist(self, pkt,fields,ops):
#print "TCP_Persist: ", pkt['srcmac'],'->', pkt['dstmac']
#print "TCP_Persist: ", pkt['srcip'],'->',pkt['dstip']
fields['keys'] = ['inport', 'srcmac', 'srcip', 'ethtype', 'srcport']
fields['srcport'] = pkt['srcport']
fields['srcip'] = pkt['srcip']
ops['idle_t'] = 5
ops['priority'] = 10
return fields, ops
def fwd_persist(self, pkt,fields,ops):
ops['idle_t'] = 3
ops['priority'] = 10
return fields, ops
def arp_persist(self, pkt):
fields, ops = self.default_Field_Ops(pkt)
fields['keys'] = ['inport','srcmac','ethtype']
ops['idle_t'] = 10
ops['priority'] = 2
return fields, ops
################################################################
"""
The following code is implemented to allow the trusted agent to comm
with the controller and vice versa.
"""
################################################################
#Receive and respond to arp
def respond_to_arp(self,pkt):
    """Handle an incoming ARP packet.

    Learns the sender's MAC -> (port, status 'unk') association in
    macTbl if unseen.  When the ARP targets the controller's IP, the
    returned `fields`/`ops` describe a crafted ARP response (ethtype
    0x0806) sourced from the controller's MAC/IP and sent back out
    the ingress port.  Returns the (fields, ops) pair.
    """
    print 'Respond to Arp:', pkt['srcmac'],'->',pkt['dstmac']
    print 'Respond to Arp:', pkt['srcip'],'->',pkt['dstip']
    fields, ops = self.default_Field_Ops(pkt)
    #Added to build MAC and port associations: learn the source MAC on
    # first sight, with trust status 'unk' (unknown).
    if not self.macTbl.has_key(pkt['srcmac']):
        self.macTbl[pkt['srcmac']] = {'port':pkt['inport'], 'stat':'unk'}
    if pkt['dstip'] == self.cntrl['ip']:
        print "Message to Controller"
        # Swap src/dst so the crafted response goes back to the
        # requester, sourced from the controller's identity.
        fields['keys']=['srcmac', 'srcip', 'ethtype', 'inport']
        fields['ptype'] = 'arp'
        fields['dstip'] = pkt['srcip']
        fields['srcip'] = self.cntrl['ip']
        fields['dstmac'] = pkt['srcmac']
        fields['srcmac'] = self.cntrl['mac']
        fields['ethtype'] = 0x0806  # EtherType: ARP
        ops['op'] = 'craft'
        ops['newport'] = pkt['inport']  # send out the port it arrived on
        #print "INPORT: ", pkt['inport']
    return fields, ops
#Respond to ping. Forward or respond if to cntrl from trusted agent.
def respond_to_ping(self,pkt):
    """Handle an ICMP packet addressed to the controller.

    When the ping comes from the trusted agent's IP to the
    controller's IP, the ICMP payload carries a comma-separated
    control message: ``action,keyID[,result]`` with action in
    {a-acknowledge, i-init, d-delete, r-result} (u/v reserved).
    The policy/mac/port/ip tables are updated accordingly and an
    ICMP acknowledgement ('a,' + original payload) is crafted back
    to the agent.  Returns the (fields, ops) pair.
    """
    def get_fields(keyID):
        # Look up the (srcmac, inport, srcip) recorded in the policy
        # table entry for this keyID.
        srcmac = self.policyTbl[keyID]['srcmac']
        inport = self.policyTbl[keyID]['inport']
        srcip = self.policyTbl[keyID]['ip']
        print inport, ', ', srcmac, ', ', srcip
        return srcmac, inport, srcip
    def remove_keyID(keyID):
        # Purge every table entry (mac/port/ip/policy) tied to keyID.
        print "Policy Table Contents: ", self.policyTbl
        if self.policyTbl.has_key(keyID):
            srcmac, inport, srcip = get_fields(keyID)
            if self.macTbl.has_key(srcmac):
                print "Removing MAC", srcmac
                self.macTbl.pop(srcmac)
            if self.portTbl.has_key(inport):
                print "Removing Port", inport
                self.portTbl.pop(inport)
            if self.ipTbl.has_key(srcip):
                print "Removing IP", srcip
                self.ipTbl.pop(srcip)
            self.policyTbl.pop(keyID)
    print "Respond to Ping: ", pkt['srcmac'],'->',pkt['dstmac']
    fields, ops = self.default_Field_Ops(pkt)
    if pkt['dstip'] == self.cntrl['ip'] and pkt['srcip'] == self.t_agentIP:
        #print'respond to ping'
        rcvData = pkt['data'].data
        #Actions {a-acknowledge, i-init, d-delete, r-result, v-verify}
        #action, keyID = rcvData.split(',')
        #keyID = keyID.rstrip(' \t\r\n\0')
        print rcvData
        # Payload is either 'action,keyID,result' (a test result) or
        # 'action,keyID' (a revocation); try the 3-field form first.
        try:
            action, keyID, result = rcvData.split(',')
            result = result.rstrip(' \t\r\n\0')
            print "Received Result"
        except:
            action, keyID = rcvData.split(',')
            print "Received Revocation."
        keyID = keyID.rstrip(' \t\r\n\0')
        print "Key ID Length: ", len(keyID)
        keyID = int(keyID)
        print "KeyID is ", keyID, ', ', type(keyID)
        print "Action is ", action, "\n\n\n*********"
        ######################################################
        if action == 'i':
            # init: cache the trusted agent's addressing and OpenFlow
            # handles for later crafted messages (see update_TA).
            self.t_agent = {'ip':pkt['srcip'],'mac':pkt['srcmac'],
                            'port':pkt['inport'],'msg':pkt['msg'],
                            'ofproto':pkt['ofproto'], 'dp':pkt['dp']}
            print "T_AGENT Loaded"
        elif action == 'd':
            #Deleting flagged host policy
            print "Removing (",keyID,") from Policy Table"
            print "Existing Keys: ", self.policyTbl.keys()
            remove_keyID(keyID)
        elif action == 'r':
            # result: 'P' (pass) clears the host; 'F' (fail) marks it
            # 'deny' in every table and notifies the agent.
            print "Validating result"
            print "Key present?", self.policyTbl.has_key(keyID)
            if self.policyTbl.has_key(keyID):
                print "Test Result is: ", result
                if result == 'P':
                    print "Removing keyID"
                    remove_keyID(keyID)
                elif result =='F':
                    print "Flagging Host: ", self.policyTbl[keyID]['ip']
                    self.policyTbl[keyID]['stat'] = 'deny'
                    srcmac, inport, srcip = get_fields(keyID)
                    self.macTbl[srcmac].update({'stat':'deny'})
                    self.portTbl[inport],self.ipTbl[srcip] ='deny','deny'
                    self.update_TA(pkt, keyID,'e') #send edit message
                    #Notify TA of update_TA(self,pkt, keyID)
            else:
                print "An Error Occured"
        # NOTE(review): 'is' compares object identity, not equality;
        # '==' is intended here and the branches may never trigger
        # except via CPython string interning.
        elif action is 'u':
            #This is more complicated it requires data not being stored
            #may need to add fields to policyTable. Maybe not.
            pass
        elif action is 'a':
            #Acknowledge receipt
            pass
        else:
            print "No match"
        # Craft the ICMP acknowledgement back to the agent, echoing
        # the received payload prefixed with 'a,'.
        fields.update({'srcmac':self.cntrl['mac'], 'dstmac':pkt['srcmac']})
        fields.update({'srcip':self.cntrl['ip'], 'dstip':pkt['srcip']})
        fields.update({'ptype':'icmp','ethtype':0x0800, 'proto':1})
        fields['com'] = 'a,'+rcvData
        ops.update({'op':'craft', 'newport':pkt['inport']})
    return fields, ops
#Crafts tailored ICMP message for trusted agent
def update_TA(self,pkt, keyID, message):
    """Craft an ICMP control message to the trusted agent about `keyID`.

    The ICMP payload ('com') is the comma-joined record
    ``message,srcmac,inport,passkey,stat,keyID`` drawn from the
    policy-table entry.  The packet is sourced from the controller,
    addressed to the trusted agent, and handed to install_field_ops
    (op 'craft', no flow timeouts, out the agent's port).
    """
    table = self.policyTbl[keyID]
    print 'Update Table: ', pkt['srcmac'],'->',keyID,'->',table['stat']
    print 'Update Table: ', table['srcmac'],'->',keyID,'->',table['stat']
    #print "Updating Trusted Agent"
    fields, ops = {},{}
    fields['keys'] = ['inport', 'srcip']
    # Controller -> trusted agent addressing, plus the agent's
    # datapath/protocol handles cached at init (action 'i').
    fields.update({'dstip':self.t_agent['ip'], 'srcip':self.cntrl['ip']})
    fields.update({'dstmac':self.t_agent['mac'], 'srcmac':self.cntrl['mac']})
    fields.update({'dp':self.t_agent['dp'], 'msg':self.t_agent['msg']})
    fields.update({'inport':self.t_agent['port'],'ofproto':\
                   self.t_agent['ofproto']})
    fields.update({'ptype':'icmp', 'ethtype':0x0800, 'proto':1, 'id':0})
    fields['com'] = message+','+table['srcmac']+','+str(table['inport'])+\
                    ','+str(table['passkey'])+','+table['stat']+\
                    ','+str(keyID)
    ops = {'hard_t':None, 'idle_t':None, 'priority':0, \
           'op':'craft', 'newport':self.t_agent['port']}
    self.install_field_ops(pkt, fields, ops)
################################################################
"""
The following code controls the redirection of packets from their intended
destination to our trusted agent. This occurs when a port is flagged.
"""
################################################################
#Create a method to inject a redirect anytime the sta4 IP address is
#Check status of port and mac.
def check_net_tbl(self,pkt):
    """Return the recorded trust status for this packet's source.

    Lookup order: source MAC (macTbl), then ingress port (portTbl),
    then source IP (ipTbl).  Returns the stored status value
    (e.g. 'deny' or 'unk') from the first table that matches, or
    'new' when the source has never been seen.
    """
    mac, ip, port = pkt['srcmac'], pkt['srcip'], pkt['inport']
    print "(536) Check NetTbl: ", mac, ' & ', port,'->',self.macTbl.keys()
    if mac in self.macTbl.keys():
        #print "Found: ", mac,'->', self.macTbl[mac]['stat']
        return self.macTbl[mac]['stat']
    elif port in self.portTbl.keys():
        #print "Port ", port, " found in table."
        return self.portTbl[port]
    elif ip in self.ipTbl.keys():
        #print "IP ", ip, " found in table."
        return self.ipTbl[ip]
    else:
        #print "Not Found: ", mac
        return 'new'
#Redirect ICMP packets to trusted agent
def Icmp_Redirect(self,pkt):
    """Build (fields, ops) that redirect this ICMP flow to the trusted agent.

    Matches on ingress port + ethtype, rewrites the destination
    MAC/IP to the trusted agent's, and outputs on the agent's port.
    The flow is installed at priority 100 with a 180 second idle
    timeout.  Returns the (fields, ops) pair.
    """
    print "Redirecting ICMP", pkt['srcmac'],'->',pkt['dstmac'],'||',self.t_agent['mac']
    fields, ops = self.default_Field_Ops(pkt)
    fields['keys'] = ['inport', 'ethtype']
    fields['dstmac'] = self.t_agent['mac']  # rewrite L2 destination -> agent
    fields['dstip'] = self.t_agent['ip']    # rewrite L3 destination -> agent
    fields['ethtype'] = pkt['ethtype']
    ops['op'] = 'redir'
    ops['newport'] = self.t_agent['port']   # output on the agent's port
    ops['priority'] = 100
    ops['idle_t'] = 180
    #ops['hard_t'] = 180
    return fields, ops
| 37.003044 | 87 | 0.569948 | 22,457 | 0.923738 | 0 | 0 | 0 | 0 | 0 | 0 | 12,216 | 0.502489 |
644785cc2aa6d52bbb0267c2b681bfd059e3009d | 715 | py | Python | pyPQN/SoftmaxLoss2.py | steveli/mogp | d142e7b9e5b7dbc67cfae4760c837cafd9691a51 | [
"MIT"
] | 7 | 2019-07-18T19:55:26.000Z | 2022-02-14T13:55:04.000Z | pyPQN/SoftmaxLoss2.py | mlds-lab/mogp | d142e7b9e5b7dbc67cfae4760c837cafd9691a51 | [
"MIT"
] | null | null | null | pyPQN/SoftmaxLoss2.py | mlds-lab/mogp | d142e7b9e5b7dbc67cfae4760c837cafd9691a51 | [
"MIT"
] | 2 | 2020-07-08T06:14:23.000Z | 2022-02-16T17:55:22.000Z | from __future__ import division
import numpy as np
def SoftmaxLoss2(w, X, y, k):
    """Negative log-likelihood and gradient for multinomial softmax.

    The weights of the last class are fixed at 0 to avoid
    overparameterization, so `w` holds only the first k-1 classes.

    Args:
        w: flat weight vector of length p*(k-1), column-stacked per class.
        X: (n, p) design matrix.
        y: (n,) integer class labels in [0, k-1].
        k: number of classes.

    Returns:
        (nll, g): scalar negative log-likelihood and the flat gradient
        of length p*(k-1).
    """
    n, p = X.shape
    W = np.hstack((w.reshape((p, k - 1)), np.zeros((p, 1))))
    scores = X.dot(W)                          # (n, k) class scores
    # Log-sum-exp with a per-row shift for numerical stability: the
    # original np.exp(X.dot(w)) overflows for large scores.
    m = scores.max(axis=1)
    expS = np.exp(scores - m[:, np.newaxis])
    Z = expS.sum(axis=1)
    logZ = m + np.log(Z)
    nll = -((X * W[:, y].T).sum(axis=1) - logZ).sum()
    g = np.zeros((p, k - 1))
    # range (not the Python-2-only xrange) keeps this Py2/Py3 compatible.
    for c in range(k - 1):
        # d(nll)/dW[:, c] = -X^T (1[y == c] - P(class c | x));
        # expS[:, c] / Z equals the shifted-but-identical softmax prob.
        g[:, c] = -(X * ((y == c) - expS[:, c] / Z)[:, np.newaxis]).sum(axis=0)
    return nll, np.ravel(g)
| 25.535714 | 72 | 0.524476 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 199 | 0.278322 |
644786691ef180005030e8c01b1e2a7613ecc55a | 3,260 | py | Python | syfertext/string_store.py | socd06/SyferText | b68f8faffb0995945676b68a37b7c3b5e0b45833 | [
"Apache-2.0"
] | null | null | null | syfertext/string_store.py | socd06/SyferText | b68f8faffb0995945676b68a37b7c3b5e0b45833 | [
"Apache-2.0"
] | 1 | 2020-09-12T11:05:51.000Z | 2020-09-12T11:05:51.000Z | syfertext/string_store.py | socd06/SyferText | b68f8faffb0995945676b68a37b7c3b5e0b45833 | [
"Apache-2.0"
] | null | null | null | from .utils import hash_string
from typing import Union
class StringStore:
    """Bidirectional lookup table between strings and 64-bit hash keys.

    Strings are hashed with ``hash_string`` and can be looked up by
    hash, and vice-versa.
    """

    def __init__(self, strings=None):
        """Create the StringStore object.

        Args:
            strings (list, optional): strings to preload into the store.
        """
        # key_to_str maps hashes to strings (key == hash : value == string)
        self.key_to_str = {}
        # str_to_key maps strings to hashes (key == string : value == hash)
        self.str_to_key = {}
        if strings is not None:
            for word in strings:
                self.add(word)

    def __contains__(self, string):
        """Return True if `string` is already in the store.

        Args:
            string (str): string to check.
        """
        # Membership on the dict itself — no need to materialize .keys().
        return string in self.str_to_key

    def add(self, string: str):
        """Add a string to the store and return its hash key.

        Idempotent: an already-stored string returns its existing key.

        Args:
            string (str): the string to add.

        Returns:
            int: hash key for the string.

        Raises:
            TypeError: if `string` is not a str.
        """
        if not isinstance(string, str):
            raise TypeError(
                f"Argument `string` is of type `{type(string)}`. Expected type is `str`"
            )
        if string in self:
            return self.str_to_key[string]
        key = hash_string(string)
        self.str_to_key[string] = key
        self.key_to_str[key] = string
        return key

    def __getitem__(self, string_or_id: Union[str, int]):
        """Retrieve a string from a given hash, or a hash from a string.

        A string not yet in the store is added first, so indexing by
        string always succeeds; indexing by an unknown hash raises
        KeyError.

        Args:
            string_or_id (str, int): the hash key or string value.

        Returns:
            str or int: the string for a hash key, or the key for a string.

        Raises:
            TypeError: if `string_or_id` is neither str nor int.
        """
        if not isinstance(string_or_id, (str, int)):
            # TODO: Add custom SyferText error message
            raise TypeError(
                f"Argument `key` is of type `{type(string_or_id)}`. Expected type is `str` or `int`"
            )
        if isinstance(string_or_id, int):
            return self.key_to_str[string_or_id]
        # str case: `add` returns the existing key or stores a new one.
        return self.add(string_or_id)

    def __len__(self):
        """Return the number of strings in the store."""
        return len(self.str_to_key)
| 31.346154 | 104 | 0.593252 | 3,201 | 0.981902 | 0 | 0 | 0 | 0 | 0 | 0 | 1,819 | 0.557975 |