hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eacc2257b27de8eb2645ec30a826a06701432acc | 972 | py | Python | addons/meme.py | 916253/Kurisu-Reswitched | 143c27e42049de8ccc8c5c76f503ea96e89c179c | [
"Apache-2.0"
] | 13 | 2017-08-18T00:25:26.000Z | 2020-12-06T00:59:47.000Z | addons/meme.py | 916253/Kurisu-Reswitched | 143c27e42049de8ccc8c5c76f503ea96e89c179c | [
"Apache-2.0"
] | 11 | 2018-04-13T16:57:13.000Z | 2018-12-23T11:52:19.000Z | addons/meme.py | 916253/Kurisu-Reswitched | 143c27e42049de8ccc8c5c76f503ea96e89c179c | [
"Apache-2.0"
] | 21 | 2017-08-04T16:33:15.000Z | 2019-03-11T17:01:48.000Z | import discord
import random
from discord.ext import commands
class Meme:
    """
    Meme commands.

    Joke/easter-egg commands. NOTE: this uses the pre-1.0 discord.py
    API (``self.bot.say``) — confirm the bot pins that version.
    """
    def __init__(self, bot):
        # Keep a reference to the bot so commands can send replies.
        self.bot = bot
        print('Addon "{}" loaded'.format(self.__class__.__name__))
    @commands.command(pass_context=True, hidden=True, name="bam")
    async def bam_member(self, ctx, user: discord.Member, *, reason=""):
        """Bams a user owo"""
        # The reply intentionally contains zalgo/combining characters.
        await self.bot.say("{} is ̶n͢ow b̕&̡.̷ 👍̡".format(self.bot.escape_name(user)))
    @commands.command(pass_context=True, hidden=True, name="warm")
    async def warm_member(self, ctx, user: discord.Member, *, reason=""):
        """Warms a user :3"""
        # Random "temperature" between 0 and 100 inclusive.
        await self.bot.say("{} warmed. User is now {}°C.".format(user.mention, str(random.randint(0, 100))))
    @commands.command(hidden=True)
    async def frolics(self):
        """test"""
        await self.bot.say("https://www.youtube.com/watch?v=VmarNEsjpDI")
def setup(bot):
    # discord.py extension entry point: register this cog on load.
    bot.add_cog(Meme(bot))
| 29.454545 | 108 | 0.622428 | 872 | 0.887984 | 0 | 0 | 688 | 0.700611 | 520 | 0.529532 | 224 | 0.228106 |
eacd933360da63672e942fa0e5cf98b6ea63add5 | 3,180 | py | Python | google/cloud/recommender/v1beta1/recommender-v1beta1-py/google/cloud/recommender/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 7 | 2021-02-21T10:39:41.000Z | 2021-12-07T07:31:28.000Z | google/cloud/recommender/v1beta1/recommender-v1beta1-py/google/cloud/recommender/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 6 | 2021-02-02T23:46:11.000Z | 2021-11-15T01:46:02.000Z | google/cloud/recommender/v1beta1/recommender-v1beta1-py/google/cloud/recommender/__init__.py | googleapis/googleapis-gen | d84824c78563d59b0e58d5664bfaa430e9ad7e7a | [
"Apache-2.0"
] | 4 | 2021-01-28T23:25:45.000Z | 2021-08-30T01:55:16.000Z | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from google.cloud.recommender_v1beta1.services.recommender.client import RecommenderClient
from google.cloud.recommender_v1beta1.services.recommender.async_client import RecommenderAsyncClient
from google.cloud.recommender_v1beta1.types.insight import Insight
from google.cloud.recommender_v1beta1.types.insight import InsightStateInfo
from google.cloud.recommender_v1beta1.types.recommendation import CostProjection
from google.cloud.recommender_v1beta1.types.recommendation import Impact
from google.cloud.recommender_v1beta1.types.recommendation import Operation
from google.cloud.recommender_v1beta1.types.recommendation import OperationGroup
from google.cloud.recommender_v1beta1.types.recommendation import Recommendation
from google.cloud.recommender_v1beta1.types.recommendation import RecommendationContent
from google.cloud.recommender_v1beta1.types.recommendation import RecommendationStateInfo
from google.cloud.recommender_v1beta1.types.recommendation import ValueMatcher
from google.cloud.recommender_v1beta1.types.recommender_service import GetInsightRequest
from google.cloud.recommender_v1beta1.types.recommender_service import GetRecommendationRequest
from google.cloud.recommender_v1beta1.types.recommender_service import ListInsightsRequest
from google.cloud.recommender_v1beta1.types.recommender_service import ListInsightsResponse
from google.cloud.recommender_v1beta1.types.recommender_service import ListRecommendationsRequest
from google.cloud.recommender_v1beta1.types.recommender_service import ListRecommendationsResponse
from google.cloud.recommender_v1beta1.types.recommender_service import MarkInsightAcceptedRequest
from google.cloud.recommender_v1beta1.types.recommender_service import MarkRecommendationClaimedRequest
from google.cloud.recommender_v1beta1.types.recommender_service import MarkRecommendationFailedRequest
from google.cloud.recommender_v1beta1.types.recommender_service import MarkRecommendationSucceededRequest
__all__ = ('RecommenderClient',
'RecommenderAsyncClient',
'Insight',
'InsightStateInfo',
'CostProjection',
'Impact',
'Operation',
'OperationGroup',
'Recommendation',
'RecommendationContent',
'RecommendationStateInfo',
'ValueMatcher',
'GetInsightRequest',
'GetRecommendationRequest',
'ListInsightsRequest',
'ListInsightsResponse',
'ListRecommendationsRequest',
'ListRecommendationsResponse',
'MarkInsightAcceptedRequest',
'MarkRecommendationClaimedRequest',
'MarkRecommendationFailedRequest',
'MarkRecommendationSucceededRequest',
)
| 49.6875 | 105 | 0.839308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,060 | 0.333333 |
eacf62541cfea44c5aa6f4ef694688addf50cbbc | 232 | py | Python | catsndogs/training.py | simonpf/catsndogs | 36732a7c2c767b2bb6efa87a849598170c8026e8 | [
"MIT"
] | 1 | 2020-12-18T17:19:37.000Z | 2020-12-18T17:19:37.000Z | catsndogs/training.py | simonpf/catsndogs | 36732a7c2c767b2bb6efa87a849598170c8026e8 | [
"MIT"
] | null | null | null | catsndogs/training.py | simonpf/catsndogs | 36732a7c2c767b2bb6efa87a849598170c8026e8 | [
"MIT"
] | null | null | null | import os
import glob
from catsndogs.data import get_training_data
# Root folder of the training data.
folder = get_training_data()
# All cat / dog JPEG paths in the training set.
# NOTE(review): get_training_data() is called again instead of reusing
# `folder` — confirm repeated calls are cheap/idempotent.
cats = glob.glob(os.path.join(get_training_data(), "cat", "*.jpg"))
dogs = glob.glob(os.path.join(get_training_data(), "dog", "*.jpg"))
| 33.142857 | 67 | 0.737069 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.103448 |
eacf68a9a529d44fee079f8928813444594dc1f5 | 1,893 | py | Python | models/ir_module.py | linjiyou/Odoo-UpgreadOnly | 7a0ac5ebbdce66f945f129b7227e13c7dd8c8106 | [
"Unlicense"
] | 2 | 2022-03-05T10:37:27.000Z | 2022-03-05T10:37:42.000Z | models/ir_module.py | linjiyou/Odoo-UpgreadOnly | 7a0ac5ebbdce66f945f129b7227e13c7dd8c8106 | [
"Unlicense"
] | 1 | 2022-03-05T10:40:21.000Z | 2022-03-05T10:40:21.000Z | models/ir_module.py | linjiyou/Odoo-UpgreadOnly | 7a0ac5ebbdce66f945f129b7227e13c7dd8c8106 | [
"Unlicense"
] | 1 | 2022-03-05T10:37:21.000Z | 2022-03-05T10:37:21.000Z | # -*- coding: utf-8 -*-
# ========================================
# Author: wjh
# Date:2021/1/19
# FILE: ir_module
# ========================================
from odoo import api, fields, models, _
from odoo.exceptions import UserError
# Window-action template used to pop the standard "module upgrade" wizard;
# `button_upgrade_only` merges a display name into a copy of this dict.
ACTION_DICT = dict(
    view_type='form',
    view_mode='form',
    res_model='base.module.upgrade',
    target='new',
    type='ir.actions.act_window',
)
class ModuleModel(models.Model):
    """Extend ir.module.module with an "upgrade only this module" action,
    instead of Odoo's default behaviour of upgrading everything pending."""
    _inherit = 'ir.module.module'
    @api.multi
    def button_immediate_upgrade_only(self):
        """Upgrade only the selected module(s), immediately."""
        return self._button_immediate_function(type(self).button_upgrade_only)
    @api.multi
    def button_upgrade_only(self):
        """Mark these modules 'to upgrade' and queue uninstalled deps for
        install; raises UserError for uninstalled/unknown modules."""
        self.update_list()
        todo = list(self)
        i = 0
        while i < len(todo):
            module = todo[i]
            i += 1
            # Only installed (or already queued) modules may be upgraded.
            if module.state not in ('installed', 'to upgrade'):
                raise UserError(_("Can not upgrade module '%s'. It is not installed.") % (module.name,))
            self.check_external_dependencies(module.name, 'to upgrade')
        # search parent
        self.browse(module.id for module in todo).write({'state': 'to upgrade'})
        # search children
        to_install = []
        for module in todo:
            for dep in module.dependencies_id:
                if dep.state == 'unknown':
                    raise UserError(_(
                        'You try to upgrade the module %s that depends on the module: %s.\nBut this module is not available in your system.') % (
                        module.name, dep.name,))
                if dep.state == 'uninstalled':
                    to_install += self.search([('name', '=', dep.name)]).ids
        self.browse(to_install).button_install()
        # Pop the standard upgrade wizard to apply the scheduled changes.
        return dict(ACTION_DICT, name=_('Apply Schedule Upgrade'))
ead03442756f356eeb9a2afa9e9ac38d136649ee | 4,978 | py | Python | nullunit/MCP.py | pnhowe/MCP-agent | df6a7db3ae1c59907acde35968ba06eda333c19d | [
"Apache-2.0"
] | null | null | null | nullunit/MCP.py | pnhowe/MCP-agent | df6a7db3ae1c59907acde35968ba06eda333c19d | [
"Apache-2.0"
] | null | null | null | nullunit/MCP.py | pnhowe/MCP-agent | df6a7db3ae1c59907acde35968ba06eda333c19d | [
"Apache-2.0"
] | null | null | null | import logging
from cinp import client
# Server API versions this agent knows how to talk to (checked in MCP.__init__).
MCP_API_VERSIONS = ( '0.10', '0.11', )
class MCP( object ):
  """REST client the build agent uses to report state to the MCP server.

  All calls go through a CInP client. ``instance_id`` + ``cookie``
  authenticate calls scoped to this build resource instance;
  ``job_id`` scopes job-wide queries.
  """
  def __init__( self, host, proxy, job_id, instance_id, cookie, stop_event ):
    self.cinp = client.CInP( host, '/api/v1/', proxy, retry_event=stop_event )
    self.job_id = job_id
    self.instance_id = instance_id
    self.cookie = cookie
    root, _ = self.cinp.describe( '/api/v1/', retry_count=30 ) # very tolerant for the initial describe, let things settle
    if root[ 'api-version' ] not in MCP_API_VERSIONS:
      raise Exception( 'Expected API version (one of) "{0}" found "{1}"'.format( MCP_API_VERSIONS, root[ 'api-version' ] ) )
  def contractorInfo( self ):
    """Fetch Contractor info from the server config."""
    logging.info( 'MCP: Get Contractor Info' )
    return self.cinp.call( '/api/v1/config(getContractorInfo)', {}, retry_count=10 )
  def packratInfo( self ):
    """Fetch Packrat info from the server config."""
    logging.info( 'MCP: Get Packrat Info' )
    return self.cinp.call( '/api/v1/config(getPackratInfo)', {}, retry_count=10 )
  def confluenceInfo( self ):
    """Fetch Confluence info from the server config."""
    logging.info( 'MCP: Get Confluence Info' )
    return self.cinp.call( '/api/v1/config(getConfluenceInfo)', {}, retry_count=10 )
  def signalJobRan( self ):
    """Tell the server this instance's job has run."""
    logging.info( 'MCP: Signal Job Ran' )
    self.cinp.call( '/api/v1/Processor/BuildJobResourceInstance:{0}:(jobRan)'.format( self.instance_id ), { 'cookie': self.cookie }, retry_count=20 )
  def sendMessage( self, message ):
    """Publish a free-form status message for this instance."""
    logging.info( 'MCP: Message "{0}"'.format( message ) )
    self.cinp.call( '/api/v1/Processor/BuildJobResourceInstance:{0}:(setMessage)'.format( self.instance_id ), { 'cookie': self.cookie, 'message': message }, retry_count=20 )
  def setSuccess( self, success ):
    """Record the overall success/failure flag for this instance."""
    logging.info( 'MCP: Success "{0}"'.format( success ) )
    self.cinp.call( '/api/v1/Processor/BuildJobResourceInstance:{0}:(setSuccess)'.format( self.instance_id ), { 'cookie': self.cookie, 'success': success }, retry_count=20 )
  def setResults( self, target, results ):
    """Upload textual results (may be None) for *target*."""
    if results is not None:
      # Only log the tail so huge result blobs don't flood the log.
      logging.info( 'MCP: Results "{0}"'.format( results[ -100: ].strip() ) )
    else:
      logging.info( 'MCP: Results <empty>' )
    self.cinp.call( '/api/v1/Processor/BuildJobResourceInstance:{0}:(setResults)'.format( self.instance_id ), { 'cookie': self.cookie, 'target': target, 'results': results }, retry_count=20 )
  def setScore( self, target, score ):
    """Upload a numeric score (may be None) for *target*."""
    if score is not None:
      logging.info( 'MCP: Score "{0}"'.format( score ) )
    else:
      logging.info( 'MCP: Score <undefined>' )
    self.cinp.call( '/api/v1/Processor/BuildJobResourceInstance:{0}:(setScore)'.format( self.instance_id ), { 'cookie': self.cookie, 'target': target, 'score': score }, retry_count=20 )
  def uploadedPackages( self, package_file_map ):
    """Register package files uploaded by this instance; no-op when empty."""
    if not package_file_map:
      return
    self.cinp.call( '/api/v1/Processor/BuildJobResourceInstance:{0}:(addPackageFiles)'.format( self.instance_id ), { 'cookie': self.cookie, 'package_file_map': package_file_map }, retry_count=20 )
  def getInstanceState( self, name=None ):
    """Return instance state(s) for the job; inner dicts re-keyed to int."""
    logging.info( 'MCP: Instance State for "{0}"'.format( name ) )
    args = {}
    if name is not None:
      args[ 'name' ] = name
    # json encoding turns the numeric dict keys into strings, this will undo that # TODO: this is fixed in CInP now??
    result = {}
    state_map = self.cinp.call( '/api/v1/Processor/BuildJob:{0}:(getInstanceState)'.format( self.job_id ), args, retry_count=10 )
    if name is None:
      # No name given: server returns {name: {index: state}}.
      for name in state_map:
        result[ name ] = {}
        for index, state in state_map[ name ].items():
          result[ name ][ int( index ) ] = state
    else:
      # Name given: server returns a flat {index: state}.
      for index, state in state_map.items():
        result[ int( index ) ] = state
    return result
  def getInstanceStructureId( self, name=None ):
    """Return instance structure id(s) for the job; inner dicts re-keyed to int."""
    logging.info( 'MCP: Instance Structure Id(s) for "{0}"'.format( name ) )
    args = {}
    if name is not None:
      args[ 'name' ] = name
    # json encoding turns the numeric dict keys into strings, this will undo that
    result = {}
    detail_map = self.cinp.call( '/api/v1/Processor/BuildJob:{0}:(getInstanceStructureId)'.format( self.job_id ), args, retry_count=10 )
    if name is None:
      for name in detail_map:
        result[ name ] = {}
        for index, detail in detail_map[ name ].items():
          result[ name ][ int( index ) ] = detail
    else:
      for index, detail in detail_map.items():
        result[ int( index ) ] = detail
    return result
  def updateValueMap( self, value_map ):
    """Push *value_map* to this instance's stored values; always returns True."""
    logging.info( 'MCP: Setting Value "{0}"'.format( value_map ) )
    self.cinp.call( '/api/v1/Processor/BuildJobResourceInstance:{0}:(updateValueMap)'.format( self.instance_id ), { 'cookie': self.cookie, 'value_map': value_map }, retry_count=20 )
    return True
  def getValueMap( self, name=None ):
    """Fetch this instance's stored value map.

    NOTE(review): the ``name`` parameter is accepted but never used.
    """
    logging.info( 'MCP: Getting Value Map' )
    return self.cinp.call( '/api/v1/Processor/BuildJobResourceInstance:{0}:(getValueMap)'.format( self.instance_id ), { 'cookie': self.cookie }, retry_count=10 )
| 41.831933 | 196 | 0.659301 | 4,895 | 0.983327 | 0 | 0 | 0 | 0 | 0 | 0 | 1,558 | 0.312977 |
ead2a27fde0318e6470fc8deff78230ddd7bed04 | 803 | py | Python | fitter.py | quantummind/quantum-rcs-boundaries | 5c1da3378b72db061960f113dfed77b506f9acae | [
"MIT"
] | null | null | null | fitter.py | quantummind/quantum-rcs-boundaries | 5c1da3378b72db061960f113dfed77b506f9acae | [
"MIT"
] | null | null | null | fitter.py | quantummind/quantum-rcs-boundaries | 5c1da3378b72db061960f113dfed77b506f9acae | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from sklearn.metrics import r2_score
import datetime
def func(x, a, b):
    """Affine model used for the log-space fit: log(y) = a + b * x."""
    return b * x + a
def exp_regression(x, y):
    """Fit y ~ a * exp(b * x) by least squares on log(y); returns [a, b]."""
    fitted, _covariance = curve_fit(func, x, np.log(y))
    # curve_fit returns the log-space intercept; convert it back to `a`.
    fitted[0] = np.exp(fitted[0])
    return fitted
def r2(coeffs, x, y):
    """Coefficient of determination (R^2) of the exponential fit, in log space.

    ``coeffs`` is ``[a, b]`` for the model ``y = a * exp(b * x)``.

    Bug fix: the original ignored the ``coeffs`` argument and read the
    module-level global ``out`` instead, so the function only worked for
    the one fit computed below.  It is now self-contained (the explicit
    1 - SS_res/SS_tot formula matches sklearn.metrics.r2_score).
    """
    log_actual = np.log(y)
    log_predicted = np.log(coeffs[0] * np.exp(coeffs[1] * np.asarray(x)))
    ss_res = np.sum((log_actual - log_predicted) ** 2)
    ss_tot = np.sum((log_actual - np.mean(log_actual)) ** 2)
    return 1.0 - ss_res / ss_tot
# calculate exponential fit for error rate extrapolation
# report as annual decay (i.e. error rate decreases by fixed factor every year)
# CSV layout: column 0 = date, column 1 = error rate.
errors = pd.read_csv('error_rates.csv')
# Dates as integer nanosecond timestamps, so the fit is over time.
x = pd.to_datetime(errors.iloc[:, 0]).astype(int)
y = errors.iloc[:, 1]
out = exp_regression(x, y)
# exp(b * ns-per-year) = factor the rate shrinks by each year.
print('annual error rate decay', np.exp(out[1]*pd.Timedelta(datetime.timedelta(days=365.2422)).delta))
print('R^2', r2(out, x, y))
ead38cb655e5734b62dd667db5d5a633324f7aa3 | 1,546 | py | Python | modules/misc.py | OpenXAIProject/dac | 652776e21b56dcb68839363bb077d5c5ea28d81e | [
"MIT"
] | 17 | 2020-07-28T18:41:45.000Z | 2021-09-19T15:13:39.000Z | modules/misc.py | OpenXAIProject/dac | 652776e21b56dcb68839363bb077d5c5ea28d81e | [
"MIT"
] | 1 | 2021-11-15T00:42:48.000Z | 2021-11-15T00:42:48.000Z | modules/misc.py | OpenXAIProject/dac | 652776e21b56dcb68839363bb077d5c5ea28d81e | [
"MIT"
] | 2 | 2021-09-27T17:31:23.000Z | 2021-12-31T02:35:25.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
class Flatten(nn.Module):
    """Fold the trailing dimensions of the input into a single axis."""

    def __init__(self, dim_start=-3):
        super().__init__()
        # First dimension (counted from the end) to fold into the flat axis.
        self.dim_start = dim_start

    def forward(self, x):
        flat_shape = x.shape[:self.dim_start] + (-1,)
        return x.view(flat_shape)
class View(nn.Module):
    """Reshape every input tensor to a fixed shape chosen at construction."""

    def __init__(self, *shape):
        super().__init__()
        # Target shape; entries may include -1 for an inferred dimension.
        self.shape = shape

    def forward(self, x):
        target = self.shape
        return x.view(target)
class FixupResUnit(nn.Module):
    """Residual unit with Fixup-style scalar biases/scale instead of norm layers.

    Two 3x3 convolutions (no conv bias) with learned scalar shifts around the
    nonlinearities, plus a 1x1 projection shortcut when the channel count or
    stride changes the shape.
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        self.bias1a = nn.Parameter(torch.zeros(1))
        self.conv1 = nn.Conv2d(in_channels, out_channels, 3,
                               padding=1, stride=stride, bias=False)
        self.bias1b = nn.Parameter(torch.zeros(1))
        self.bias2a = nn.Parameter(torch.zeros(1))
        self.conv2 = nn.Conv2d(out_channels, out_channels, 3,
                               padding=1, bias=False)
        self.scale = nn.Parameter(torch.ones(1))
        self.bias2b = nn.Parameter(torch.zeros(1))
        needs_projection = in_channels != out_channels or stride != 1
        if needs_projection:
            self.shortcut = nn.Conv2d(in_channels, out_channels, 1,
                                      stride=stride, bias=False)
        else:
            self.shortcut = nn.Identity()

    def forward(self, x):
        h = self.conv1(F.relu(x) + self.bias1a) + self.bias1b
        h = self.conv2(F.relu(h) + self.bias2a)
        h = h * self.scale + self.bias2b
        return self.shortcut(x) + h
| 30.92 | 67 | 0.596378 | 1,473 | 0.952781 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
ead73abff93c9ef507eee67a64d1ff0edc6aedeb | 5,738 | py | Python | env/Lib/site-packages/IPython/lib/tests/test_latextools.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 2 | 2022-02-26T11:19:40.000Z | 2022-03-28T08:23:25.000Z | env/Lib/site-packages/IPython/lib/tests/test_latextools.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | null | null | null | env/Lib/site-packages/IPython/lib/tests/test_latextools.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 1 | 2022-03-28T09:19:34.000Z | 2022-03-28T09:19:34.000Z | """Tests for IPython.utils.path.py"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from contextlib import contextmanager
from unittest.mock import patch
import pytest
from IPython.lib import latextools
from IPython.testing.decorators import (
onlyif_cmds_exist,
skipif_not_matplotlib,
)
from IPython.utils.process import FindCmdError
@pytest.mark.parametrize('command', ['latex', 'dvipng'])
def test_check_latex_to_png_dvipng_fails_when_no_cmd(command):
    """latex_to_png_dvipng returns None when `latex` or `dvipng` is missing."""
    def mock_find_cmd(arg):
        # Simulate the named external command being absent from PATH.
        if arg == command:
            raise FindCmdError
    with patch.object(latextools, "find_cmd", mock_find_cmd):
        assert latextools.latex_to_png_dvipng("whatever", True) is None
@contextmanager
def no_op(*args, **kwargs):
    """Do-nothing context manager; stands in for patch_latextool in params."""
    yield
@onlyif_cmds_exist("latex", "dvipng")
@pytest.mark.parametrize("s, wrap", [(u"$$x^2$$", False), (u"x^2", True)])
def test_latex_to_png_dvipng_runs(s, wrap):
"""
Test that latex_to_png_dvipng just runs without error.
"""
def mock_kpsewhich(filename):
assert filename == "breqn.sty"
return None
latextools.latex_to_png_dvipng(s, wrap)
with patch_latextool(mock_kpsewhich):
latextools.latex_to_png_dvipng(s, wrap)
def mock_kpsewhich(filename):
    """Default kpsewhich stub: pretend breqn.sty is not installed."""
    assert filename == "breqn.sty"
    return None
@contextmanager
def patch_latextool(mock=mock_kpsewhich):
    """Temporarily replace latextools.kpsewhich with *mock*."""
    with patch.object(latextools, "kpsewhich", mock):
        yield
@pytest.mark.parametrize('context', [no_op, patch_latextool])
@pytest.mark.parametrize('s_wrap', [("$x^2$", False), ("x^2", True)])
def test_latex_to_png_mpl_runs(s_wrap, context):
    """
    Test that latex_to_png_mpl just runs without error.

    Skipped at runtime when matplotlib is not importable.
    """
    try:
        import matplotlib
    except ImportError:
        pytest.skip("This needs matplotlib to be available")
        return
    s, wrap = s_wrap
    with context():
        latextools.latex_to_png_mpl(s, wrap)
@skipif_not_matplotlib
def test_latex_to_html():
    """latex_to_html embeds the rendered formula as a base64 PNG data URI."""
    img = latextools.latex_to_html("$x^2$")
    assert "data:image/png;base64,iVBOR" in img
def test_genelatex_no_wrap():
    """
    Test genelatex with wrap=False.

    kpsewhich must not be consulted at all when no wrapping is requested.
    """
    def mock_kpsewhich(filename):
        assert False, ("kpsewhich should not be called "
                       "(called with {0})".format(filename))
    with patch_latextool(mock_kpsewhich):
        assert '\n'.join(latextools.genelatex("body text", False)) == r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\pagestyle{empty}
\begin{document}
body text
\end{document}'''
def test_genelatex_wrap_with_breqn():
    """
    Test genelatex with wrap=True for the case breqn.sty is installed.

    With breqn available the equation is wrapped in a dmath* environment.
    """
    def mock_kpsewhich(filename):
        assert filename == "breqn.sty"
        return "path/to/breqn.sty"
    with patch_latextool(mock_kpsewhich):
        assert '\n'.join(latextools.genelatex("x^2", True)) == r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\usepackage{breqn}
\pagestyle{empty}
\begin{document}
\begin{dmath*}
x^2
\end{dmath*}
\end{document}'''
def test_genelatex_wrap_without_breqn():
    """
    Test genelatex with wrap=True for the case breqn.sty is not installed.

    Without breqn the equation falls back to plain $$ ... $$ wrapping.
    """
    def mock_kpsewhich(filename):
        assert filename == "breqn.sty"
        return None
    with patch_latextool(mock_kpsewhich):
        assert '\n'.join(latextools.genelatex("x^2", True)) == r'''\documentclass{article}
\usepackage{amsmath}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{bm}
\pagestyle{empty}
\begin{document}
$$x^2$$
\end{document}'''
@skipif_not_matplotlib
@onlyif_cmds_exist('latex', 'dvipng')
def test_latex_to_png_color():
    """
    Test color settings for latex_to_png.

    Checks that the default color is black (by name and by hex) for both
    the dvipng and matplotlib backends, and that named dvips colors work.
    """
    latex_string = "$x^2$"
    default_value = latextools.latex_to_png(latex_string, wrap=False)
    default_hexblack = latextools.latex_to_png(latex_string, wrap=False,
                                               color='#000000')
    dvipng_default = latextools.latex_to_png_dvipng(latex_string, False)
    dvipng_black = latextools.latex_to_png_dvipng(latex_string, False, 'Black')
    assert dvipng_default == dvipng_black
    mpl_default = latextools.latex_to_png_mpl(latex_string, False)
    mpl_black = latextools.latex_to_png_mpl(latex_string, False, 'Black')
    assert mpl_default == mpl_black
    # Whichever backend produced the default, it must be the black render.
    assert default_value in [dvipng_black, mpl_black]
    assert default_hexblack in [dvipng_black, mpl_black]
    # Test that dvips name colors can be used without error
    dvipng_maroon = latextools.latex_to_png_dvipng(latex_string, False,
                                                   'Maroon')
    # And that it doesn't return the black one
    assert dvipng_black != dvipng_maroon
    mpl_maroon = latextools.latex_to_png_mpl(latex_string, False, 'Maroon')
    assert mpl_black != mpl_maroon
    mpl_white = latextools.latex_to_png_mpl(latex_string, False, 'White')
    mpl_hexwhite = latextools.latex_to_png_mpl(latex_string, False, '#FFFFFF')
    assert mpl_white == mpl_hexwhite
    # The scale argument must change the rendered output.
    mpl_white_scale = latextools.latex_to_png_mpl(latex_string, False,
                                                  'White', 1.2)
    assert mpl_white != mpl_white_scale
def test_latex_to_png_invalid_hex_colors():
    """
    Test that invalid hex colors provided to dvipng gives an exception.

    Both a malformed 6-digit value and a 3-digit shorthand must raise.
    """
    latex_string = "$x^2$"
    pytest.raises(
        ValueError,
        lambda: latextools.latex_to_png(
            latex_string, backend="dvipng", color="#f00bar"
        ),
    )
    pytest.raises(
        ValueError,
        lambda: latextools.latex_to_png(latex_string, backend="dvipng", color="#f00"),
    )
| 29.73057 | 97 | 0.686999 | 0 | 0 | 146 | 0.025444 | 3,217 | 0.560648 | 0 | 0 | 1,733 | 0.302022 |
ead810e7aa0a5da8afdac88e5f50187918893a93 | 736 | py | Python | crits/ips/urls.py | dutrow/crits | 6b357daa5c3060cf622d3a3b0c7b41a9ca69c049 | [
"MIT"
] | 738 | 2015-01-02T12:39:55.000Z | 2022-03-23T11:05:51.000Z | crits/ips/urls.py | dutrow/crits | 6b357daa5c3060cf622d3a3b0c7b41a9ca69c049 | [
"MIT"
] | 605 | 2015-01-01T01:03:39.000Z | 2021-11-17T18:51:07.000Z | crits/ips/urls.py | dutrow/crits | 6b357daa5c3060cf622d3a3b0c7b41a9ca69c049 | [
"MIT"
] | 316 | 2015-01-07T12:35:01.000Z | 2022-03-30T04:44:30.000Z | from django.conf.urls import url
from . import views
# Route table for the IPs app; every route name is prefixed with
# "crits-ips-views-" for namespacing.
urlpatterns = [
    url(r'^search/$', views.ip_search, name='crits-ips-views-ip_search'),
    url(r'^search/(?P<ip_str>\S+)/$', views.ip_search, name='crits-ips-views-ip_search'),
    url(r'^details/(?P<ip>\S+)/$', views.ip_detail, name='crits-ips-views-ip_detail'),
    url(r'^remove/$', views.remove_ip, name='crits-ips-views-remove_ip'),
    url(r'^list/$', views.ips_listing, name='crits-ips-views-ips_listing'),
    url(r'^list/(?P<option>\S+)/$', views.ips_listing, name='crits-ips-views-ips_listing'),
    url(r'^bulkadd/$', views.bulk_add_ip, name='crits-ips-views-bulk_add_ip'),
    url(r'^(?P<method>\S+)/$', views.add_update_ip, name='crits-ips-views-add_update_ip'),
]
| 49.066667 | 91 | 0.669837 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 373 | 0.506793 |
ead9a8850641b407ccfaaf3b32efd4d1d2d9ec12 | 2,869 | py | Python | notes/reference/moocs/udacity/cs50-introduction-to-computer-science/assignments/pset1/pennies/pennies.py | aav789/study-notes | 34eca00cd48869ba7a79c0ea7d8948ee9bde72b9 | [
"MIT"
] | 43 | 2015-06-10T14:48:00.000Z | 2020-11-29T16:22:28.000Z | notes/reference/moocs/udacity/cs50-introduction-to-computer-science/assignments/pset1/pennies/pennies.py | aav789/study-notes | 34eca00cd48869ba7a79c0ea7d8948ee9bde72b9 | [
"MIT"
] | 1 | 2021-11-01T12:01:44.000Z | 2021-11-01T12:01:44.000Z | notes/reference/moocs/udacity/cs50-introduction-to-computer-science/assignments/pset1/pennies/pennies.py | lextoumbourou/notes | 5f94c59a467eb3eb387542bdce398abc0365e6a7 | [
"MIT"
] | 40 | 2015-03-02T10:33:59.000Z | 2020-05-24T12:17:05.000Z | """
pennies.py
Computer Science 50 in Python (Hacker Edition)
Problem Set 1
Get num of days in month then works out how many $$ you'd have by end of the month
if you received a penny on the first day, two on second, four on third and so on
"""
def get_input(question):
    """Get the days input from the user """
    # Python 2 only: raw_input() was renamed to input() in Python 3.
    return raw_input(question + " ")
def is_valid_days(days):
    """Check if the number of days is between 28 and 31"""
    # Returns the day count as an int when valid, otherwise False.
    try:
        days = int(days)
    except ValueError:
        print "Not a valid integer"
        return False
    if days < 28 or days > 31:
        print "Not a valid number of days"
        return False
    return days
def is_valid_cents(cents):
    """Ensure number of cents is a valid int"""
    # Returns the cents as an int when numeric, otherwise False.
    # NOTE(review): 0 is falsy, so entering "0" makes the caller's
    # `while not cents` loop re-prompt forever.
    try:
        return int(cents)
    except ValueError:
        print "That's not a number."
        return False
class Exponenter():
    """Doubling-penny calculator.

    Given a starting number of cents and a day count, computes the amount
    received on the final day when the amount doubles daily
    (cents * 2**(days - 1)) and formats it as a comma-grouped dollar string.
    """
    # Class-level defaults; overwritten per instance in __init__.
    days = None
    cents = None
    dols = None

    def __init__(self, cents, days):
        """
        Takes an integer of cents and days
        then performs exponent analysis
        """
        self.cents = cents
        self.days = days
        self.final_cents = self._exponent()

    def __unicode__(self):
        """Return the string output in $00,000,000 format"""
        output = ""
        # Reverse the string using extended slice syntax
        rev_dols = "{0:.2f}".format(self.dols)[::-1]
        # If the dollars is more than 1000, add a comma every 3 digits
        if self.dols >= 1000:
            for count, char in enumerate(rev_dols):
                count += 1
                output += char
                if count <= 3:
                    # Ignore first 3 characters, as they're decimal points
                    continue
                # Fixed: compare ints with ==/!= rather than identity (`is`),
                # which only worked by accident via CPython's small-int cache.
                if count % 3 == 0 and len(rev_dols) != count:
                    # For every 3 characters, that's not the last one, add a ,
                    output += ","
        else:
            output = rev_dols
        # Reverse the output and return it
        str_dols = output[::-1]
        return "${0}".format(str_dols)

    def __repr__(self):
        """Return the representation as float of dollars"""
        return "{0}".format(self.dols)

    def _exponent(self):
        # For each day, multiply the cents by an incrementer that doubles each time
        inc = 1
        for day in range(1, self.days+1):
            final_cents = self.cents*inc
            inc += inc
        # Return the number of dollars (cents/100)
        # NOTE: assumes days >= 1 (callers validate 28..31).
        self.dols = final_cents/float(100)
        return final_cents
if __name__ == '__main__':
    # Re-prompt until both inputs validate (validators return False on bad input).
    days = False
    while not days:
        days = is_valid_days(get_input("Number of days in month? "))
    cents = False
    while not cents:
        cents = is_valid_cents(get_input("Numbers of cents on first day? "))
    exp = Exponenter(cents, days)
    print "{0}".format(unicode(exp))
| 27.586538 | 83 | 0.580342 | 1,687 | 0.58801 | 0 | 0 | 0 | 0 | 0 | 0 | 1,129 | 0.393517 |
ead9fd7d58e52453c9ee8498ed88b548dd31636b | 266 | py | Python | bcc-apps/python/hello_world.py | Huweicai/ebpf-apps | 09847c44e9a823a331df0f30e2b5cf570a3aa29b | [
"Apache-2.0"
] | 59 | 2022-01-17T11:59:14.000Z | 2022-03-20T13:22:09.000Z | bcc-apps/python/hello_world.py | Huweicai/ebpf-apps | 09847c44e9a823a331df0f30e2b5cf570a3aa29b | [
"Apache-2.0"
] | 1 | 2022-02-20T09:18:25.000Z | 2022-02-20T09:18:25.000Z | bcc-apps/python/hello_world.py | Huweicai/ebpf-apps | 09847c44e9a823a331df0f30e2b5cf570a3aa29b | [
"Apache-2.0"
] | 20 | 2022-01-19T01:47:29.000Z | 2022-03-21T06:29:59.000Z | #!/usr/bin/python3
#
# This is a Hello World example of BPF.
from bcc import BPF
# define BPF program
prog = """
int kprobe__sys_clone(void *ctx)
{
bpf_trace_printk("Hello, World!\\n");
return 0;
}
"""
# load BPF program
b = BPF(text=prog)
b.trace_print()
| 14.777778 | 41 | 0.665414 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 196 | 0.736842 |
eadcff9016e474eb3c8f61f0826e29c9a4160b3b | 159 | py | Python | accounts/admin.py | kamel2700/Build-a-team-for-Startup | 3955eb5a990e27f100981b7186f3593f7b821128 | [
"MIT"
] | null | null | null | accounts/admin.py | kamel2700/Build-a-team-for-Startup | 3955eb5a990e27f100981b7186f3593f7b821128 | [
"MIT"
] | null | null | null | accounts/admin.py | kamel2700/Build-a-team-for-Startup | 3955eb5a990e27f100981b7186f3593f7b821128 | [
"MIT"
] | null | null | null | from django.contrib import admin
from accounts.models import *
# Expose the accounts models in the Django admin with default ModelAdmins.
admin.site.register(UserProfile)
admin.site.register(ProjectPage)
admin.site.register(Comment)
| 22.714286 | 32 | 0.830189 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
eadd7e5f3ab2c3c49e8117ccef42a626f10050df | 697 | py | Python | RandomScripts/FibonacciGenerator.py | AlexGatz/PythonFor100Days | 84f7b8687b6e9ef6c5f4c92f525ea6d72cc364f8 | [
"MIT"
] | 2 | 2018-06-19T20:22:07.000Z | 2018-06-28T23:08:46.000Z | RandomScripts/FibonacciGenerator.py | AlexGatz/PythonFor100Days | 84f7b8687b6e9ef6c5f4c92f525ea6d72cc364f8 | [
"MIT"
] | null | null | null | RandomScripts/FibonacciGenerator.py | AlexGatz/PythonFor100Days | 84f7b8687b6e9ef6c5f4c92f525ea6d72cc364f8 | [
"MIT"
] | null | null | null | """
Created By: Alex J. Gatz
Date: 06/07/2018
This is some code playing with the usage of a python
"Generator" which is really very cool. Another use case
I want to play with is properly ordering installation of
packages to ensure that if there are dependencies that they are installed in the proper order.
Created a recursive fibonacci function.
"""
# Fibonacci generator
def fibonacci(n):
    """Yield the first n + 1 Fibonacci numbers, starting from 0.

    Yields nothing when n is negative.  (Iterative, despite the file
    header's mention of recursion.)
    """
    current, nxt = 0, 1
    for _ in range(n + 1):
        yield current
        current, nxt = nxt, current + nxt
# How many values do you want?
f = fibonacci(30)
# Print each yielded value on one line, separated by spaces.
for x in f:
    print(x, " ", end="")
| 24.034483 | 95 | 0.654232 | 0 | 0 | 174 | 0.249641 | 0 | 0 | 0 | 0 | 461 | 0.661406 |
eaddd0f58ee1e0957f0264ad5e92e223afa6c673 | 2,753 | py | Python | minotaur/_mask.py | trel/minotaur | 608f13582e88677fc946c6cb84ec03459f29b4f9 | [
"Apache-2.0"
] | null | null | null | minotaur/_mask.py | trel/minotaur | 608f13582e88677fc946c6cb84ec03459f29b4f9 | [
"Apache-2.0"
] | null | null | null | minotaur/_mask.py | trel/minotaur | 608f13582e88677fc946c6cb84ec03459f29b4f9 | [
"Apache-2.0"
] | null | null | null | from enum import IntFlag as _IntFlag
from . import _inotify
__all__ = ('Mask',)
class Mask(_IntFlag):
    """inotify watch/event flags.

    Each member below is declared as a tuple ``(value, doc[, show_help])``;
    the custom ``__new__`` unpacks it so the member's int value is the raw
    ``_inotify`` constant, its ``__doc__`` is the human-readable
    description, and ``show_help`` marks whether it belongs in user-facing
    help output.
    """

    # Per-member attribute assigned in __new__.
    show_help: bool

    def __new__(cls, value, doc=None, show_help=True):
        # int.__new__ needs a stub in the typeshed
        # https://github.com/python/typeshed/issues/2686
        #
        # but that broke something else, so they removed it
        # https://github.com/python/typeshed/issues/1464
        #
        # We have no choice but to ignore mypy error here :(
        self = int.__new__(cls, value)  # type: ignore
        self._value_ = value
        if doc is not None:
            self.__doc__ = doc
        self.show_help = show_help
        return self

    # NOTE(review): this string is a no-op expression here -- it is not the
    # first statement in the class body, so it is NOT the class docstring.
    """
    Flags for establishing inotify watches.
    """

    ACCESS = _inotify.IN_ACCESS, 'File was accessed'
    ATTRIB = _inotify.IN_ATTRIB, 'Metaata changed, eg. permissions'
    CLOSE_WRITE = _inotify.IN_CLOSE_WRITE, 'File for writing was closed'
    CLOSE_NOWRITE = _inotify.IN_CLOSE_NOWRITE, \
        'File or dir not opened for writing was closed'
    CREATE = _inotify.IN_CREATE, 'File/dir was created'
    DELETE = _inotify.IN_DELETE, 'File or dir was deleted'
    DELETE_SELF = _inotify.IN_DELETE_SELF, \
        'Watched file/dir was itself deleted'
    MODIFY = _inotify.IN_MODIFY, 'File was modified'
    MOVE_SELF = _inotify.IN_MOVE_SELF, 'Watched file/dir was itself moved'
    MOVED_FROM = _inotify.IN_MOVED_FROM, \
        'Generated for dir containing old filename when a file is renamed'
    MOVED_TO = _inotify.IN_MOVED_TO, \
        'Generated for dir containing new filename when a file is renamed'
    OPEN = _inotify.IN_OPEN, 'File or dir was opened'

    # Combinations of the flags above (per their description strings).
    MOVE = _inotify.IN_MOVE, 'MOVED_FROM | MOVED_TO'
    CLOSE = _inotify.IN_CLOSE, 'IN_CLOSE_WRITE | IN_CLOSE_NOWRITE'

    DONT_FOLLOW = _inotify.IN_DONT_FOLLOW, \
        "Don't dereference pathname if it is a symbolic link"
    EXCL_UNLINK = _inotify.IN_EXCL_UNLINK, \
        "Don't generate events after files have been unlinked"
    MASK_ADD = _inotify.IN_MASK_ADD, 'Add flags to an existing watch', False
    ONESHOT = _inotify.IN_ONESHOT, 'Only generate one event for this watch'
    ONLYDIR = _inotify.IN_ONLYDIR, 'Watch pathname only if it is a dir'
    MASK_CREATE = _inotify.IN_MASK_CREATE, \
        "Only watch path if it isn't already being watched"

    # These are returned in events
    IGNORED = _inotify.IN_IGNORED, 'Watch was removed', False
    ISDIR = _inotify.IN_ISDIR, 'This event is a dir', False
    Q_OVERFLOW = _inotify.IN_Q_OVERFLOW, 'Event queue overflowed', False
    UNMOUNT = _inotify.IN_UNMOUNT, \
        'Filesystem containing watched object was unmounted', False
    EVENT_TYPE = _inotify.EVENT_TYPE_MASK, 'Mask of all event types', False
| 39.328571 | 76 | 0.68834 | 2,667 | 0.968761 | 0 | 0 | 0 | 0 | 0 | 0 | 1,236 | 0.448965 |
eadee64286add827e7acf16d36f21a8972ee0cfb | 4,991 | py | Python | 24_immune_system_simulator.py | KanegaeGabriel/advent-of-code-2018 | b57f21901b731b4ffe6a2bf134d0bda28d326997 | [
"MIT"
] | null | null | null | 24_immune_system_simulator.py | KanegaeGabriel/advent-of-code-2018 | b57f21901b731b4ffe6a2bf134d0bda28d326997 | [
"MIT"
] | null | null | null | 24_immune_system_simulator.py | KanegaeGabriel/advent-of-code-2018 | b57f21901b731b4ffe6a2bf134d0bda28d326997 | [
"MIT"
] | null | null | null | ################################################
# --- Day 24: Immune System Simulator 20XX --- #
################################################
import AOCUtils
def getTargets(atkArmy, defArmy):
    """Target-selection phase of one combat round.

    For each attacking group (callers pass armies pre-sorted by effective
    power, then initiative) pick the not-yet-claimed defender that would
    take the most damage, storing it on ``atkGroup.target`` (None when the
    best remaining choice would take no damage).
    """
    targeted = set()  # defenders already claimed by another attacker
    for atkGroup in atkArmy:
        if len(targeted) < len(defArmy):
            # Rank defenders by (damage taken, effective power, initiative);
            # tuple comparison applies the tie-breakers in that order. The
            # trailing Group object is only compared if all three tie --
            # relies on initiative values being unique (TODO confirm).
            dmgGiven = []
            for defGroup in defArmy:
                dmg = (defGroup.calcDmgTaken(atkGroup), defGroup.getEffectivePower(), defGroup.initiative, defGroup)
                dmgGiven.append(dmg)
            dmgGiven.sort(reverse=True)
            # Find best target that hasn't been targeted yet
            take = 0
            while dmgGiven[take][-1] in targeted:
                take += 1
            # Only select a target the attacker would actually damage.
            if dmgGiven[take][0] > 0:
                targeted.add(dmgGiven[take][-1])
                atkGroup.target = dmgGiven[take][-1]
            else:
                atkGroup.target = None
        # NOTE(review): when every defender is already claimed, the
        # attacker's previous .target is left untouched; Group.attack()
        # clears targets after use, so this appears benign -- confirm.
def battle(rawImmune, rawInfection, boost=0):
    """Simulate a full fight between the immune system and the infection.

    Parameters
    ----------
    rawImmune, rawInfection : list of str
        Raw group description lines, parsed by ``Group``.
    boost : int
        Flat attack-damage bonus applied to every immune-system group.

    Returns
    -------
    tuple
        ``(immune_units, infection_units)`` remaining after the fight, or
        ``(None, None)`` when a round passes with no kills (stalemate).
    """
    immuneArmy = [Group(rawGroup) for rawGroup in rawImmune]
    infectionArmy = [Group(rawGroup) for rawGroup in rawInfection]
    for g in immuneArmy:
        g.dmgAmt += boost
    immuneArmyUnits = sum(g.units for g in immuneArmy)
    infectionArmyUnits = sum(g.units for g in infectionArmy)
    # Main battle loop: one iteration per combat round.
    while immuneArmyUnits > 0 and infectionArmyUnits > 0:
        # Drop dead groups and order by (effective power, initiative) for
        # target selection.
        effAndInit = lambda x: (x.getEffectivePower(), x.initiative)
        # BUGFIX: a generator expression must be parenthesized when other
        # arguments follow it -- the original lines were a SyntaxError.
        immuneArmy = sorted((g for g in immuneArmy if g.alive),
                            key=effAndInit, reverse=True)
        infectionArmy = sorted((g for g in infectionArmy if g.alive),
                               key=effAndInit, reverse=True)
        getTargets(immuneArmy, infectionArmy)
        getTargets(infectionArmy, immuneArmy)
        kills = 0
        # Attacks resolve in decreasing initiative order across both sides.
        allArmies = sorted(immuneArmy + infectionArmy,
                           key=lambda x: x.initiative, reverse=True)
        for army in allArmies:
            if army.alive:  # Only alive groups can attack; removed next round
                kills += army.attack()
        if kills == 0:  # No kills in round = tie, would loop forever
            return None, None
        immuneArmyUnits = sum(g.units for g in immuneArmy)
        infectionArmyUnits = sum(g.units for g in infectionArmy)
    return immuneArmyUnits, infectionArmyUnits
class Group:
    """One army group: a stack of identical units parsed from a puzzle line.

    The parser below extracts fields positionally -- presumably from the
    AoC 2018 day-24 format ("<N> units each with <HP> hit points
    [(weak/immune to ...)] with an attack that does <D> <type> damage at
    initiative <I>") -- TODO confirm against the input file.
    """

    def __init__(self, raw):
        rawSplit = raw.split()
        self.units = int(rawSplit[0])
        self.hp = int(rawSplit[4])
        self.immunities = []
        self.weaknesses = []
        if rawSplit[7].startswith("("):
            # Optional "(weak to ...; immune to ...)" section.
            weaksAndImmunes = raw.split("(")[1].split(")")[0].split("; ")
            for wai in weaksAndImmunes:
                if wai.startswith("weak"): self.weaknesses = wai[8:].split(", ")
                elif wai.startswith("immune"): self.immunities = wai[10:].split(", ")
        # Negative indices tolerate the variable-length weak/immune section.
        self.dmgAmt = int(rawSplit[-6])
        self.dmgType = rawSplit[-5]
        self.initiative = int(rawSplit[-1])
        self.alive = True
        self.target = None  # set by getTargets(), consumed by attack()

    def calcDmgTaken(self, attacker):
        """Damage this group would take: 0 if immune, doubled if weak,
        otherwise the attacker's effective power."""
        dmgAmtMult = 1
        if attacker.dmgType in self.immunities: dmgAmtMult = 0
        if attacker.dmgType in self.weaknesses: dmgAmtMult = 2
        return attacker.getEffectivePower() * dmgAmtMult

    def receiveAttack(self, attacker):
        """Apply an attack; only whole units die (integer division).
        Flips ``alive`` when the group is wiped; returns units lost."""
        dmgAmt = self.calcDmgTaken(attacker)
        unitsLost = dmgAmt // self.hp
        if unitsLost > self.units: unitsLost = self.units
        self.units -= unitsLost
        if self.units <= 0:
            self.alive = False
        return unitsLost

    def attack(self):
        """Attack the pre-selected target (if any), clearing it afterwards;
        returns the number of units killed."""
        unitsLost = 0
        if self.target:
            unitsLost = self.target.receiveAttack(self)
            self.target = None
        return unitsLost

    def getEffectivePower(self):
        """Units times per-unit attack damage."""
        return self.units * self.dmgAmt

    # def __repr__(self):
    #     return "U:{}, HP:{}, IMM:{}, WKN:{}, DMG:{}({}), EP:{}, INI:{}".format(
    #         self.units, self.hp, self.immunities, self.weaknesses,
    #         self.dmgAmt, self.dmgType, self.getEffectivePower(), self.initiative)
################################################
# Split the raw input into the two army sections.
rawInput = [s for s in AOCUtils.loadInput(24) if s]
immuneStart, infectionStart = 0, rawInput.index("Infection:")
rawImmune = rawInput[immuneStart+1:infectionStart]
rawInfection = rawInput[infectionStart+1:]

# Part 1: fight with no boost; the winner's remaining units is the answer.
immuneArmyUnits, infectionArmyUnits = battle(rawImmune, rawInfection)
print("Part 1: {}".format(max(immuneArmyUnits, infectionArmyUnits)))

# Part 2: binary-search the smallest boost with which the immune system wins.
boostLo, boostHi = 0, 1000  # Binary Search
while boostLo != boostHi:
    boost = (boostLo + boostHi) // 2
    immuneArmyUnits, infectionArmyUnits = battle(rawImmune, rawInfection, boost)
    if immuneArmyUnits is None or immuneArmyUnits == 0:  # Tie or loss
        boostLo = boost + 1
    else:
        boostHi = boost
# BUGFIX: re-run with the converged bound (boostLo), not the last probed
# `boost`, which is one too low whenever the final iteration raised boostLo.
immuneArmyUnits, infectionArmyUnits = battle(rawImmune, rawInfection, boostLo)
print("Part 2: {}".format(immuneArmyUnits))
AOCUtils.printTimeTaken()
eadf7939143131af2b0df7b217b4c1285abd158c | 2,552 | py | Python | tasks.py | Egor4ik325/experiment-bot | 5a10b1c4110707b8d6fac4b90d9f594f005566bf | [
"MIT"
] | null | null | null | tasks.py | Egor4ik325/experiment-bot | 5a10b1c4110707b8d6fac4b90d9f594f005566bf | [
"MIT"
] | null | null | null | tasks.py | Egor4ik325/experiment-bot | 5a10b1c4110707b8d6fac4b90d9f594f005566bf | [
"MIT"
] | null | null | null | from io import BytesIO
import requests
from celery import Celery
from api import send_message, send_photo
from imdb2_api import get_movie_by_imdb_id
from imdb_api import IMDBAPIClient
# Start the worker with: celery -A tasks worker --log-level INFO
# A local Redis instance serves as both the result backend and the broker.
app = Celery(
    "tasks", backend="redis://localhost:6379/0", broker="redis://localhost:6379/0"
)
@app.task
def hello():
    """Trivial health-check task; returns a constant string."""
    return "Hello"
@app.task
def reply(token: str, chat_id: int, text: str):
    """Celery task: relay ``text`` to ``chat_id`` via the bot API helper."""
    return send_message(token, chat_id, text)
@app.task
def search_movie(token: str, chat_id: int, rapidapi_key: str, movie_title: str):
    """Search IMDB by title and send a bulleted result list to the chat."""
    c = IMDBAPIClient(rapidapi_key)
    results = c.search_movies_by_title(movie_title)
    # One "- Title (year) [imdb_id]" line per hit.
    result_message = "Movies found for search:\n"
    result_message += "".join(
        [f"- {result.title} ({result.year}) [{result.imdb_id}]\n" for result in results]
    )
    send_message(token, chat_id, result_message)
# str.format template for the movie-details message sent to a chat.
DETAILS_MESSAGE = """
{title}
{description}
- "{tagline}"
- Year: {year}
- Rating: {rating} ({vote_count})
"""
def show_movie(token: str, chat_id: int, rapidapi_key: str, imdb_id: str):
    """Send a movie's poster photo plus a formatted details message.

    Fetches details and images through IMDBAPIClient, saves the poster to
    ./poster.jpg, then pushes the photo and a DETAILS_MESSAGE caption to
    ``chat_id``.
    """
    c = IMDBAPIClient(rapidapi_key)
    details = c.get_movie_details(imdb_id)
    image = c.get_movie_images(imdb_id)
    i = image.poster_image
    i.save("poster.jpg", "JPEG")
    # Send photo -- BUGFIX: use a context manager so the file handle is
    # closed (the original leaked the handle returned by open()).
    with open("poster.jpg", "rb") as poster_file:
        send_photo(token, chat_id, poster_file)
    # Send details
    send_message(
        token,
        chat_id,
        DETAILS_MESSAGE.format(
            title=details.title,
            description=details.description,
            tagline=details.tagline,
            year=details.year,
            rating=details.imdb_rating,
            vote_count=details.vote_count,
        ),
    )
def show_movie2(token: str, chat_id: int, imdb_api_key: str, imdb_id: str):
    """Variant of show_movie built on the ``imdb2_api`` helper: fetch
    details by IMDb id, send the banner image (downloaded in memory, no
    temp file) and a formatted details message."""
    # c = IMDBAPIClient(rapidapi_key)
    # details = c.get_movie_details(imdb_id)
    # image = c.get_movie_images(imdb_id)
    movie = get_movie_by_imdb_id(imdb_api_key, imdb_id)
    details = movie["results"]
    banner = details["banner"]
    # i = image.poster_image
    # i.save("poster.jpg", "JPEG")
    banner_response = requests.get(banner)
    banner_response.raise_for_status()
    # Send photo (raw response bytes)
    send_photo(token, chat_id, banner_response.content)
    # Send details -- NOTE(review): tagline and vote_count are hard-coded
    # placeholders ("No" / 100); presumably this API does not provide them.
    send_message(
        token,
        chat_id,
        DETAILS_MESSAGE.format(
            title=details["title"],
            description=details["description"],
            tagline="No",
            year=details["year"],
            rating=details["rating"],
            vote_count=100,
        ),
    )
| 24.304762 | 88 | 0.648119 | 0 | 0 | 0 | 0 | 549 | 0.215125 | 0 | 0 | 590 | 0.231191 |
eae24d8c828bb6bb378412becf7ee1a02837535c | 1,279 | py | Python | Imap_append.py | satheesheppalapelli/imap | 064ce69f9fdd63e3f7fbf402ef383c38b31fea14 | [
"MIT"
] | null | null | null | Imap_append.py | satheesheppalapelli/imap | 064ce69f9fdd63e3f7fbf402ef383c38b31fea14 | [
"MIT"
] | null | null | null | Imap_append.py | satheesheppalapelli/imap | 064ce69f9fdd63e3f7fbf402ef383c38b31fea14 | [
"MIT"
] | null | null | null | import imaplib
# One-shot demo script: build a minimal email message and APPEND it to a
# Gmail mailbox over IMAP4/SSL (uses `imaplib` imported above).
import email
from email import message
import time
# NOTE(review): hard-coded placeholder credentials; replace with real values
# (or read them from the environment) before running.
username = 'gmail_id'
password = 'gmail_password'
new_message = email.message.Message()
new_message.set_unixfrom('satheesh')
new_message['Subject'] = 'Sample Message'
# from gmail id
new_message['From'] = 'eppalapellisatheesh1@gmail.com'
# to gmail id
new_message['To'] = 'eppalapellisatheesh1@gmail.com'
# message data
new_message.set_payload('This is the body of the message.\n')
# print(new_message)
# you want to connect to a server; specify which server and port
# server = imaplib.IMAP4('server', 'port')
server = imaplib.IMAP4_SSL('imap.googlemail.com')
# after connecting, tell the server who you are to login to gmail
# server.login('user', 'password')
server.login(username, password)
# this will show you a list of available folders
# possibly your Inbox is called INBOX, but check the list of mailboxes
response, mailboxes = server.list()
if response == 'OK':
    response, data = server.select("Inbox")
# APPEND the serialized message; Time2Internaldate renders the timestamp in
# the INTERNALDATE syntax IMAP expects.
response = server.append('INBOX', '', imaplib.Time2Internaldate(time.time()), str(new_message).encode('utf-8'))
# print(response)
if response[0] == 'OK':
    print("Gmail Appended Successfully")
else:
    print("Not Appended")
server.close()
server.logout()
| 32.794872 | 115 | 0.723221 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 669 | 0.523065 |
eae3be40aa24ad4b9655afb4fbddb913e8d4cb32 | 361 | py | Python | autoflow/workflow/components/classification/lda.py | auto-flow/autoflow | f5903424ad8694d57741a0bd6dfeaba320ea6517 | [
"BSD-3-Clause"
] | 49 | 2020-04-16T11:17:28.000Z | 2020-05-06T01:32:44.000Z | autoflow/workflow/components/classification/lda.py | auto-flow/auto-flow | f5903424ad8694d57741a0bd6dfeaba320ea6517 | [
"BSD-3-Clause"
] | null | null | null | autoflow/workflow/components/classification/lda.py | auto-flow/auto-flow | f5903424ad8694d57741a0bd6dfeaba320ea6517 | [
"BSD-3-Clause"
] | 3 | 2021-04-10T13:58:39.000Z | 2022-03-25T08:27:53.000Z | from copy import deepcopy
from typing import Dict
from autoflow.workflow.components.classification_base import AutoFlowClassificationAlgorithm
__all__=["LinearDiscriminantAnalysis"]
class LinearDiscriminantAnalysis(AutoFlowClassificationAlgorithm):
    """AutoFlow wrapper for sklearn's LinearDiscriminantAnalysis.

    The ``class__``/``module__`` attributes name the wrapped estimator;
    presumably the base class instantiates ``module__.class__`` lazily --
    confirm in AutoFlowClassificationAlgorithm.
    """
    class__ = "LinearDiscriminantAnalysis"
    module__ = "sklearn.discriminant_analysis"
    # One-vs-rest strategy flag consumed by the base class.
    OVR__ = True
| 25.785714 | 92 | 0.833795 | 173 | 0.479224 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.240997 |
eae5610490f4bff538d28053548c7a7377626c1f | 4,362 | py | Python | wradlib/georef/misc.py | ElmerJeanpierreLopez/wradlib | ae6aa24c68f431b735a742510cea3475fb55059d | [
"MIT"
] | null | null | null | wradlib/georef/misc.py | ElmerJeanpierreLopez/wradlib | ae6aa24c68f431b735a742510cea3475fb55059d | [
"MIT"
] | null | null | null | wradlib/georef/misc.py | ElmerJeanpierreLopez/wradlib | ae6aa24c68f431b735a742510cea3475fb55059d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Copyright (c) 2011-2019, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
"""
Miscellaneous
^^^^^^^^^^^^^
.. autosummary::
:nosignatures:
:toctree: generated/
bin_altitude
bin_distance
site_distance
"""
import numpy as np
def bin_altitude(r, theta, sitealt, re, ke=4./3.):
    """Radar bin altitude above sea level, refraction-corrected.

    Uses the effective-earth-radius model of :cite:`Doviak1993`:

    .. math::

        h = \\sqrt{r^2 + (k_e r_e)^2 + 2 r k_e r_e \\sin\\theta} - k_e r_e

    Parameters
    ----------
    r : :class:`numpy:numpy.ndarray`
        Ranges [m].
    theta : scalar or :class:`numpy:numpy.ndarray` broadcastable to the
        shape of r; elevation angles in degrees (0° horizontal, +90°
        pointing straight up).
    sitealt : float
        Radar site altitude [m] a.s.l.
    re : float
        Earth's radius [m].
    ke : float
        Effective-earth-radius adjustment factor for the refractivity
        gradient; 4/3 is a good approximation for most weather-radar
        wavelengths.

    Returns
    -------
    altitude : :class:`numpy:numpy.ndarray`
        Heights of the radar bins [m].
    """
    r_eff = ke * re
    dist_to_center = r_eff + sitealt
    sin_elev = np.sin(np.radians(theta))
    slant = np.sqrt(r ** 2 + dist_to_center ** 2
                    + 2 * r * dist_to_center * sin_elev)
    return slant - r_eff
def bin_distance(r, theta, sitealt, re, ke=4./3.):
    """Great-circle distance from the radar site to each bin over a
    spherical earth, refraction-corrected via the effective earth radius.

    Implements (matching the code below)

    .. math::

        s = k_e r_e \\arctan\\left(
            \\frac{r \\cos\\theta}{r \\sin\\theta + k_e r_e + h}\\right)

    where :math:`h` is the radar site altitude amsl.

    Parameters
    ----------
    r : :class:`numpy:numpy.ndarray`
        Ranges [m].
    theta : scalar or :class:`numpy:numpy.ndarray` broadcastable to the
        shape of r; elevation angles in degrees (0° horizontal, +90°
        pointing straight up).
    sitealt : float
        Site altitude [m] amsl.
    re : float
        Earth's radius [m].
    ke : float
        Effective-earth-radius adjustment factor for the refractivity
        gradient (default 4/3).

    Returns
    -------
    distance : :class:`numpy:numpy.ndarray`
        Great-circle arc distances [m].
    """
    r_eff = ke * re
    elev = np.radians(theta)
    denominator = r * np.sin(elev) + (r_eff + sitealt)
    return r_eff * np.arctan(r * np.cos(elev) / denominator)
def site_distance(r, theta, binalt, re=None, ke=4./3.):
    """Great-circle distance from a bin at altitude ``binalt`` back to the
    radar site over a spherical earth (:cite:`Doviak1993`).

    .. math::

        s = k_e r_e \\arcsin\\left(
            \\frac{r \\cos\\theta}{k_e r_e + h_n}\\right)

    with :math:`h_n` the bin altitude as provided by
    :func:`~wradlib.georef.misc.bin_altitude`.

    Parameters
    ----------
    r : :class:`numpy:numpy.ndarray`
        Ranges [m].
    theta : scalar or :class:`numpy:numpy.ndarray` broadcastable to the
        shape of r; elevation angles in degrees (0° horizontal, +90°
        pointing straight up).
    binalt : :class:`numpy:numpy.ndarray`
        Bin altitudes [m] amsl, same shape as r.
    re : float
        Earth's radius [m]; the ``None`` default cannot be used in the
        computation, so a value must be supplied.
    ke : float
        Effective-earth-radius adjustment factor for the refractivity
        gradient (default 4/3).

    Returns
    -------
    distance : :class:`numpy:numpy.ndarray`
        Great-circle arc distances [m].
    """
    r_eff = ke * re
    horizontal = r * np.cos(np.radians(theta))
    return r_eff * np.arcsin(horizontal / (r_eff + binalt))
| 31.381295 | 77 | 0.631133 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,774 | 0.864011 |
eae5f44f40b19ebe3bff78ee768e402bba9911ee | 1,860 | py | Python | notebooks/models/dnn.py | AFAgarap/dnn-trust | 4e7ba13a30e61a12403181b60274289c1f340fc7 | [
"Apache-2.0"
] | 4 | 2019-12-31T06:11:41.000Z | 2022-02-15T18:54:14.000Z | notebooks/models/dnn.py | AFAgarap/dnn-trust | 4e7ba13a30e61a12403181b60274289c1f340fc7 | [
"Apache-2.0"
] | 2 | 2022-02-09T23:32:48.000Z | 2022-02-10T01:21:09.000Z | notebooks/models/dnn.py | AFAgarap/dnn-trust | 4e7ba13a30e61a12403181b60274289c1f340fc7 | [
"Apache-2.0"
] | 2 | 2019-09-30T08:46:33.000Z | 2020-03-23T13:59:42.000Z | # Copyright 2019-2020 Abien Fred Agarap
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of a feed-forward neural network model"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__author__ = "Abien Fred Agarap"
__version__ = "1.0.1"
import tensorflow as tf
class NeuralNet(tf.keras.Model):
    """Two-hidden-layer feed-forward classifier with dropout.

    Expected constructor kwargs:
        units: sequence of two ints -- sizes of the hidden layers.
        input_shape: input feature shape, passed to the first Dense layer.
        dropout_rate: dropout probability applied after each hidden layer.
        num_classes: number of output classes (softmax output).
    """

    def __init__(self, **kwargs):
        super(NeuralNet, self).__init__()
        self.hidden_layer_1 = tf.keras.layers.Dense(
            units=kwargs["units"][0],
            activation=tf.nn.relu,
            input_shape=kwargs["input_shape"],
        )
        self.dropout_layer_1 = tf.keras.layers.Dropout(rate=kwargs["dropout_rate"])
        self.hidden_layer_2 = tf.keras.layers.Dense(
            units=kwargs["units"][1], activation=tf.nn.relu
        )
        self.dropout_layer_2 = tf.keras.layers.Dropout(rate=kwargs["dropout_rate"])
        self.output_layer = tf.keras.layers.Dense(
            units=kwargs["num_classes"], activation=tf.nn.softmax
        )

    def call(self, features):
        """Forward pass: dense -> dropout -> dense -> dropout -> softmax."""
        activation = self.hidden_layer_1(features)
        activation = self.dropout_layer_1(activation)
        activation = self.hidden_layer_2(activation)
        activation = self.dropout_layer_2(activation)
        output = self.output_layer(activation)
        return output
| 37.959184 | 83 | 0.702688 | 1,021 | 0.548925 | 0 | 0 | 0 | 0 | 0 | 0 | 726 | 0.390323 |
eae66dce66bcef404a3f5bf876e881287f4b4e44 | 1,985 | py | Python | 2D CNN/eval2dcnn.py | sarosijbose/A-Fusion-architecture-for-Human-Activity-Recognition | f55ee9a2297001088af2f9feb9cd61a2dcf28203 | [
"Apache-2.0"
] | null | null | null | 2D CNN/eval2dcnn.py | sarosijbose/A-Fusion-architecture-for-Human-Activity-Recognition | f55ee9a2297001088af2f9feb9cd61a2dcf28203 | [
"Apache-2.0"
] | null | null | null | 2D CNN/eval2dcnn.py | sarosijbose/A-Fusion-architecture-for-Human-Activity-Recognition | f55ee9a2297001088af2f9feb9cd61a2dcf28203 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import tensorflow as tf
from tensorflow import keras
from keras.applications.xception import Xception
import h5py
import json
import cv2
import math
import logging
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.xception import preprocess_input, decode_predictions
logging.basicConfig(level = logging.INFO)
# Number of evenly spaced frames to sample from each video.
sampling_rate = 5
# BUGFIX: these were declared as `a = b = []`, binding each pair to the SAME
# list object, so appending frame indices also grew the sampled-frame buffer
# (and labels grew scores). Give every accumulator its own list.
sampled_frames = []
frame_stamps = []
top1_labels = []
top1_scores = []
def sampling_time_stamps(_sample_path):
    """Open the video at ``_sample_path`` and append ``sampling_rate``
    evenly spaced frame indices to the module-level ``frame_stamps`` list.

    Side effects: binds the opened ``cv2.VideoCapture`` to the module
    global ``cap`` -- ``sampling_frames()`` reads frames from it -- and
    extends ``frame_stamps``.
    """
    # BUGFIX: `cap` was a local in the original, yet sampling_frames()
    # reads it at module scope (NameError); make it an explicit global.
    global cap
    cap = cv2.VideoCapture(_sample_path)
    total_frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # BUGFIX: use lazy %-style logging args; the original passed the count
    # as a bare extra argument with no placeholder in the message, which
    # the logging module cannot format.
    logging.info('Total no. of frames in video: %s', total_frame_count)
    step = round(total_frame_count / sampling_rate)
    for i in range(sampling_rate):
        frame_stamps.append(step * (i + 1))
def sampling_frames():
    """Scan the opened video and collect the frames whose 1-based indices
    appear in ``frame_stamps``, resized to 299x299 (the size later fed to
    the Xception model), into the module-level ``sampled_frames`` list.

    NOTE(review): reads the module-level ``cap``; it must already hold an
    opened cv2.VideoCapture (see sampling_time_stamps) or this raises
    NameError. ``frameId`` caps collection at 5 frames -- presumably it
    should track ``sampling_rate``; confirm.
    """
    frameId , frame_count = 5, 0
    success,frame = cap.read()
    while success:
        frame_count+=1
        if frame_count in frame_stamps and frameId >= 1:
            frame = cv2.resize(frame, (299,299))
            sampled_frames.append(frame)
            success,frame = cap.read()
            frameId-=1
        else:
            success,frame = cap.read()
            pass
def generate_and_average_predictions():
    """Classify every sampled frame with ImageNet-pretrained Xception and
    record each frame's top-1 label and score in the module-level lists.

    Returns:
        (top1_labels, top1_scores): the module-level accumulator lists.

    NOTE(review): despite the name, no averaging happens here -- callers
    receive the raw per-frame top-1 results.
    """
    base_model = keras.applications.Xception(
        weights='imagenet') # Load weights pre-trained on ImageNet.
    for i in range(len(sampled_frames)):
        img = sampled_frames[i]
        # Convert to a batch of one preprocessed array, as Xception expects.
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        preds = base_model.predict(x)
        print('Prediction level:', (i+1), decode_predictions(preds, top=5)[0])
        # decode_predictions returns [(class_id, label, score), ...].
        top1_labels.append(decode_predictions(preds, top=1)[0][0][1])
        top1_scores.append(decode_predictions(preds, top=1)[0][0][2])
    return top1_labels, top1_scores
def run():
    """Full pipeline: pick frame indices, grab the frames, classify them.

    NOTE(review): ``_sample_path`` is read from module scope but is never
    defined in this file -- callers must set it before invoking run().
    """
    sampling_time_stamps(_sample_path)
    sampling_frames()
    labels, scores = generate_and_average_predictions()
    return labels, scores
| 26.118421 | 88 | 0.693199 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.050378 |
eae8b1571de983e30e5b62cbb49111d3bbe371a5 | 3,773 | py | Python | tests/test_redis.py | arrrlo/python-transfer | 653c74e8db8da0ff6ee37dff4534b52b4fc7a29c | [
"MIT"
] | null | null | null | tests/test_redis.py | arrrlo/python-transfer | 653c74e8db8da0ff6ee37dff4534b52b4fc7a29c | [
"MIT"
] | null | null | null | tests/test_redis.py | arrrlo/python-transfer | 653c74e8db8da0ff6ee37dff4534b52b4fc7a29c | [
"MIT"
] | 1 | 2021-07-30T06:01:20.000Z | 2021-07-30T06:01:20.000Z | import os
import pytest
import fakeredis
from db_transfer.adapter_redis import Redis
from db_transfer.transfer import Transfer, sent_env
@pytest.fixture()
def fake_redis(monkeypatch):
    """Patch Redis.connect so every adapter talks to an in-memory fakeredis
    server instead of a real Redis instance."""
    fake_redis = lambda *args, **kwargs: fakeredis.FakeStrictRedis(decode_responses=True)
    monkeypatch.setattr(Redis, 'connect', fake_redis)
    #fake_redis().flushall()
    return fake_redis
@pytest.fixture()
def redis_transfer(fake_redis):
    """Build a Transfer handler bound to the fake Redis.

    Connection settings travel through environment variables that the
    sent_env decorators map onto the adapter's HOST/PORT/DB keys.
    """
    os.environ['test_host_1'] = 'localhost'
    os.environ['test_port_1'] = '6379'
    os.environ['test_db_1'] = '0'
    @sent_env('redis', 'HOST', 'test_host_1')
    @sent_env('redis', 'PORT', 'test_port_1')
    @sent_env('redis', 'DB', 'test_db_1')
    class TestHandlerRedis_1(Transfer):
        pass
    redis_transfer = TestHandlerRedis_1(namespace='namespace_1', adapter_name='redis')
    return redis_transfer
def test_redis_string(redis_transfer):
    """String set/get, both directly and inside the handler's context
    manager (presumably a deferred-write/pipeline scope -- confirm)."""
    redis_transfer['key_1'] = 'value'
    redis_transfer['key_2:key_3'] = 'value'
    with redis_transfer:
        redis_transfer['key_4'] = 'value'
        redis_transfer['key_2:key_5'] = 'value'
    assert str(redis_transfer['key_1']) == 'value'
    assert str(redis_transfer['key_2:key_3']) == 'value'
    assert str(redis_transfer['key_4']) == 'value'
    assert str(redis_transfer['key_2:key_5']) == 'value'
def test_redis_list(redis_transfer):
    """Lists round-trip, including nested lists and lists of dicts."""
    redis_transfer['key_6:key_7'] = ['list_element_1', 'list_element_2']
    with redis_transfer:
        redis_transfer['key_8:key_9'] = [['list_element_1', 'list_element_2']]
        redis_transfer['key_10'] = [{'key': 'value', 'foo': 'bar'}, {'key': 'value'}]
    assert list(redis_transfer['key_6:key_7']) == ['list_element_1', 'list_element_2']
    assert list(redis_transfer['key_8:key_9']) == [['list_element_1', 'list_element_2']]
    assert list(redis_transfer['key_10']) == [{'key': 'value', 'foo': 'bar'}, {'key': 'value'}]
def test_redis_set(redis_transfer):
    """Sets round-trip through the handler."""
    redis_transfer['key_11:key_12'] = set(['list_element_1', 'list_element_2'])
    assert set(redis_transfer['key_11:key_12']) == {'list_element_1', 'list_element_2'}
def test_redis_hash(redis_transfer):
    """Nested dicts round-trip; field access and (key, value) iteration."""
    test_dict = {'foo': 'bar', 'doo': {'goo': 'gar'}, 'zoo': [1, 2, 3, {'foo': 'bar'}]}
    redis_transfer['hash_key'] = test_dict
    assert dict(redis_transfer['hash_key']) == test_dict
    assert redis_transfer['hash_key']['foo'] == test_dict['foo']
    assert redis_transfer['hash_key']['doo'] == test_dict['doo']
    assert redis_transfer['hash_key']['zoo'] == test_dict['zoo']
    for key, value in redis_transfer['hash_key']:
        assert test_dict[key] == value
def test_redis_hash_iterator(redis_transfer):
    """Explicit iter() over a stored hash yields (key, value) pairs."""
    test_dict = {'foo': 'bar', 'doo': {'goo': 'gar'}, 'zoo': [1, 2, 3, {'foo': 'bar'}]}
    redis_transfer['hash_key'] = test_dict
    for key, value in iter(redis_transfer['hash_key']):
        assert test_dict[key] == value
def test_redis_delete(redis_transfer):
    """del removes a key; a deleted key reads back as None."""
    redis_transfer['some_key_1'] = 'some_value'
    assert str(redis_transfer['some_key_1']) == 'some_value'
    del redis_transfer['some_key_1']
    assert redis_transfer['some_key_1'] is None
def test_redis_keys(redis_transfer):
    """Key listing, nested-prefix listing, and deletion of nested keys.

    NOTE(review): relies on keys written by the earlier tests in this
    module (the fixture never flushes the fake Redis), so this test is
    order-dependent and fails when run in isolation.
    """
    assert redis_transfer.keys() == ['hash_key', 'key_1', 'key_10',
                                     'key_11:key_12', 'key_2:key_3',
                                     'key_2:key_5', 'key_4', 'key_6:key_7',
                                     'key_8:key_9']
    assert redis_transfer['key_2'].keys() == ['key_2:key_3', 'key_2:key_5']
    del redis_transfer['key_2:key_3']
    del redis_transfer['key_2:key_5']
    assert redis_transfer.keys() == ['hash_key', 'key_1', 'key_10',
                                     'key_11:key_12', 'key_4', 'key_6:key_7',
                                     'key_8:key_9']
| 34.935185 | 95 | 0.642725 | 48 | 0.012722 | 0 | 0 | 710 | 0.188179 | 0 | 0 | 1,171 | 0.310363 |
eaea558a7bb60d7e0462270ff2a5a8f1d8c33727 | 248 | py | Python | tests/python-reference/bool/bool-isinstance.py | jpolitz/lambda-py-paper | 746ef63fc1123714b4adaf78119028afbea7bd76 | [
"Apache-2.0"
] | 25 | 2015-04-16T04:31:49.000Z | 2022-03-10T15:53:28.000Z | tests/python-reference/bool/bool-isinstance.py | jpolitz/lambda-py-paper | 746ef63fc1123714b4adaf78119028afbea7bd76 | [
"Apache-2.0"
] | 1 | 2018-11-21T22:40:02.000Z | 2018-11-26T17:53:11.000Z | tests/python-reference/bool/bool-isinstance.py | jpolitz/lambda-py-paper | 746ef63fc1123714b4adaf78119028afbea7bd76 | [
"Apache-2.0"
] | 1 | 2021-03-26T03:36:19.000Z | 2021-03-26T03:36:19.000Z | ___assertIs(isinstance(True, bool), True)
# bool is a subclass of int: True/False are both bools and ints, while the
# plain ints 0 and 1 are not bools.
___assertIs(isinstance(False, bool), True)
___assertIs(isinstance(True, int), True)
___assertIs(isinstance(False, int), True)
___assertIs(isinstance(1, bool), False)
___assertIs(isinstance(0, bool), False)
| 35.428571 | 42 | 0.782258 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
eaea8ef5c89fd0ec4d7cbbc57ba9faabf52111af | 2,051 | py | Python | sql_controller/crud.py | hp5441/chatbottest | da74a2fb58abbb48903336228055884304e3580b | [
"MIT"
] | null | null | null | sql_controller/crud.py | hp5441/chatbottest | da74a2fb58abbb48903336228055884304e3580b | [
"MIT"
] | null | null | null | sql_controller/crud.py | hp5441/chatbottest | da74a2fb58abbb48903336228055884304e3580b | [
"MIT"
] | null | null | null | from pydantic.types import Json
import json
from sqlalchemy.orm import Session
from sqlalchemy import desc
from . import models, schemas
def get_questions(db: Session, skip: int = 0, limit: int = 100):
    """Return a page of questions.

    Args:
        db: active SQLAlchemy session.
        skip: number of rows to skip (offset).
        limit: maximum number of rows to return.

    BUGFIX: ``limit`` was accepted but never applied, so every row after
    ``skip`` was returned; apply it so pagination is actually bounded.
    """
    return db.query(models.Question).offset(skip).limit(limit).all()
def get_top_questions(db: Session, limit: int = 5):
    """Return the ``limit`` most popular questions, highest popularity first."""
    return db.query(models.Question).order_by(desc(models.Question.popularity)).limit(limit).all()
def get_question(db: Session, question_id: int):
    """Fetch a single question by primary key, or None when absent."""
    return db.query(models.Question).filter(models.Question.id==question_id).first()
def create_question(db: Session, question: schemas.QuestionCreate):
    """Persist a new question and return the refreshed ORM instance."""
    db_question = models.Question(**question.dict())
    db.add(db_question)
    db.commit()
    # Reload server-generated fields (e.g. the primary key).
    db.refresh(db_question)
    return db_question
def get_answers(db: Session, skip: int = 0, limit: int = 100):
    """Return a page of answers.

    Args:
        db: active SQLAlchemy session.
        skip: number of rows to skip (offset).
        limit: maximum number of rows to return.

    BUGFIX: as with get_questions, ``limit`` was declared but never
    applied; apply it so the page size is bounded.
    """
    return db.query(models.Answer).offset(skip).limit(limit).all()
def get_answer(db: Session, answer_id:int):
    """Fetch a single answer by primary key, or None when absent."""
    return db.query(models.Answer).filter(models.Answer.id==answer_id).first()
def create_answer(db: Session, answer: schemas.AnswerCreate, question_id: int):
    """Persist a new answer attached to ``question_id``; return it refreshed."""
    db_answer = models.Answer(answer=answer.answer, question_id=question_id)
    db.add(db_answer)
    db.commit()
    # Reload server-generated fields (e.g. the primary key).
    db.refresh(db_answer)
    return db_answer
def delete_question(db: Session, question_id: int):
    """Delete a question and all of its answers; returns the number of
    question rows deleted (0 or 1)."""
    q_list = db.query(models.Question).filter(models.Question.id==question_id).all()
    for q in q_list:
        for ans in q.answers:
            # Delete child answers first -- presumably to satisfy the FK
            # constraint; confirm cascade settings on the model. (The
            # returned count is unused.)
            db_ans = delete_answer(db, ans.id)
    db_question = db.query(models.Question).filter(models.Question.id==question_id).delete(synchronize_session=False)
    db.commit()
    return db_question
def delete_answer(db: Session, answer_id: int):
    """Delete an answer by primary key; returns the delete count (0 or 1)."""
    db_answer = db.query(models.Answer).filter(models.Answer.id==answer_id).delete(synchronize_session=False)
    db.commit()
    return db_answer
def increment_popularity(db: Session, question_id: int):
    """Bump the popularity counter of the given question by one."""
    q_list = db.query(models.Question).filter(models.Question.id==question_id).all()
    for q in q_list:
        # The primary-key filter yields at most one row; the loop makes a
        # missing question a silent no-op.
        q.popularity+=1
    db.commit()
| 31.553846 | 117 | 0.7255 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
eaeb3cdb20f9f64c42c6a45abbc350367d3fd2fa | 3,044 | py | Python | consult/api.py | project-sabai/Sabai-Backend | fbc25d6ec774900d2602f6f8cf57544d873f401a | [
"MIT"
] | null | null | null | consult/api.py | project-sabai/Sabai-Backend | fbc25d6ec774900d2602f6f8cf57544d873f401a | [
"MIT"
] | 9 | 2020-06-05T22:10:47.000Z | 2021-06-10T18:38:13.000Z | consult/api.py | project-sabai/Sabai-Backend | fbc25d6ec774900d2602f6f8cf57544d873f401a | [
"MIT"
] | 1 | 2019-09-01T04:41:58.000Z | 2019-09-01T04:41:58.000Z | from django.contrib.auth.models import User
from django.core import serializers
from django.core.exceptions import ObjectDoesNotExist
from django.db import DataError
from django.http import JsonResponse, HttpResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework.decorators import api_view
from clinicmodels.models import ConsultType, Visit
from consult.forms import ConsultForm
@api_view(['GET'])
def get_all_consult_types(request):
    """GET endpoint: return every ConsultType serialized as JSON.

    Responds 404 when the table is empty or a lookup fails, 400 on DataError.
    """
    try:
        consulttypes = ConsultType.objects.all()
        # NOTE(review): .count() issues an extra query; .exists() would be
        # cheaper -- confirm before changing.
        if consulttypes.count() == 0:
            return JsonResponse({"message": "ConsultType matching query does not exist"}, status=404)
        response = serializers.serialize("json", consulttypes)
        return HttpResponse(response, content_type='application/json')
    except ObjectDoesNotExist as e:
        return JsonResponse({"message": str(e)}, status=404)
    except DataError as e:
        return JsonResponse({"message": str(e)}, status=400)
@api_view(['POST'])
def create_new_consult_type(request):
    """POST endpoint: create a ConsultType from the 'consult_type' form
    parameter and return the new row serialized as JSON."""
    try:
        if 'consult_type' not in request.POST:
            return JsonResponse({"message": "POST: parameter 'consult_type' not found"}, status=400)
        consult_type_field = request.POST['consult_type']
        consulttype = ConsultType(type=consult_type_field)
        consulttype.save()
        response = serializers.serialize("json", [consulttype, ])
        return HttpResponse(response, content_type='application/json')
    except ObjectDoesNotExist as e:
        return JsonResponse({"message": str(e)}, status=404)
    except DataError as e:
        return JsonResponse({"message": str(e)}, status=400)
@api_view(['POST'])
@csrf_exempt
def create_new_consult(request):
    """POST endpoint: create a consult linking a visit, a doctor and a
    consult type; returns the saved consult serialized as JSON.

    Required POST parameters: 'visit', 'doctor', 'consult_type'.
    """
    try:
        if 'visit' not in request.POST:
            return JsonResponse({"message": "POST: parameter 'visit' not found"}, status=400)
        if 'doctor' not in request.POST:
            return JsonResponse({"message": "POST: parameter 'doctor' not found"}, status=400)
        if 'consult_type' not in request.POST:
            return JsonResponse({"message": "POST: parameter 'consult_type' not found"}, status=400)
        visit_id = request.POST['visit']
        doctor_id = request.POST['doctor']
        consult_type_name = request.POST['consult_type']
        # Existence checks: .get() raises ObjectDoesNotExist (-> 404) when
        # the visit/doctor is unknown; the return values are unused.
        Visit.objects.get(pk=visit_id)
        User.objects.get(pk=doctor_id)
        consult_type = ConsultType.objects.get(type=consult_type_name)
        consult_form = ConsultForm(request.POST)
        # NOTE(review): setting an attribute on the bound form instance
        # does not feed a field into validation/saving -- presumably meant
        # to attach the FK; verify against ConsultForm's definition.
        consult_form.consult_type = consult_type
        if consult_form.is_valid():
            consult = consult_form.save()
            response = serializers.serialize("json", [consult, ])
            return HttpResponse(response, content_type='application/json')
        else:
            return JsonResponse({"message": consult_form.errors}, status=400)
    except ObjectDoesNotExist as e:
        return JsonResponse({"message": str(e)}, status=404)
    except DataError as e:
        return JsonResponse({"message": str(e)}, status=400)
| 42.277778 | 101 | 0.688896 | 0 | 0 | 0 | 0 | 2,628 | 0.863338 | 0 | 0 | 481 | 0.158016 |
eaed09332cd7612e815a573ad8ab315253289352 | 11,295 | py | Python | tgficbot/main.py | ufoptg/telegram-find-in-channel-bot | 853853fb74c9bcaecece3d67b94725378b4c042e | [
"BSD-3-Clause"
] | 1 | 2021-05-11T21:16:32.000Z | 2021-05-11T21:16:32.000Z | tgficbot/main.py | ufoptg/telegram-find-in-channel-bot | 853853fb74c9bcaecece3d67b94725378b4c042e | [
"BSD-3-Clause"
] | null | null | null | tgficbot/main.py | ufoptg/telegram-find-in-channel-bot | 853853fb74c9bcaecece3d67b94725378b4c042e | [
"BSD-3-Clause"
] | null | null | null | from telethon import TelegramClient
from telethon.events import NewMessage, CallbackQuery, MessageEdited
from telethon.events import StopPropagation
from telethon.tl import types, functions
from telethon.tl.custom import Button
from telethon.errors.rpcerrorlist import ChannelPrivateError
import os
import re
import shlex
import signal
from pathlib import Path
import logging
import asyncio
import argparse
import configparser
from typing import List
from . import states
from .db import Database
from . import i18n
argp = argparse.ArgumentParser(description='Start Telegram FindInChannelBot.')
argp.add_argument(
'--config',
type=str,
default=os.path.expanduser('~/.config/tgficbot.cfg'),
help='specify config file')
argp.add_argument(
'--dbpath',
type=str,
default=os.path.expanduser('~/.cache/'),
help='specify directory to store databases')
args = argp.parse_args()
db = Database(Path(args.dbpath) / 'tgficbot.db')
onstate = states.StateHandler(db)
withi18n = i18n.I18nHandler(db)
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
config = configparser.ConfigParser()
config.read(args.config)
bot = TelegramClient(
str(Path(args.dbpath) / 'bot.session'), config['api']['id'],
config['api']['hash']).start(bot_token=config['bot']['token'])
@bot.on(NewMessage(pattern='/start'))
@withi18n
async def start_command_handler(event: NewMessage.Event, _):
if not event.is_private:
return
await event.respond(
_('Hi! To /find in your channel, you must /add it to this bot first.'))
chat = await event.get_chat()
db.save_user(chat)
db.conn.commit()
raise StopPropagation
@bot.on(NewMessage(pattern='/add'))
@onstate(states.Empty)
@withi18n
async def add_command_handler(event, _):
await event.respond(
_('To add your channel, do the following:\n'
'\n'
'1. Add this bot to your channel as an admin;\n'
'2. Forward a message from the channel to me.'))
user = await event.get_chat()
db.set_user_state(user, states.AddingAChannel)
@bot.on(NewMessage(pattern='/cancel'))
@withi18n
async def cancel_command_handler(event: NewMessage.Event, _):
user = await event.get_chat()
current_state = db.get_user_state(user)
if current_state == states.Empty:
return
db.clear_user_state(user)
db.set_user_selected(user.id, None)
await event.respond(_('Aborted.'))
@bot.on(NewMessage())
@onstate(states.AddingAChannel)
@withi18n
async def adding_forward_handler(event: NewMessage.Event, _):
user = await event.get_chat()
if event.message.fwd_from is None:
await event.respond(
_('Please forward any message from your channel to me, '
'or /cancel to abort.'))
return
if event.message.fwd_from.channel_id is None:
await event.respond(_('Please forward from a channel.'))
return
await event.respond(_('Getting channel infos...'))
try:
channel = await bot.get_entity(event.message.fwd_from.channel_id)
except ChannelPrivateError:
await event.respond(
_('Please add this bot to your channel before you forward me channel messages.'
))
return
if channel.admin_rights is None:
await event.respond(
_('Please add this bot to your channel before you forward me channel messages.'
))
return
if db.check_channel_saved(channel):
await event.respond(_('Channel already added. Abort.'))
db.clear_user_state(user)
return
db.save_channel(channel)
async for admin in bot.iter_participants(
channel, filter=types.ChannelParticipantsAdmins):
db.save_channel_admin_relation(channel.id, admin)
full_channel = await bot(
functions.channels.GetFullChannelRequest(channel=channel))
await event.respond(_('Obtaining previous messages...'))
for i in range(full_channel.full_chat.read_inbox_max_id):
message = await bot.get_messages(channel, ids=i)
db.save_message(message)
db.conn.commit()
await event.respond(_('Add finished.'))
db.clear_user_state(user)
@bot.on(NewMessage(pattern=r'^/find +(.+)'))
@onstate(states.Empty)
@withi18n
async def arged_find_command_handler(event: NewMessage.Event, _):
"""Find a pattern in a channel using a CLI-like syntax"""
raw_args = event.pattern_match.group(1)
try:
args = shlex.split(raw_args)
except ValueError:
await event.respond(
_('Invalid command, use `/help find` for more information'))
return
channel_pattern = args[0]
pattern = ' '.join(args[1:])
user = await event.get_chat()
if channel_pattern.startswith('@'):
channel_id = db.get_channel_id_from_name(user,
channel_pattern.lstrip('@'))
if not channel_id:
await event.respond(
_('No such channel: **{}**').format(channel_pattern))
return
else:
matched_ids = db.match_user_owned_channels_with_pattern(
user, channel_pattern)
if not len(matched_ids):
await event.respond(
_('No such channel: **{}**').format(channel_pattern))
return
if len(matched_ids) > 1:
await event.respond(
_('Multiple channels matched, finding in **{}**').format(
db.get_channel_title(matched_ids[0])))
channel_id = matched_ids[0]
found_message_ids = db.find_in_messages(channel_id, pattern)
if not len(found_message_ids):
await event.respond('No results.')
return
for message_id in found_message_ids:
await bot.forward_messages(user, message_id, channel_id)
def three_buttons_each_line(buttons: List[Button]) -> List[List[Button]]:
res = []
for i in range(0, len(buttons), 3):
res.append(buttons[i:i + 3])
return res
@bot.on(NewMessage(pattern=r'^/find$'))
@onstate(states.Empty)
@withi18n
async def find_command_handler(event: NewMessage.Event, _):
"""Finding interactively"""
if not event.is_private:
await event.respond(
_('This command can only be used in private chat.'))
return
user = await event.get_chat()
user_owned_channel_ids = db.get_user_owned_channels(user)
if len(user_owned_channel_ids) == 0:
await event.respond(
_("You haven't had any channel added to this bot. Please /add a channel first."
))
return
def channel_id2button(channel_id):
channel_title = db.get_channel_title(channel_id)
return Button.inline(channel_title, data=channel_id)
buttons = list(map(channel_id2button, user_owned_channel_ids))
buttons = three_buttons_each_line(buttons)
await event.respond(_('Select a channel to search:'), buttons=buttons)
db.set_user_state(user, states.SelectingAChannelToFind)
@bot.on(CallbackQuery())
@onstate(states.SelectingAChannelToFind)
@withi18n
async def select_channel_to_find_handler(event: CallbackQuery.Event, _):
user = await event.get_chat()
channel_id = int(event.data)
if user.id not in db.get_channel_admins(channel_id):
await event.respond(
_("Sorry, you don't have the permission to access this channel."))
db.clear_user_state(user)
return
channel_title = db.get_channel_title(channel_id)
db.set_user_state(user, states.FindingInAChannel)
db.set_user_selected(user.id, channel_id)
await event.respond(
_('Now type in what you want to find in **{}**, or /cancel to quit.').
format(channel_title))
@bot.on(NewMessage())
@onstate(states.FindingInAChannel)
@withi18n
async def finding_handler(event: NewMessage.Event, _):
user = await event.get_chat()
channel_id = db.get_user_selected(user.id)
pattern = event.raw_text
found_message_ids = db.find_in_messages(channel_id, pattern)
if len(found_message_ids) == 0:
await event.respond(_('No results.'))
return
for message_id in found_message_ids:
await bot.forward_messages(user, message_id, channel_id)
@bot.on(NewMessage())
async def channel_newmessage_handler(event: NewMessage.Event):
"""Continuously listen to channel updates, save new messages"""
if event.is_channel:
db.save_message(event.message)
@bot.on(MessageEdited())
async def channel_messageedited_handler(event: MessageEdited.Event):
if event.is_channel:
db.update_message(event.message)
@bot.on(NewMessage(pattern='/lang'))
@onstate(states.Empty)
@withi18n
async def lang_command_handler(event: NewMessage.Event, _):
user = await event.get_chat()
buttons = [
Button.inline(i18n.languages[code], data=code)
for code in i18n.langcodes
]
buttons = three_buttons_each_line(buttons)
buttons.insert(
0, [Button.inline(_('Follow Telegram settings'), data='follow')])
db.set_user_state(user, states.SettingLang)
await event.respond(_('Select your language:'), buttons=buttons)
@bot.on(CallbackQuery())
@onstate(states.SettingLang)
async def setting_lang_handler(event: CallbackQuery.Event):
user = await event.get_chat()
langcode = event.data.decode()
if (langcode not in i18n.langcodes) and (langcode != 'follow'):
await event.respond('Unsupported language selected.')
return
db.set_user_lang(user.id, langcode)
db.clear_user_state(user)
async def respond(event, _):
await event.respond(
_('Hi! To /find in your channel, you must /add it to this bot first.'
))
await withi18n(respond)(event)
@bot.on(NewMessage(pattern=r'/help ?(\w*)'))
@onstate(states.Empty)
@withi18n
async def help_command_handler(event: NewMessage.Event, _):
# May be the specific command or ''
command = event.pattern_match.group(1)
if not command:
await event.respond(
_('/add - Add a channel to the bot\n'
'/find - Find in a channel\n'
'/cancel - Cancel or quit current operation\n'
'/lang - Set bot language\n'
'\n'
'Use `/help [command]` to view help about a specific command.'))
return
if command == 'add':
await event.respond(
_('**Usage**:\n `/add`\n\nAdd a channel to the bot'))
elif command == 'find':
await event.respond(_('**Usage**:\n `/find`\n\nFind in a channel'))
elif command == 'cancel':
await event.respond(
_('**Usage**:\n `/cancel`\n\nCancel or quit current operation'))
elif command == 'lang':
await event.respond(_('**Usage**:\n `/lang`\n\nSet bot language'))
else:
await event.respond(_('Command not found: `/{}`').format(command))
def sigterm_handler(num, frame):
db.conn.commit()
os.sys.exit(130)
def main():
# Save database when being killed
signal.signal(signal.SIGTERM, sigterm_handler)
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(bot.disconnected)
except KeyboardInterrupt:
db.conn.commit()
if __name__ == '__main__':
main()
| 31.997167 | 91 | 0.66587 | 0 | 0 | 0 | 0 | 9,353 | 0.828066 | 8,580 | 0.759628 | 2,232 | 0.19761 |
eaedb119b1aaac0cbb296953345e2a7e95e0dc48 | 339 | py | Python | onadata/apps/fsforms/rceivers.py | awemulya/fieldsight-kobocat | f302d084e30fb637d43ec638c701e01a3dddc721 | [
"BSD-2-Clause"
] | 38 | 2017-02-28T05:39:40.000Z | 2019-01-16T04:39:04.000Z | onadata/apps/fsforms/rceivers.py | awemulya/fieldsightt | f302d084e30fb637d43ec638c701e01a3dddc721 | [
"BSD-2-Clause"
] | 20 | 2017-04-27T09:14:27.000Z | 2019-01-17T06:35:52.000Z | onadata/apps/fsforms/rceivers.py | awemulya/fieldsightt | f302d084e30fb637d43ec638c701e01a3dddc721 | [
"BSD-2-Clause"
] | 5 | 2017-02-22T12:25:19.000Z | 2019-01-15T11:16:40.000Z | # from django.db.models.signals import post_save
# from django.dispatch import receiver
# from onadata.apps.logger.models import XForm
#
# from onadata.apps.fsforms.models import FieldSightXF
#
#
# @receiver(post_save, sender=XForm)
# def save_to_fieldsight_form(sender, instance, **kwargs):
# FieldSightXF.objects.create(xf=instance)
| 30.818182 | 58 | 0.781711 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 329 | 0.970501 |
eaeeb3cbaa78604b0db2b05ad778edd4eb43aa98 | 2,383 | py | Python | highlander/api/controllers/resource.py | StephenTao/stephen | 06da7cbc93b40fcd089eeed2972adc1fe6bd3cb9 | [
"Apache-2.0"
] | 1 | 2020-01-21T11:31:39.000Z | 2020-01-21T11:31:39.000Z | terracotta/api/controllers/resource.py | kvshamray/terracota | 8f6419693a2add12c0cd27005e6f58f8295ad7e6 | [
"Apache-2.0"
] | null | null | null | terracotta/api/controllers/resource.py | kvshamray/terracota | 8f6419693a2add12c0cd27005e6f58f8295ad7e6 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2013 - Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from wsme import types as wtypes
class Resource(wtypes.Base):
"""REST API Resource."""
_wsme_attributes = []
def to_dict(self):
d = {}
for attr in self._wsme_attributes:
attr_val = getattr(self, attr.name)
if not isinstance(attr_val, wtypes.UnsetType):
d[attr.name] = attr_val
return d
@classmethod
def from_dict(cls, d):
obj = cls()
for key, val in d.items():
if hasattr(obj, key):
setattr(obj, key, val)
return obj
def __str__(self):
"""WSME based implementation of __str__."""
res = "%s [" % type(self).__name__
first = True
for attr in self._wsme_attributes:
if not first:
res += ', '
else:
first = False
res += "%s='%s'" % (attr.name, getattr(self, attr.name))
return res + "]"
def to_string(self):
return json.dumps(self.to_dict())
class ResourceList(Resource):
"""Resource containing the list of other resources."""
def to_dict(self):
d = {}
for attr in self._wsme_attributes:
attr_val = getattr(self, attr.name)
if isinstance(attr_val, list):
if isinstance(attr_val[0], Resource):
d[attr.name] = [v.to_dict() for v in attr_val]
elif not isinstance(attr_val, wtypes.UnsetType):
d[attr.name] = attr_val
return d
class Link(Resource):
"""Web link."""
href = wtypes.text
target = wtypes.text
@classmethod
def sample(cls):
return cls(href='http://example.com/here',
target='here')
| 25.084211 | 77 | 0.579522 | 1,694 | 0.710869 | 0 | 0 | 306 | 0.12841 | 0 | 0 | 807 | 0.338649 |
eaf0021867e5f8655350350f668d136cce9a9e15 | 3,030 | py | Python | LoadContracts.py | DZahar0v/DeployCheck | 009900657eb4c02927383e50c8afcc3e343b903f | [
"MIT"
] | 1 | 2021-12-24T12:09:06.000Z | 2021-12-24T12:09:06.000Z | LoadContracts.py | DZahar0v/DeployCheck | 009900657eb4c02927383e50c8afcc3e343b903f | [
"MIT"
] | null | null | null | LoadContracts.py | DZahar0v/DeployCheck | 009900657eb4c02927383e50c8afcc3e343b903f | [
"MIT"
] | null | null | null | import json
import requests
import os
import subprocess
import platform
HTTP = requests.session()
class ClientException(Exception):
message = 'unhandled error'
def __init__(self, message=None):
if message is not None:
self.message = message
def getURL(address):
url = "https://api.etherscan.io/api"
url += "?module=contract"
url += "&action=getsourcecode"
url += "&address=" + address
url += "&apikey=Y4VHI8GSCYU1JWR4KKFVZC1VZUTG81N3Y6"
return url
def connect(url):
try:
req = HTTP.get(url)
except requests.exceptions.ConnectionError:
raise ClientException
if req.status_code == 200:
# Check for empty response
if req.text:
data = req.json()
status = data.get('status')
if status == '1' or status == '0':
return data
else:
raise ClientException
raise ClientException
def getCode(jsonCode, fileName):
code = jsonCode[0]['SourceCode']
contractName = jsonCode[0]['ContractName']
if (code == ''):
print(fileName + ': not verified yet!')
return code
if (code.find('"content": "') == -1):
return code
# removing unnecessary braces
code = code[1:-1]
code = code.replace("\r", "")
code = code.replace("\n", "")
# Etherscan API send bad JSON
index = code.find('"content": "')
clearCode = ''
while index != -1:
clearCode += code[:index+12]
code = code[index+12:]
index2 = code.find('" },')
if (index2 == -1):
index2 = code.find('" }')
tmpString = code[:index2]
tmpString = tmpString.replace('\\"', "'")
clearCode += tmpString
code = code[index2:]
index = code.find('"content": "')
clearCode += code
code = json.loads(clearCode)
contractCode = ''
for src in code['sources']:
if (src.find(contractName) != -1):
contractCode = code['sources'][src]['content']
break
return contractCode
if __name__ == "__main__":
with open("Config.json") as jsonFile:
jsonObject = json.load(jsonFile)
jsonFile.close()
dir = jsonObject['directory']
addresses = jsonObject['addresses']
isExist = os.path.exists(dir)
if not isExist:
os.makedirs(dir)
for address in addresses:
url = getURL(address[1])
req = connect(url)
code = getCode(req['result'], address[0])
if (code == ''):
continue
file = open(dir + address[0] + '.sol', "w+")
file.write(code)
file.close()
# File comparison
print('Open: ' + address[0])
etherscanCode = dir + address[0] + '.sol'
githubCode = address[2]
if (platform.system() == 'Windows'):
subprocess.call(['C:\\Program Files (x86)\\Meld\\meld.exe', etherscanCode, githubCode])
else:
os.system('meld ' + etherscanCode + ' ' + githubCode) | 27.545455 | 99 | 0.560396 | 170 | 0.056106 | 0 | 0 | 0 | 0 | 0 | 0 | 556 | 0.183498 |
eaf2773ac6533f918328896eb209b664483d98ae | 1,193 | py | Python | oci/config.py | bennyz/oci | d7d0af782542c89b79b2af3858bae394a38aedde | [
"Apache-2.0"
] | null | null | null | oci/config.py | bennyz/oci | d7d0af782542c89b79b2af3858bae394a38aedde | [
"Apache-2.0"
] | null | null | null | oci/config.py | bennyz/oci | d7d0af782542c89b79b2af3858bae394a38aedde | [
"Apache-2.0"
] | null | null | null | from collections import namedtuple
import os
from six.moves import configparser
DEFAULT_CONFIG = {
('jenkins', 'host', 'jenkins.ovirt.org'),
('gerrit', 'host', 'gerrit.ovirt.org')}
Jenkins = namedtuple('jenkins', 'host, user_id, api_token')
Gerrit = namedtuple('gerrit', 'host')
Config = namedtuple('config', 'jenkins, gerrit')
class Error(Exception):
pass
def config_parser():
cfg = configparser.RawConfigParser()
# Setup default configuration
for (section, key, value) in DEFAULT_CONFIG:
cfg.add_section(section)
cfg.set(section, key, value)
return cfg
def load(path=os.path.expanduser("~/.config/oci.conf")):
cfg = config_parser()
cfg.read(path)
try:
return Config(
jenkins=Jenkins(
host=cfg.get('jenkins', 'host'),
user_id=cfg.get('jenkins', 'user_id'),
api_token=cfg.get('jenkins', 'api_token')),
gerrit=Gerrit(
host=cfg.get('gerrit', 'host')
))
except configparser.NoOptionError as err:
raise Error(
"Option {!r} in section {!r} is required"
.format(err.option, err.section))
| 25.934783 | 59 | 0.605197 | 32 | 0.026823 | 0 | 0 | 0 | 0 | 0 | 0 | 297 | 0.248952 |
eaf350eaef15875b69577a0cc860781804858973 | 2,148 | py | Python | config/site_setting.py | tiantaozhang/site-basics | e3245aeba689765862d399da8d239f6ad0d8f466 | [
"MIT"
] | 1 | 2018-03-12T11:47:32.000Z | 2018-03-12T11:47:32.000Z | config/site_setting.py | tiantaozhang/ep_site | e3245aeba689765862d399da8d239f6ad0d8f466 | [
"MIT"
] | null | null | null | config/site_setting.py | tiantaozhang/ep_site | e3245aeba689765862d399da8d239f6ad0d8f466 | [
"MIT"
] | null | null | null | import os
from datetime import timedelta
UPLOAD_FOLDER = ''
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
MAX_CONTENT_LENGTH = 16 * 1024 * 1024
class Config:
ROOT_PATH = os.path.abspath('.')
LOG_PATH = ROOT_PATH + '/logs'
APP_LOG_FILE = ROOT_PATH + '/logs/admin.log'
SECRET_KEY = os.environ.get("SECRET_KEY") or "xd9\x85\x9c\xbc\x19\x9b\xe6ch\xdd\x12\x04F\x87%R5\xb3\xa7\xc2P\x93P\xe2"
class DevelopmentConfig(Config):
DEBUG = True
# 废弃
SSO_SWITCH = False
# SSO测试账号
SSO_TEST_ACCOUNT = 'admin@youmi.net'
# session lifetime
PERMANENT_SESSION_LIFETIME = timedelta(days=1)
SESSION_COOKIE_NAME = "auth"
SESSION_COOKIE_HTTPONLY = False
SQLALCHEMY_DATABASE_URI = "mysql+cymysql://youmi:iloveUMLife123@172.16.1.50/youmi"
SQLALCHEMY_BINDS = {
# "youmi": SQLALCHEMY_DATABASE_URI,
"admin": "mysql+cymysql://youmi:iloveUMLife123@172.16.1.50/youmi_admin",
"spot": "mysql+cymysql://youmi:iloveUMLife123@172.16.1.50/youmi_spot",
"stat": "mysql+cymysql://youmi:iloveUMLife123@172.16.1.50/youmi_stat",
"data": "mysql+cymysql://youmi:iloveUMLife123@172.16.1.50/youmi_data",
"rtb_report": "mysql+cymysql://youmi:iloveUMLife123@172.16.1.50/youmi_rtb_report",
"rtb": "mysql+cymysql://youmi:iloveUMLife123@172.16.1.50/youmi_rtb"
}
SQLALCHEMY_TRACK_MODIFICATIONS = True
REDSHIFT = "dbname=youmi_stats user=ymserver password=host=youmi-statsdata.cpwyku9ohxzt.cn-north-1.redshift.amazonaws.com.cn port=5439"
AUDIENCE_REDSHIFT = "dbname=youmi_analyser user=ymserver password=host=youmi-statsdata.cpwyku9ohxzt.cn-north-1.redshift.amazonaws.com.cn port=5439"
SENTRY_DSN = "http://29e17e66e54747d796a76d10d73d13d3:136a2735eba84f65af8461776eb3d197@sentry.awscn.umlife.net/20"
RTB_LOG_01 = 'http://censor.y.cn'
UPLOAD_FOLDED = '/home/liqifeng/YouMiCode/operate/tmp'
class TestingConfig(Config):
pass
class ProductionConfig(Config):
pass
config = {
"development": DevelopmentConfig,
"testing": TestingConfig,
"production": ProductionConfig,
"default": DevelopmentConfig
} | 34.095238 | 151 | 0.712756 | 1,827 | 0.845833 | 0 | 0 | 0 | 0 | 0 | 0 | 1,169 | 0.541204 |
eaf3f83969eefc20c915399632dfbe8f86dda3dd | 8,087 | py | Python | atlas/aws_utils/src/test/test_aws_bucket.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
] | 296 | 2020-03-16T19:55:00.000Z | 2022-01-10T19:46:05.000Z | atlas/aws_utils/src/test/test_aws_bucket.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
] | 57 | 2020-03-17T11:15:57.000Z | 2021-07-10T14:42:27.000Z | atlas/aws_utils/src/test/test_aws_bucket.py | DeepLearnI/atlas | 8aca652d7e647b4e88530b93e265b536de7055ed | [
"Apache-2.0"
] | 38 | 2020-03-17T21:06:05.000Z | 2022-02-08T03:19:34.000Z |
import unittest
from mock import Mock
from foundations_spec import *
from foundations_aws.aws_bucket import AWSBucket
class TestAWSBucket(Spec):
class MockListing(object):
def __init__(self, bucket, files):
self._bucket = bucket
self._files = files
def __call__(self, Bucket, Prefix, Delimiter):
if Bucket != self._bucket:
return {}
return {
'Contents': [{'Key': Prefix + key} for key in self._grouped_and_prefixed_files(Prefix, Delimiter)],
'CommonPrefixes': [{'Prefix': Prefix + new_prefix} for new_prefix in self._unique_delimited_prefixes(Prefix, Delimiter)]
}
def _unique_delimited_prefixes(self, prefix, delimiter):
items = set()
# below is done to preserve order
for key in self._prefixes(prefix, delimiter):
if not key in items:
items.add(key)
yield key
def _prefixes(self, prefix, delimiter):
for key in self._prefixed_files(prefix):
if delimiter in key:
yield key.split(delimiter)[0]
def _grouped_and_prefixed_files(self, prefix, delimiter):
for key in self._prefixed_files(prefix):
if not delimiter in key:
yield key
def _prefixed_files(self, prefix):
prefix_length = len(prefix)
for key in self._files:
if key.startswith(prefix):
yield key[prefix_length:]
connection_manager = let_patch_mock(
'foundations_aws.global_state.connection_manager'
)
connection = let_mock()
mock_file = let_mock()
@let
def file_name(self):
return self.faker.name()
@let
def data(self):
return self.faker.sha256()
@let
def data_body(self):
mock = Mock()
mock.read.return_value = self.data
mock.iter_chunks.return_value = [self.data]
return mock
@let
def bucket_prefix(self):
return self.faker.name()
@let
def bucket_postfix(self):
return self.faker.uri_path()
@let
def bucket_name_with_slashes(self):
return self.bucket_prefix + '/' + self.bucket_postfix
@let
def upload_file_name_with_slashes(self):
return self.bucket_postfix + '/' + self.file_name
@let
def bucket(self):
return AWSBucket(self.bucket_path)
@let
def bucket_with_slashes(self):
return AWSBucket(self.bucket_name_with_slashes)
@let
def bucket_path(self):
return 'testing-bucket'
@let
def source_path(self):
return self.faker.name()
@let
def source_path_with_slashes(self):
return self.bucket_postfix + '/' + self.source_path
@set_up
def set_up(self):
self.connection_manager.bucket_connection.return_value = self.connection
def test_upload_from_string_uploads_data_to_bucket_with_prefix(self):
self.bucket_with_slashes.upload_from_string(self.file_name, self.data)
self.connection.put_object.assert_called_with(Bucket=self.bucket_prefix, Key=self.upload_file_name_with_slashes, Body=self.data)
def test_exists_returns_true_when_file_exists_with_prefix(self):
self.bucket_with_slashes.exists(self.file_name)
self.connection.head_object.assert_called_with(Bucket=self.bucket_prefix, Key=self.upload_file_name_with_slashes)
def test_download_as_string_uploads_data_to_bucket_with_prefix(self):
self.connection.get_object = ConditionalReturn()
self.connection.get_object.return_when({'Body': self.data_body}, Bucket=self.bucket_prefix, Key=self.upload_file_name_with_slashes)
result = self.bucket_with_slashes.download_as_string(self.file_name)
self.assertEqual(self.data, result)
def test_download_to_file_uploads_data_to_bucket_with_prefix(self):
self.connection.get_object = ConditionalReturn()
self.connection.get_object.return_when({'Body': self.data_body}, Bucket=self.bucket_prefix, Key=self.upload_file_name_with_slashes)
result = self.bucket_with_slashes.download_to_file(self.file_name, self.mock_file)
self.mock_file.write.assert_called_with(self.data)
def test_remove_removes_prefixed_files(self):
self.bucket_with_slashes.remove(self.file_name)
self.connection.delete_object.assert_called_with(Bucket=self.bucket_prefix, Key=self.upload_file_name_with_slashes)
def test_move_moves_prefixed_files(self):
self.bucket_with_slashes.move(self.source_path, self.file_name)
source_info = {'Bucket': self.bucket_prefix, 'Key': self.source_path_with_slashes}
self.connection.copy_object.assert_called_with(Bucket=self.bucket_prefix, CopySource=source_info, Key=self.upload_file_name_with_slashes)
def test_list_files_returns_empty(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_path,
[]
)
self.assertEqual([], self._fetch_listing('*'))
def test_list_files_returns_all_results(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_path,
['my.txt', 'scheduler.log']
)
self.assertEqual(['my.txt', 'scheduler.log'], self._fetch_listing('*'))
def test_list_files_returns_file_type_filter(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_path,
['my.txt', 'scheduler.log']
)
self.assertEqual(['my.txt'], self._fetch_listing('*.txt'))
def test_list_files_returns_all_results_dot_directory(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_path,
['my.txt', 'scheduler.log']
)
self.assertEqual(['my.txt', 'scheduler.log'],
self._fetch_listing('./*'))
def test_list_files_returns_file_type_filter_dot_directory(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_path,
['my.txt', 'scheduler.log']
)
self.assertEqual(['my.txt'], self._fetch_listing('./*.txt'))
def test_list_files_returns_only_local_directory(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_path,
['my.txt', 'scheduler.log', 'path/to/some/other/files']
)
self.assertEqual(['my.txt', 'scheduler.log', 'path'], self._fetch_listing('*'))
def test_list_files_returns_only_sub_directory(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_path,
['my.txt', 'scheduler.log', 'path/to/some/other/files']
)
self.assertEqual(['path/to/some/other/files'], self._fetch_listing('path/to/some/other/*'))
def test_list_files_returns_folder_within_sub_directory(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_path,
['path/to/some/other/files']
)
self.assertEqual(['path/to'], self._fetch_listing('path/*'))
def test_list_files_returns_arbitrary_filter(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_path,
['some_stuff_here', 'no_stuff_there', 'some_more_stuff_here']
)
self.assertEqual(['some_stuff_here', 'some_more_stuff_here'], self._fetch_listing('some_*_here'))
def test_list_files_supports_prefixes(self):
self.connection.list_objects_v2.side_effect = self.MockListing(
self.bucket_prefix,
[self.upload_file_name_with_slashes]
)
result = list(self.bucket_with_slashes.list_files('*'))
self.assertEqual([self.file_name], result)
def _fetch_listing(self, pathname):
generator = self.bucket.list_files(pathname)
return list(generator)
| 37.267281 | 145 | 0.66712 | 7,964 | 0.98479 | 848 | 0.10486 | 1,142 | 0.141214 | 0 | 0 | 683 | 0.084457 |
eaf500661c99c51c47381cb5a31580af77de6e77 | 11,948 | py | Python | psychrochartmaker/remote.py | azogue/psychrocam | d18d745362380c911320e3e336596c6d68b822c5 | [
"MIT"
] | 4 | 2019-02-22T04:15:33.000Z | 2021-11-14T19:53:19.000Z | psychrochartmaker/remote.py | azogue/psychrocam | d18d745362380c911320e3e336596c6d68b822c5 | [
"MIT"
] | null | null | null | psychrochartmaker/remote.py | azogue/psychrocam | d18d745362380c911320e3e336596c6d68b822c5 | [
"MIT"
] | null | null | null | """
Support for an interface to work with a remote instance of Home Assistant.
If a connection error occurs while communicating with the API a
HomeAssistantError will be raised.
For more details about the Python API, please refer to the documentation at
https://home-assistant.io/developers/python_api/
"""
import datetime as dt
import enum
import json
import logging
import pytz
import re
from types import MappingProxyType
from typing import Optional, Dict, Any, List
import urllib.parse
# from aiohttp.hdrs import METH_GET, METH_POST, METH_DELETE, CONTENT_TYPE
from aiohttp.hdrs import METH_GET, CONTENT_TYPE
import requests
# Time zone handling: everything defaults to UTC unless converted explicitly.
UTC = DEFAULT_TIME_ZONE = pytz.utc  # type: dt.tzinfo

# State attribute key carrying a human-readable entity name.
ATTR_FRIENDLY_NAME = 'friendly_name'

CONTENT_TYPE_JSON = 'application/json'
# Legacy header used to pass the API password to Home Assistant.
HTTP_HEADER_HA_AUTH = 'X-HA-access'

# Default port a Home Assistant instance listens on.
SERVER_PORT = 8123

# REST API endpoint templates; "{}" placeholders are filled via str.format().
URL_API = '/api/'
URL_API_CONFIG = '/api/config'
URL_API_STATES = '/api/states'
URL_API_STATES_ENTITY = '/api/states/{}'
URL_API_EVENTS = '/api/events'
URL_API_EVENTS_EVENT = '/api/events/{}'
URL_API_SERVICES = '/api/services'
URL_API_SERVICES_SERVICE = '/api/services/{}/{}'

_LOGGER = logging.getLogger(__name__)

# Pattern for validating entity IDs (format: <domain>.<entity>)
ENTITY_ID_PATTERN = re.compile(r"^(\w+)\.(\w+)$")

# Regex for ISO 8601-style datetimes with an optional fractional second and
# optional UTC offset, adapted from Django (django.utils.dateparse).
# Copyright (c) Django Software Foundation and individual contributors.
# All rights reserved.
# https://github.com/django/django/blob/master/LICENSE
DATETIME_RE = re.compile(
    r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
    r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
    r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
    r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$')
def parse_datetime(dt_str: str) -> Optional[dt.datetime]:
    """Parse a string and return a datetime.datetime.

    This function supports time zone offsets. When the input contains one,
    the output uses a timezone with a fixed offset from UTC.
    Raises ValueError if the input is well formatted but not a valid datetime.
    Returns None if the input isn't well formatted.
    """
    match = DATETIME_RE.match(dt_str)
    if match is None:
        # Input did not even look like a datetime; signal "not parseable".
        return None
    parts = match.groupdict()  # type: Dict[str, Any]
    if parts['microsecond']:
        # Right-pad so ".5" means 500000 microseconds, not 5.
        parts['microsecond'] = parts['microsecond'].ljust(6, '0')
    zone_text = parts.pop('tzinfo')
    if zone_text == 'Z':
        zone = UTC
    elif zone_text is None:
        # No offset present: produce a naive datetime.
        zone = None
    else:
        # Offset like "+HH", "+HHMM" or "+HH:MM"; minutes are the last
        # two characters when anything beyond the hours is present.
        hours = int(zone_text[1:3])
        minutes = int(zone_text[-2:]) if len(zone_text) > 3 else 0
        delta = dt.timedelta(hours=hours, minutes=minutes)
        if zone_text[0] == '-':
            delta = -delta
        zone = dt.timezone(delta)
    # Drop groups that did not match and convert the rest to ints.
    fields = {key: int(value) for key, value in parts.items()
              if value is not None}
    fields['tzinfo'] = zone
    return dt.datetime(**fields)
def split_entity_id(entity_id: str) -> List[str]:
    """Split an entity id of the form <domain>.<object_id> into its parts.

    Only the first dot is significant; any further dots stay inside the
    object id portion.
    """
    return entity_id.split(".", maxsplit=1)
def valid_entity_id(entity_id: str) -> bool:
    """Return True when *entity_id* matches the <domain>.<object_id> format."""
    return bool(ENTITY_ID_PATTERN.match(entity_id))
def valid_state(state: str) -> bool:
    """Return True if *state* fits within the 255-character limit."""
    return len(state) <= 255
class HomeAssistantError(Exception):
    """Base exception for errors raised while talking to Home Assistant."""
class InvalidEntityFormatError(HomeAssistantError):
    """Raised when an entity id does not follow <domain>.<object_id>."""
class InvalidStateError(HomeAssistantError):
    """Raised when a state value exceeds the allowed length."""
def as_local(dattim: dt.datetime) -> dt.datetime:
    """Convert a UTC datetime object to local time zone."""
    if dattim.tzinfo == DEFAULT_TIME_ZONE:
        # Already in the target zone; nothing to convert.
        return dattim
    if dattim.tzinfo is None:
        # Naive datetimes are interpreted as UTC before conversion.
        dattim = UTC.localize(dattim)
    return dattim.astimezone(DEFAULT_TIME_ZONE)
def repr_helper(inp: Any) -> str:
    """Help creating a more readable string representation of objects."""
    if isinstance(inp, (dict, MappingProxyType)):
        # Render mappings as "key=value, key=value", recursing on both sides.
        pairs = ("{}={}".format(repr_helper(key), repr_helper(item))
                 for key, item in inp.items())
        return ", ".join(pairs)
    if isinstance(inp, dt.datetime):
        # Show datetimes in local time, ISO formatted.
        return as_local(inp).isoformat()
    return str(inp)
class State(object):
    """Object to represent a state within the state machine.
    entity_id: the entity that is represented.
    state: the state of the entity
    attributes: extra information on entity and state
    last_changed: last time the state was changed, not the attributes.
    last_updated: last time this object was updated.
    """
    # __slots__ keeps per-instance memory small; many State objects exist.
    __slots__ = ['entity_id', 'state', 'attributes',
                 'last_changed', 'last_updated']
    def __init__(self, entity_id, state, attributes=None, last_changed=None,
                 last_updated=None):
        """Initialize a new state.
        Raises InvalidEntityFormatError / InvalidStateError on bad input.
        """
        # States are always stored as strings, whatever the caller passed in.
        state = str(state)
        if not valid_entity_id(entity_id):
            raise InvalidEntityFormatError((
                "Invalid entity id encountered: {}. "
                "Format should be <domain>.<object_id>").format(entity_id))
        if not valid_state(state):
            raise InvalidStateError((
                "Invalid state encountered for entity id: {}. "
                "State max length is 255 characters.").format(entity_id))
        self.entity_id = entity_id.lower()
        self.state = state
        # Read-only view so callers cannot mutate attributes afterwards.
        self.attributes = MappingProxyType(attributes or {})
        self.last_updated = last_updated or dt.datetime.now(UTC)
        self.last_changed = last_changed or self.last_updated
    @property
    def domain(self):
        """Domain of this state."""
        return split_entity_id(self.entity_id)[0]
    @property
    def object_id(self):
        """Object id of this state."""
        return split_entity_id(self.entity_id)[1]
    @property
    def name(self):
        """Name of this state."""
        # Fall back to a prettified object_id when no friendly name is set.
        return (
            self.attributes.get(ATTR_FRIENDLY_NAME) or
            self.object_id.replace('_', ' '))
    def as_dict(self):
        """Return a dict representation of the State.
        Async friendly.
        To be used for JSON serialization.
        Ensures: state == State.from_dict(state.as_dict())
        """
        return {'entity_id': self.entity_id,
                'state': self.state,
                'attributes': dict(self.attributes),
                'last_changed': self.last_changed,
                'last_updated': self.last_updated}
    @classmethod
    def from_dict(cls, json_dict):
        """Initialize a state from a dict.
        Async friendly.
        Ensures: state == State.from_json_dict(state.to_json_dict())
        """
        if not (json_dict and 'entity_id' in json_dict and
                'state' in json_dict):
            return None
        # Timestamps may arrive as ISO strings (e.g. straight from JSON).
        last_changed = json_dict.get('last_changed')
        if isinstance(last_changed, str):
            last_changed = parse_datetime(last_changed)
        last_updated = json_dict.get('last_updated')
        if isinstance(last_updated, str):
            last_updated = parse_datetime(last_updated)
        return cls(json_dict['entity_id'], json_dict['state'],
                   json_dict.get('attributes'), last_changed, last_updated)
    def __eq__(self, other):
        """Return the comparison of the state."""
        # NOTE(review): last_changed/last_updated are excluded from equality
        # -- confirm this is intentional before relying on full comparison.
        return (self.__class__ == other.__class__ and
                self.entity_id == other.entity_id and
                self.state == other.state and
                self.attributes == other.attributes)
    def __repr__(self):
        """Return the representation of the states."""
        attr = "; {}".format(repr_helper(self.attributes)) \
            if self.attributes else ""
        return "<state {}={}{} @ {}>".format(
            self.entity_id, self.state, attr,
            as_local(self.last_changed).isoformat())
class APIStatus(enum.Enum):
    """Representation of an API status."""
    # The enum value doubles as the human-readable form (see __str__).
    OK = "ok"
    INVALID_PASSWORD = "invalid_password"
    CANNOT_CONNECT = "cannot_connect"
    UNKNOWN = "unknown"
    def __str__(self) -> str:
        """Return the state."""
        return self.value  # type: ignore
class API:
    """Object to pass around Home Assistant API location and credentials."""
    def __init__(self, host: str, api_password: Optional[str] = None,
                 port: Optional[int] = SERVER_PORT,
                 use_ssl: bool = False) -> None:
        """Init the API.
        host may be a bare hostname or a full http(s):// base URL.
        """
        self.host = host
        self.port = port
        self.api_password = api_password
        # A host that already carries a scheme is used verbatim.
        if host.startswith(("http://", "https://")):
            self.base_url = host
        elif use_ssl:
            self.base_url = "https://{}".format(host)
        else:
            self.base_url = "http://{}".format(host)
        if port is not None:
            self.base_url += ':{}'.format(port)
        # Cached result of the last validate_api() call.
        self.status = None  # type: Optional[APIStatus]
        self._headers = {CONTENT_TYPE: CONTENT_TYPE_JSON}
        if api_password is not None:
            self._headers[HTTP_HEADER_HA_AUTH] = api_password
    def validate_api(self, force_validate: bool = False) -> bool:
        """Test if we can communicate with the API."""
        # Only hit the network the first time, unless forced.
        if self.status is None or force_validate:
            self.status = validate_api(self)
        return self.status == APIStatus.OK
    def __call__(self, method: str, path: str, data: Optional[Dict] = None,
                 timeout: int = 5) -> requests.Response:
        """Make a call to the Home Assistant API.
        Raises HomeAssistantError on connection errors and timeouts.
        """
        if data is None:
            data_str = None
        else:
            data_str = json.dumps(data, cls=JSONEncoder)
        url = urllib.parse.urljoin(self.base_url, path)
        try:
            # GET payloads travel as query parameters, not as a body.
            if method == METH_GET:
                return requests.get(
                    url, params=data_str, timeout=timeout,
                    headers=self._headers)
            return requests.request(
                method, url, data=data_str, timeout=timeout,
                headers=self._headers)
        except requests.exceptions.ConnectionError:
            _LOGGER.exception("Error connecting to server")
            raise HomeAssistantError("Error connecting to server")
        except requests.exceptions.Timeout:
            error = "Timeout when talking to {}".format(self.host)
            _LOGGER.error(error)
            raise HomeAssistantError(error)
    def __repr__(self) -> str:
        """Return the representation of the API."""
        return "<API({}, password: {})>".format(
            self.base_url, 'yes' if self.api_password is not None else 'no')
class JSONEncoder(json.JSONEncoder):
    """JSONEncoder that knows how to serialize Home Assistant objects."""
    # pylint: disable=method-hidden
    def default(self, o: Any) -> Any:
        """Serialize datetimes, sets and objects exposing ``as_dict``.
        Anything else is handed to the base implementation.
        """
        if isinstance(o, dt.datetime):
            return o.isoformat()
        if isinstance(o, set):
            return list(o)
        if hasattr(o, 'as_dict'):
            return o.as_dict()
        return super(JSONEncoder, self).default(o)
def validate_api(api: API) -> APIStatus:
    """Make a call to validate API."""
    # Map the well-known HTTP codes onto API statuses.
    code_to_status = {
        200: APIStatus.OK,
        401: APIStatus.INVALID_PASSWORD,
    }
    try:
        req = api(METH_GET, URL_API)
    except HomeAssistantError:
        return APIStatus.CANNOT_CONNECT
    return code_to_status.get(req.status_code, APIStatus.UNKNOWN)
def get_states(api: API) -> List[State]:
    """Query given API for all states."""
    try:
        response = api(METH_GET, URL_API_STATES)
        return [State.from_dict(item) for item in response.json()]
    except (HomeAssistantError, ValueError, AttributeError):
        # ValueError if response.json() can't parse the json
        _LOGGER.error("Error fetching states")
        return []
| 30.953368 | 78 | 0.623452 | 7,177 | 0.600686 | 0 | 0 | 1,186 | 0.099263 | 0 | 0 | 3,963 | 0.331687 |
eaf701263125585ba9578e0154162d4b8a98da01 | 4,305 | py | Python | CIM14/ENTSOE/Dynamics/IEC61970/Dynamics/PowerSystemStabilizers/PowerSystemStabilizersPssIEEE2B.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | 58 | 2015-04-22T10:41:03.000Z | 2022-03-29T16:04:34.000Z | CIM14/ENTSOE/Dynamics/IEC61970/Dynamics/PowerSystemStabilizers/PowerSystemStabilizersPssIEEE2B.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | 12 | 2015-08-26T03:57:23.000Z | 2020-12-11T20:14:42.000Z | CIM14/ENTSOE/Dynamics/IEC61970/Dynamics/PowerSystemStabilizers/PowerSystemStabilizersPssIEEE2B.py | MaximeBaudette/PyCIM | d68ee5ccfc1d32d44c5cd09fb173142fb5ff4f14 | [
"MIT"
] | 35 | 2015-01-10T12:21:03.000Z | 2020-09-09T08:18:16.000Z | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM14.ENTSOE.Dynamics.IEC61970.Core.CorePowerSystemResource import CorePowerSystemResource
class PowerSystemStabilizersPssIEEE2B(CorePowerSystemResource):
    """CIM parameter container for the IEEE 2B power system stabilizer.
    All stabilizer parameters are plain keyword arguments; the attribute
    list in `_attrs` (with types in `_attr_types` and defaults in
    `_defaults`) is the single source of truth for them.
    """
    def __init__(self, t10=0.0, a=0.0, ks1=0.0, ks3=0.0, t11=0.0, ks2=0.0, vstmin=0.0, vsi1max=0.0, vsi2max=0.0, tb=0.0, t2=0.0, ta=0.0, t1=0.0, t4=0.0, n=0, t3=0.0, m=0, j1=0, t6=0.0, j2=0, t8=0.0, vsi1min=0.0, t7=0.0, t9=0.0, ks4=0.0, tw2=0.0, tw1=0.0, tw4=0.0, vsi2min=0.0, tw3=0.0, vstmax=0.0, *args, **kw_args):
        """Initialises a new 'PowerSystemStabilizersPssIEEE2B' instance.
        Each keyword parameter maps one-to-one onto the instance attribute
        of the same name; extra positional/keyword arguments are forwarded
        to CorePowerSystemResource.
        """
        # DRY: assign all stabilizer parameters in one pass instead of 31
        # hand-written assignments, keeping __init__ consistent with _attrs.
        params = locals()
        for attr in self._attrs:
            setattr(self, attr, params[attr])
        super(PowerSystemStabilizersPssIEEE2B, self).__init__(*args, **kw_args)
    _attrs = ["t10", "a", "ks1", "ks3", "t11", "ks2", "vstmin", "vsi1max", "vsi2max", "tb", "t2", "ta", "t1", "t4", "n", "t3", "m", "j1", "t6", "j2", "t8", "vsi1min", "t7", "t9", "ks4", "tw2", "tw1", "tw4", "vsi2min", "tw3", "vstmax"]
    _attr_types = {"t10": float, "a": float, "ks1": float, "ks3": float, "t11": float, "ks2": float, "vstmin": float, "vsi1max": float, "vsi2max": float, "tb": float, "t2": float, "ta": float, "t1": float, "t4": float, "n": int, "t3": float, "m": int, "j1": int, "t6": float, "j2": int, "t8": float, "vsi1min": float, "t7": float, "t9": float, "ks4": float, "tw2": float, "tw1": float, "tw4": float, "vsi2min": float, "tw3": float, "vstmax": float}
    _defaults = {"t10": 0.0, "a": 0.0, "ks1": 0.0, "ks3": 0.0, "t11": 0.0, "ks2": 0.0, "vstmin": 0.0, "vsi1max": 0.0, "vsi2max": 0.0, "tb": 0.0, "t2": 0.0, "ta": 0.0, "t1": 0.0, "t4": 0.0, "n": 0, "t3": 0.0, "m": 0, "j1": 0, "t6": 0.0, "j2": 0, "t8": 0.0, "vsi1min": 0.0, "t7": 0.0, "t9": 0.0, "ks4": 0.0, "tw2": 0.0, "tw1": 0.0, "tw4": 0.0, "vsi2min": 0.0, "tw3": 0.0, "vstmax": 0.0}
    _enums = {}
    _refs = []
    _many_refs = []
| 26.574074 | 448 | 0.560046 | 3,106 | 0.721487 | 0 | 0 | 0 | 0 | 0 | 0 | 2,289 | 0.531707 |
eaf751a9a0bcab3d9157ea5ae5ba0acfa2a5324c | 1,413 | py | Python | molecular_computation/procedures/gel_electrophoresis.py | shakedmanes/molecular-computation | 80d759c74288f99dfb7c3dab2a5d1e88b17e3171 | [
"MIT"
] | null | null | null | molecular_computation/procedures/gel_electrophoresis.py | shakedmanes/molecular-computation | 80d759c74288f99dfb7c3dab2a5d1e88b17e3171 | [
"MIT"
] | null | null | null | molecular_computation/procedures/gel_electrophoresis.py | shakedmanes/molecular-computation | 80d759c74288f99dfb7c3dab2a5d1e88b17e3171 | [
"MIT"
] | null | null | null | from molecules.dna_molecule import DNAMolecule
from molecules.dna_sequence import DNASequence
class GelElectrophoresis:
    """
    Produce the Gel Electrophoresis procedure to sort DNA molecules by their size.
    """
    @staticmethod
    def run_gel(dna_molecules):
        """
        Run the Gel Electrophoresis procedure: sort DNA molecules by size.
        :param dna_molecules: iterable of DNA molecules.
        :return: New list of the given molecules, ordered by their length.
        """
        # sorted() copies and orders in one step; the input is left untouched.
        return sorted(dna_molecules, key=lambda molecule: molecule.length)
if __name__ == '__main__':
    # Demo: build five random double-stranded molecules of varying length
    # and show them before and after the gel sorts them by size.
    dna_sequences = [
        DNASequence.create_random_sequence(size=20),
        DNASequence.create_random_sequence(size=10),
        DNASequence.create_random_sequence(size=30),
        DNASequence.create_random_sequence(size=5),
        DNASequence.create_random_sequence(size=15)
    ]
    ex_dna_molecules = [
        DNAMolecule(dna_sequences[index], dna_sequences[index].get_complement())
        for index in range(len(dna_sequences))
    ]
    print('DNA molecules:')
    for molecule in ex_dna_molecules:
        print(f'{molecule}\n')
    print('\nRun Gel Electrophoresis on DNA molecules:')
    gel_dna_molecules = GelElectrophoresis.run_gel(ex_dna_molecules)
    print('Results DNA molecules:')
    for molecule in gel_dna_molecules:
        print(f'{molecule}\n')
| 28.836735 | 83 | 0.690729 | 502 | 0.355272 | 0 | 0 | 372 | 0.26327 | 0 | 0 | 421 | 0.297948 |
eaf89a6ffff99ef2e6291f264aa42721610c16ea | 153 | py | Python | stocks/__init__.py | jianyex/stocks | 124ccbda452af2e2bebc76d6ee95997c0d2417f4 | [
"BSD-3-Clause"
] | null | null | null | stocks/__init__.py | jianyex/stocks | 124ccbda452af2e2bebc76d6ee95997c0d2417f4 | [
"BSD-3-Clause"
] | null | null | null | stocks/__init__.py | jianyex/stocks | 124ccbda452af2e2bebc76d6ee95997c0d2417f4 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for stocks."""
__author__ = """Jianye Xu"""
__email__ = 'jianye.xu.stats@gmail.com'
__version__ = '0.1.0'
| 19.125 | 39 | 0.627451 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.699346 |
eaf8d812f3b4c657aa0c2b65e78b3b540116a7eb | 1,147 | py | Python | 2021/11/__init__.py | cascandaliato/Advent-of-Code | 37d96cd6b02cfe1a1f78f83c9c01058ce8a07448 | [
"MIT"
] | null | null | null | 2021/11/__init__.py | cascandaliato/Advent-of-Code | 37d96cd6b02cfe1a1f78f83c9c01058ce8a07448 | [
"MIT"
] | 11 | 2020-12-22T09:03:51.000Z | 2021-12-03T19:56:08.000Z | 2021/11/__init__.py | cascandaliato/advent-of-code | 251ebe7ea122582e4349d0c7f5ce6565c0410f8e | [
"MIT"
] | null | null | null | from itertools import product
from pyutils import *
def parse(lines):
    """Parse the puzzle input by applying `ints` to every line."""
    return list(map(ints, lines))
def neighbors(r, c, d):
    """Yield the in-bounds 8-neighbours of (r, c) on a d x d grid."""
    for dr, dc in product((-1, 0, 1), repeat=2):
        if dr == 0 and dc == 0:
            continue
        nr, nc = r + dr, c + dc
        if 0 <= nr < d and 0 <= nc < d:
            yield (nr, nc)
def update_and_store(grid, r, c, store):
    """Bump the cell at (r, c) by one, wrapping 10 -> 0; record flashes in *store*."""
    new_energy = (grid[r][c] + 1) % 10
    grid[r][c] = new_energy
    if new_energy == 0:
        # Wrapping to zero means the octopus flashed this step.
        store.add((r, c))
def play(grid):
    """Advance the grid one step in place and return the number of flashes."""
    count = 0
    lit = set()
    # first pass: every octopus gains one energy; 9s wrap to 0 and flash
    for r, c in product(range(len(grid)), repeat=2):
        update_and_store(grid, r, c, lit)
    # keep looping until no octopus flashes
    while lit:
        count += len(lit)
        newly_lit = set()
        for r, c in lit:
            for nr, nc in neighbors(r, c, len(grid)):
                # cells at 0 already flashed this step and must stay at 0
                if grid[nr][nc]:
                    update_and_store(grid, nr, nc, newly_lit)
        lit = newly_lit
    return count
@expect({'test': 1656})
def solve1(grid):
    """Part 1: total number of flashes over 100 steps."""
    total_flashes = 0
    for _ in range(100):
        total_flashes += play(grid)
    return total_flashes
@expect({'test': 195})
def solve2(grid):
    """Part 2: first step where every cell flashes (play == cell count)."""
    steps = 0
    while True:
        steps += 1
        if play(grid) == len(grid) ** 2:
            return steps
| 22.490196 | 128 | 0.517001 | 0 | 0 | 0 | 0 | 211 | 0.183958 | 0 | 0 | 65 | 0.05667 |
eaf917474f89876f0657f27beb9e7f2cba71b85e | 1,462 | py | Python | test/test_02_expressions.py | jmeppley/py-metagenomics | 0dbab073cb7e52c4826054e40eb802c9e0298e9a | [
"MIT"
] | 7 | 2015-05-14T09:36:36.000Z | 2022-03-30T14:32:21.000Z | test/test_02_expressions.py | jmeppley/py-metagenomics | 0dbab073cb7e52c4826054e40eb802c9e0298e9a | [
"MIT"
] | 1 | 2015-07-14T11:47:25.000Z | 2015-07-17T01:45:26.000Z | test/test_02_expressions.py | jmeppley/py-metagenomics | 0dbab073cb7e52c4826054e40eb802c9e0298e9a | [
"MIT"
] | 7 | 2015-07-25T22:29:29.000Z | 2022-03-01T21:26:14.000Z | from edl.expressions import *
def test_accession_re():
    """accessionRE must match every line of the sample file and known IDs."""
    with open('test/data/sample.1.blastx.b50.m8') as blast_file:
        try:
            for line in blast_file:
                acc = accessionRE.search(line).group(1)
        except AttributeError:
            # There should have been a match in every line of this file
            assert False
    test_data = {
        'ref|YP_002498923.1|': 'YP_002498923',
        'ref|YP_002498923.1': 'YP_002498923',
        'gi|109900248|ref|YP_663503.1|': 'YP_663503',
        'YP_663503.1': 'YP_663503',
    }
    for raw_id, expected_acc in test_data.items():
        assert accessionRE.search(raw_id).group(1) == expected_acc
def test_fasta_re():
    """fastaRE must count the expected number of matching lines per file."""
    file_data = {
        'test/data/test.gbk.faa': 3941,
        'test/data/test.gbk.fna': 3941,
        'test/data/createPrimerNextera.fasta': 2,
        'test/data/createPrimerTruseq.fasta': 4,
        'test/data/HOT_100_reads.8.fasta': 8,
        'test/data/HOT_100_reads.fasta': 100,
    }
    checked = 0
    for file_name, expected_count in file_data.items():
        assert _count_re_hits(file_name, fastaRE) == expected_count
        checked += 1
    # Sanity check that every fixture was exercised.
    assert checked == len(file_data)
    assert checked == 6
def _count_re_hits(file_name, regex):
count=0
with open(file_name) as INF:
for line in INF:
if regex.search(line):
count+=1
return count
| 28.666667 | 71 | 0.579343 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 413 | 0.28249 |
eaf9a002328e5d19246bf0ad315b485aec268e7a | 2,969 | py | Python | .leetcode/230.kth-smallest-element-in-a-bst.py | KuiyuanFu/PythonLeetCode | 8962df2fa838eb7ae48fa59de272ba55a89756d8 | [
"MIT"
] | null | null | null | .leetcode/230.kth-smallest-element-in-a-bst.py | KuiyuanFu/PythonLeetCode | 8962df2fa838eb7ae48fa59de272ba55a89756d8 | [
"MIT"
] | null | null | null | .leetcode/230.kth-smallest-element-in-a-bst.py | KuiyuanFu/PythonLeetCode | 8962df2fa838eb7ae48fa59de272ba55a89756d8 | [
"MIT"
] | null | null | null | # @lc app=leetcode id=230 lang=python3
#
# [230] Kth Smallest Element in a BST
#
# https://leetcode.com/problems/kth-smallest-element-in-a-bst/description/
#
# algorithms
# Medium (63.45%)
# Likes: 4133
# Dislikes: 90
# Total Accepted: 558.7K
# Total Submissions: 876.8K
# Testcase Example: '[3,1,4,null,2]\n1'
#
# Given the root of a binary search tree, and an integer k, return the k^th
# (1-indexed) smallest element in the tree.
#
#
# Example 1:
#
#
# Input: root = [3,1,4,null,2], k = 1
# Output: 1
#
#
# Example 2:
#
#
# Input: root = [5,3,6,2,4,null,null,1], k = 3
# Output: 3
#
#
#
# Constraints:
#
#
# The number of nodes in the tree is n.
# 1 <= k <= n <= 10^4
# 0 <= Node.val <= 10^4
#
#
#
# Follow up: If the BST is modified often (i.e., we can do insert and delete
# operations) and you need to find the kth smallest frequently, how would you
# optimize?
#
# @lc tags=binary-search;tree
# @lc imports=start
from imports import *
# @lc imports=end
# @lc idea=start
#
# 找二叉搜索树的第k小的元素。
# 直接深度优先,递归。
#
# @lc idea=end
# @lc group=
# @lc rank=
# @lc code=start
class Solution:
    def kthSmallest(self, root: TreeNode, k: int) -> int:
        """Return the k-th smallest value in a BST (iterative in-order walk)."""
        stack = []
        node = root
        while stack or node is not None:
            # Descend to the leftmost unvisited node.
            while node is not None:
                stack.append(node)
                node = node.left
            node = stack.pop()
            k -= 1
            if k == 0:
                return node.val
            node = node.right
# @lc code=end
# @lc main=start
if __name__ == '__main__':
    # Ad-hoc smoke test on a large tree: k=1 should yield the minimum.
    print(
        str(Solution().kthSmallest(
            listToTreeNode([
                31, 30, 48, 3, None, 38, 49, 0, 16, 35, 47, None, None, None,
                2, 15, 27, 33, 37, 39, None, 1, None, 5, None, 22, 28, 32, 34,
                36, None, None, 43, None, None, 4, 11, 19, 23, None, 29, None,
                None, None, None, None, None, 40, 46, None, None, 7, 14, 17,
                21, None, 26, None, None, None, 41, 44, None, 6, 10, 13, None,
                None, 18, 20, None, 25, None, None, 42, None, 45, None, None,
                8, None, 12, None, None, None, None, None, 24, None, None,
                None, None, None, None, 9
            ]), 1)))
    # The two official examples from the problem statement.
    print('Example 1:')
    print('Input : ')
    print('root = [3,1,4,null,2], k = 1')
    print('Exception :')
    print('1')
    print('Output :')
    print(str(Solution().kthSmallest(listToTreeNode([3, 1, 4, None, 2]), 1)))
    print()
    print('Example 2:')
    print('Input : ')
    print('root = [5,3,6,2,4,null,null,1], k = 3')
    print('Exception :')
    print('3')
    print('Output :')
    print(
        str(Solution().kthSmallest(
            listToTreeNode([5, 3, 6, 2, 4, None, None, 1]), 3)))
    print()
    pass
# @lc main=end | 23.377953 | 78 | 0.54227 | 589 | 0.195357 | 0 | 0 | 0 | 0 | 0 | 0 | 1,260 | 0.41791 |
eaf9b9336a7a5da3543fc406dd64b5100923c1bf | 592 | py | Python | python/solutions/kyu6_replace_with_alphabet_position.py | StefanAvra/codewars-katas | a2b11453eb6fe28ff56ffdcca956d8b9493dd879 | [
"MIT"
] | null | null | null | python/solutions/kyu6_replace_with_alphabet_position.py | StefanAvra/codewars-katas | a2b11453eb6fe28ff56ffdcca956d8b9493dd879 | [
"MIT"
] | null | null | null | python/solutions/kyu6_replace_with_alphabet_position.py | StefanAvra/codewars-katas | a2b11453eb6fe28ff56ffdcca956d8b9493dd879 | [
"MIT"
] | null | null | null | """
Replace With Alphabet Position
Welcome.
In this kata you are required to, given a string, replace every letter with its position in the alphabet.
If anything in the text isn't a letter, ignore it and don't return it.
"a" = 1, "b" = 2, etc.
Example
alphabet_position("The sunset sets at twelve o' clock.")
Should return "20 8 5 19 21 14 19 5 20 19 5 20 19 1 20 20 23 5 12 22 5 15 3 12 15 3 11" (as a string)
"""
def alphabet_position(text):
    """Replace every letter with its 1-based alphabet position; drop the rest."""
    letters = 'abcdefghijklmnopqrstuvwxyz'
    positions = []
    # casefold() handles characters like German eszett, matching the original.
    for ch in text.casefold():
        idx = letters.find(ch)
        if idx != -1:
            positions.append(str(idx + 1))
    return ' '.join(positions)
| 26.909091 | 105 | 0.709459 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 450 | 0.760135 |
eafbcaee9d308410a952a5c6a4a7a59550910130 | 1,463 | py | Python | pykafka/test/utils.py | Instamojo/pykafka | c8c3e445773beefc52039adf708295ed2b8394d2 | [
"Apache-2.0"
] | 1,174 | 2015-01-26T22:11:37.000Z | 2022-03-22T14:42:18.000Z | pykafka/test/utils.py | Instamojo/pykafka | c8c3e445773beefc52039adf708295ed2b8394d2 | [
"Apache-2.0"
] | 845 | 2015-01-26T16:02:35.000Z | 2021-03-23T11:07:12.000Z | pykafka/test/utils.py | Instamojo/pykafka | c8c3e445773beefc52039adf708295ed2b8394d2 | [
"Apache-2.0"
] | 295 | 2015-02-28T10:44:08.000Z | 2021-12-04T23:05:18.000Z | import time
import os
from pykafka.test.kafka_instance import KafkaInstance, KafkaConnection
def get_cluster():
    """Gets a Kafka cluster for testing, using one already running is possible.
    An already-running cluster is determined by environment variables:
    BROKERS, ZOOKEEPER, KAFKA_BIN. This is used primarily to speed up tests
    in our Travis-CI environment.
    """
    required_vars = ('BROKERS', 'ZOOKEEPER', 'KAFKA_BIN')
    if all(os.environ.get(var) for var in required_vars):
        # Broker is already running. Use that.
        return KafkaConnection(os.environ['KAFKA_BIN'],
                               os.environ['BROKERS'],
                               os.environ['ZOOKEEPER'],
                               os.environ.get('BROKERS_SSL', None))
    return KafkaInstance(num_instances=3)
def stop_cluster(cluster):
    """Stop a created cluster, or merely flush a pre-existing one."""
    if isinstance(cluster, KafkaInstance):
        cluster.terminate()
        return
    cluster.flush()
def retry(assertion_callable, retry_time=10, wait_between_tries=0.1, exception_to_retry=AssertionError):
    """Call *assertion_callable* until it succeeds or *retry_time* elapses.
    Re-raises the last *exception_to_retry* once the deadline has passed.
    """
    deadline = time.time() + retry_time
    while True:
        try:
            return assertion_callable()
        except exception_to_retry:
            if time.time() >= deadline:
                raise
            time.sleep(wait_between_tries)
| 33.25 | 104 | 0.632946 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 484 | 0.330827 |
eafd1adf2d0fafadc3e2004933f7da698a51fc56 | 2,107 | py | Python | vca_cli/print_utils.py | KohliRocks/vca-cli-iot | 22d32c287ab04214c3f273937628092623fbb7fe | [
"Apache-2.0"
] | null | null | null | vca_cli/print_utils.py | KohliRocks/vca-cli-iot | 22d32c287ab04214c3f273937628092623fbb7fe | [
"Apache-2.0"
] | null | null | null | vca_cli/print_utils.py | KohliRocks/vca-cli-iot | 22d32c287ab04214c3f273937628092623fbb7fe | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 VMware. All rights reserved
import prettytable
import six
import sys
from oslo_utils import encodeutils
def _print(pt, order):
if sys.version_info >= (3, 0):
print(pt.get_string(sortby=order))
else:
print(encodeutils.safe_encode(pt.get_string(sortby=order)))
def print_dict(d, property="Property"):
    """Print *d* as a two-column (property, value) pretty table.
    :param d: mapping to display, one row per key/value pair
    :param property: heading for the key column
    """
    pt = prettytable.PrettyTable([property, 'Value'], caching=False)
    pt.align = 'l'
    # Fixed: was a list comprehension used purely for its side effects.
    for row in six.iteritems(d):
        pt.add_row(list(row))
    _print(pt, property)
def print_list(objs, fields, formatters=None, order_by=None, obj_is_dict=False,
               labels=None):
    """Print *objs* as a pretty table with one column per field.
    :param objs: iterable of objects (or dicts when obj_is_dict is True)
    :param fields: attribute/key names, one column each
    :param formatters: optional mapping field -> callable(obj) used to
        render that column instead of plain attribute/key access
    :param order_by: field to sort the rows by (defaults to fields[0])
    :param obj_is_dict: treat each obj as a dict instead of an object
    :param labels: optional mapping field -> column heading; headings for
        missing fields are generated (and added to the mapping) below
    """
    # Fixed: mutable default arguments ({}) replaced with None sentinels so
    # the defaults are never shared between calls.
    if not labels:
        labels = {}
    for field in fields:
        if field not in labels:
            # No underscores (use spaces instead) and uppercase any ID's
            label = field.replace("_", " ").replace("id", "ID")
            # Uppercase anything else that's less than 3 chars
            if len(label) < 3:
                label = label.upper()
            # Capitalize each word otherwise
            else:
                label = ' '.join(word[0].upper() + word[1:]
                                 for word in label.split())
            labels[field] = label
    pt = prettytable.PrettyTable(
        [labels[field] for field in fields], caching=False)
    # set the default alignment to left-aligned
    align = dict((labels[field], 'l') for field in fields)
    set_align = True
    for obj in objs:
        row = []
        for field in fields:
            # Fixed: the formatter branch used to append its result AND then
            # unconditionally append the stale (or unbound) `data`, producing
            # rows with too many columns whenever formatters were used.
            if formatters and field in formatters:
                data = formatters[field](obj)
            elif obj_is_dict:
                data = obj.get(field, '')
            else:
                data = getattr(obj, field, '')
            row.append(data)
            # set the alignment to right-aligned if it's a numeric; only the
            # first numeric value found adjusts its column, then checking stops
            if set_align and hasattr(data, '__int__'):
                align[labels[field]] = 'r'
                set_align = False
        pt.add_row(row)
    pt._align = align
    if not order_by:
        order_by = fields[0]
    order_by = labels[order_by]
    _print(pt, order_by)
| 31.447761 | 77 | 0.572378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 343 | 0.162791 |
eafd2d7bd898465467b4a666339f624043e204d4 | 707 | py | Python | references/web/test.py | flystarhe/cvtk | bcfea4c5b13269bdd63899020b9c70f2eb1f5b07 | [
"MIT"
] | 1 | 2021-06-29T07:12:45.000Z | 2021-06-29T07:12:45.000Z | references/web/test.py | flystarhe/cvtk | bcfea4c5b13269bdd63899020b9c70f2eb1f5b07 | [
"MIT"
] | null | null | null | references/web/test.py | flystarhe/cvtk | bcfea4c5b13269bdd63899020b9c70f2eb1f5b07 | [
"MIT"
] | null | null | null | # python test.py 500 /workspace/images/test.png 7000
import sys
import time
import requests
args = sys.argv[1:]
command = " ".join(args)
times = int(args[0])
data = {"image": [args[1]] * 2}
url = f"http://localhost:{args[2]}/predict"
headers = {"content-type": "application/x-www-form-urlencoded"}
oks = 0
start_time = time.time()
for _ in range(times):
response = requests.post(url, data=data, headers=headers)
if response.status_code == 200:
x = response.json()
if x["status"] == 0:
oks += 1
total_time = time.time() - start_time
status = dict(
command=command,
times=times,
oks=oks,
total_time=total_time,
latest_x=x,
)
print(str(status))
| 19.108108 | 63 | 0.636492 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 156 | 0.220651 |
eafd8eec654916b6469e65277d4f8eb7cf7b961d | 1,048 | py | Python | api/app/core/config.py | logan-connolly/aita | 7782910fd9c5161c7af489a288ee9d52aca91421 | [
"MIT"
] | 4 | 2020-10-31T15:57:06.000Z | 2022-02-17T02:57:15.000Z | api/app/core/config.py | logan-connolly/aita | 7782910fd9c5161c7af489a288ee9d52aca91421 | [
"MIT"
] | 9 | 2020-05-06T16:11:45.000Z | 2021-12-26T22:58:17.000Z | api/app/core/config.py | logan-connolly/aita | 7782910fd9c5161c7af489a288ee9d52aca91421 | [
"MIT"
] | 1 | 2021-12-01T11:43:04.000Z | 2021-12-01T11:43:04.000Z | from typing import List
from pydantic import BaseSettings
class ApiConfig(BaseSettings):
    # API metadata: title plus the versioned path prefixes.
    title: str = "AITA"
    version: str = "/api/v1"
    openapi: str = "/api/v1/openapi.json"
class PostgresConfig(BaseSettings):
    # Required fields; populated from POSTGRES_* environment variables
    # via the env_prefix below (pydantic BaseSettings behaviour).
    user: str
    password: str
    host: str
    db: str
    class Config:
        env_prefix = "POSTGRES_"
class RedditConfig(BaseSettings):
    # Reddit API credentials; populated from REDDIT_* environment variables.
    client_id: str
    client_secret: str
    password: str
    username: str
    class Config:
        env_prefix = "REDDIT_"
class WebSettings(BaseSettings):
    # Required; populated from the WEB_PORT environment variable.
    port: int
    class Config:
        env_prefix = "WEB_"
class Settings(BaseSettings):
    # Sub-configs are instantiated at import time, so their required
    # environment variables must be present when this module loads.
    api = ApiConfig()
    pg = PostgresConfig()
    reddit = RedditConfig()
    web = WebSettings()
    DEBUG: bool = True
    MODEL_PATH: str = "example/path"
    # Connection string assembled from the Postgres settings above.
    URI: str = f"postgresql://{pg.user}:{pg.password}@{pg.host}/{pg.db}"
    BACKEND_CORS_ORIGINS: List[str] = [
        "http://localhost",
        f"http://localhost:{web.port}",
    ]
    class Config:
        case_sensitive = True
# Module-level singleton instance.
settings = Settings()
| 18.068966 | 72 | 0.627863 | 950 | 0.906489 | 0 | 0 | 0 | 0 | 0 | 0 | 182 | 0.173664 |
eafe701e57f31e67fd3d9f0fc310e5ec69bd74c7 | 11,756 | py | Python | src/sniffmypacketsv2/transforms/common/protocols/sip.py | SneakersInc/sniffmypacketsv2 | 55d8ff70eedb4dd948351425c25a1e904ea6d50e | [
"Apache-2.0"
] | 11 | 2015-01-01T19:44:04.000Z | 2020-03-26T07:30:26.000Z | src/sniffmypacketsv2/transforms/common/protocols/sip.py | SneakersInc/sniffmypacketsv2 | 55d8ff70eedb4dd948351425c25a1e904ea6d50e | [
"Apache-2.0"
] | 8 | 2015-01-01T22:45:59.000Z | 2015-12-12T10:37:50.000Z | src/sniffmypacketsv2/transforms/common/protocols/sip.py | SneakersInc/sniffmypacketsv2 | 55d8ff70eedb4dd948351425c25a1e904ea6d50e | [
"Apache-2.0"
] | 3 | 2017-06-04T05:18:24.000Z | 2020-03-26T07:30:27.000Z | import base64
from scapy.layers.inet import *
from scapy.layers.dns import *
import dissector
class SIPStartField(StrField):
    """
    Field class for dissecting the SIP start-line (request-line or
    status-line) of a packet.
    @attention: it inherits StrField from the Scapy library
    """
    holds_packets = 1
    name = "SIPStartField"
    def getfield(self, pkt, s):
        """
        Split the start-line off the payload.
        Returns a ``(remain, value)`` tuple: ``value`` summarises the parsed
        start-line and ``remain`` is the rest of the payload, still to be
        dissected by the other field classes.
        @param pkt: holds the whole packet
        @param s: holds only the remaining data which is not dissected yet.
        """
        if pkt.underlayer.name == "TCP":
            # Reassemble the TCP stream first, if this segment is part of one.
            cstream = dissector.check_stream(
                pkt.underlayer.underlayer.fields["src"],
                pkt.underlayer.underlayer.fields["dst"],
                pkt.underlayer.fields["sport"],
                pkt.underlayer.fields["dport"],
                pkt.underlayer.fields["seq"], s)
            if not cstream == -1:
                s = cstream
        lines = s.splitlines(True)
        if not lines:
            # Robustness fix: an empty payload used to raise IndexError.
            return s, ""
        tokens = lines[0].split()
        if not tokens:
            return s, ""
        remain = "".join(lines[1:])
        if "SIP" in tokens[0]:
            # Status-line, e.g. "SIP/2.0 200 OK".
            if len(tokens) == 3:
                value = ("SIP-Version:" + tokens[0] + ", Status-Code:" +
                         tokens[1] + ", Reason-Phrase:" + tokens[2])
            else:
                value = lines[0]
            return remain, value
        # Robustness fix: guard the tokens[2] access -- a short first line
        # used to raise IndexError here.
        if len(tokens) >= 3 and "SIP" in tokens[2]:
            # Request-line, e.g. "INVITE sip:user@host SIP/2.0".
            if len(tokens) == 3:
                value = ("Method:" + tokens[0] + ", Request-URI:" +
                         tokens[1] + ", SIP-Version:" + tokens[2])
            else:
                value = lines[0]
            return remain, value
        # Not a SIP start-line; leave the payload untouched.
        return s, ""
class SIPMsgField(StrField):
    """
    Field class for handling the body of SIP packets.
    @attention: it inherits StrField from the Scapy library
    """
    holds_packets = 1
    name = "SIPMsgField"
    myresult = ""
    def __init__(self, name, default):
        """
        Initialise the field.
        @param name: name of the field
        @param default: default value; may be None (Scapy supports several
        internal/human/machine representations).
        """
        self.name = name
        self.fmt = "!B"
        Field.__init__(self, name, default, "!B")
    def getfield(self, pkt, s):
        """
        Consume the whole message body, base64-encoding it byte by byte.
        Returns ``("", encoded_body)``; nothing is left for other fields.
        @param pkt: holds the whole packet
        @param s: holds only the remaining data which is not dissected yet.
        """
        if s.startswith("\r\n"):
            s = s.lstrip("\r\n")
            if s == "":
                return "", ""
        # Each byte is encoded separately, matching the codebase convention.
        self.myresult = "".join(
            base64.standard_b64encode(octet) for octet in s)
        return "", self.myresult
class SIPField(StrField):
    """
    Field class for extracting a single SIP header line from the payload.
    @attention: it inherits StrField from the Scapy library
    """
    holds_packets = 1
    name = "SIPField"
    def getfield(self, pkt, s):
        """
        Take the header line matching ``self.name`` out of *s*.
        Returns ``(remain, value)``: the header's value and the payload
        with that line removed. The special name "unknown-header(s): "
        collects every leading line that looks like a header.
        @param pkt: holds the whole packet
        @param s: holds only the remaining data which is not dissected yet.
        """
        if self.name == "unknown-header(s): ":
            remain = ""
            value = []
            ls = s.splitlines(True)
            i = -1
            for element in ls:
                i = i + 1
                if element == "\r\n":
                    return s, []
                elif element != "\r\n" and (": " in element[:10])\
                        and (element[-2:] == "\r\n"):
                    value.append(element)
                    # Fixed: ls.remove(ls[i]) removed the first EQUAL line,
                    # not the line at index i, when duplicates were present.
                    del ls[i]
                    remain = ""
                    unknown = True
                    for element in ls:
                        if element != "\r\n" and (": " in element[:15])\
                                and (element[-2:] == "\r\n") and unknown:
                            value.append(element)
                        else:
                            # Fixed typo: was 'unknow = False', which created a
                            # new variable and never stopped header collection.
                            unknown = False
                            remain = remain + element
                    return remain, value
            return s, []
        remain = ""
        value = ""
        ls = s.splitlines(True)
        i = -1
        for element in ls:
            i = i + 1
            if element.upper().startswith(self.name.upper()):
                value = element
                # NOTE(review): strip() treats self.name as a character SET and
                # the slice below skips len(name)+1 chars; together they look
                # off-by-one for capitalised headers -- confirm against live
                # captures before changing this behaviour.
                value = value.strip(self.name)
                # Fixed: delete by index, not by value (see note above).
                del ls[i]
                remain = ""
                for element in ls:
                    remain = remain + element
                return remain, value[len(self.name) + 1:]
        return s, ""
    def __init__(self, name, default, fmt, remain=0):
        """
        Initialise the field.
        @param name: name of the field (the SIP header label, with ": ")
        @param default: default value; may be None
        @param fmt: struct format specifier
        @param remain: size of the remaining data; 0 handles everything
        """
        self.name = name
        StrField.__init__(self, name, default, fmt, remain)
class SIP(Packet):
    """
    Scapy layer for SIP messages.

    Dissection walks ``fields_desc`` in order: the start line first, then one
    SIPField per known header, a catch-all for unknown headers, and finally
    the message body.
    @attention: it inherets Packet from Scapy library
    """
    name = "sip"
    # Field order matters: each SIPField removes "its" header line from the
    # remaining payload before the next field runs, so the catch-all and the
    # body field must stay last.
    fields_desc = [SIPStartField("start-line: ", "", "H"),
                   SIPField("accept: ", "", "H"),
                   SIPField("accept-contact: ", "", "H"),
                   SIPField("accept-encoding: ", "", "H"),
                   SIPField("accept-language: ", "", "H"),
                   SIPField("accept-resource-priority: ", "", "H"),
                   SIPField("alert-info: ", "", "H"),
                   SIPField("allow: ", "", "H"),
                   SIPField("allow-events: ", "", "H"),
                   SIPField("authentication-info: ", "", "H"),
                   SIPField("authorization: ", "", "H"),
                   SIPField("call-id: ", "", "H"),
                   SIPField("call-info: ", "", "H"),
                   SIPField("contact: ", "", "H"),
                   SIPField("content-disposition: ", "", "H"),
                   SIPField("content-encoding: ", "", "H"),
                   SIPField("content-language: ", "", "H"),
                   SIPField("content-length: ", "", "H"),
                   SIPField("content-type: ", "", "H"),
                   SIPField("cseq: ", "", "H"),
                   SIPField("date: ", "", "H"),
                   SIPField("error-info: ", "", "H"),
                   SIPField("event: ", "", "H"),
                   SIPField("expires: ", "", "H"),
                   SIPField("from: ", "", "H"),
                   SIPField("in-reply-to: ", "", "H"),
                   SIPField("join: ", "", "H"),
                   SIPField("max-forwards: ", "", "H"),
                   SIPField("mime-version: ", "", "H"),
                   SIPField("min-expires: ", "", "H"),
                   SIPField("min-se: ", "", "H"),
                   SIPField("organization: ", "", "H"),
                   SIPField("p-access-network-info: ", "", "H"),
                   SIPField("p-asserted-identity: ", "", "H"),
                   SIPField("p-associated-uri: ", "", "H"),
                   SIPField("p-called-party-id: ", "", "H"),
                   SIPField("p-charging-function-addresses: ", "", "H"),
                   SIPField("p-charging-vector: ", "", "H"),
                   SIPField("p-dcs-trace-party-id: ", "", "H"),
                   SIPField("p-dcs-osps: ", "", "H"),
                   SIPField("p-dcs-billing-info: ", "", "H"),
                   SIPField("p-dcs-laes: ", "", "H"),
                   SIPField("p-dcs-redirect: ", "", "H"),
                   SIPField("p-media-authorization: ", "", "H"),
                   SIPField("p-preferred-identity: ", "", "H"),
                   SIPField("p-visited-network-id: ", "", "H"),
                   SIPField("path: ", "", "H"),
                   SIPField("priority: ", "", "H"),
                   SIPField("privacy: ", "", "H"),
                   SIPField("proxy-authenticate: ", "", "H"),
                   SIPField("proxy-authorization: ", "", "H"),
                   SIPField("proxy-require: ", "", "H"),
                   SIPField("rack: ", "", "H"),
                   SIPField("reason: ", "", "H"),
                   SIPField("record-route: ", "", "H"),
                   SIPField("referred-by: ", "", "H"),
                   SIPField("reject-contact: ", "", "H"),
                   SIPField("replaces: ", "", "H"),
                   SIPField("reply-to: ", "", "H"),
                   SIPField("request-disposition: ", "", "H"),
                   SIPField("require: ", "", "H"),
                   SIPField("resource-priority: ", "", "H"),
                   SIPField("retry-after: ", "", "H"),
                   SIPField("route: ", "", "H"),
                   SIPField("rseq: ", "", "H"),
                   SIPField("security-client: ", "", "H"),
                   SIPField("security-server: ", "", "H"),
                   SIPField("security-verify: ", "", "H"),
                   SIPField("server: ", "", "H"),
                   SIPField("service-route: ", "", "H"),
                   SIPField("session-expires: ", "", "H"),
                   SIPField("sip-etag: ", "", "H"),
                   SIPField("sip-if-match: ", "", "H"),
                   SIPField("subject: ", "", "H"),
                   SIPField("subscription-state: ", "", "H"),
                   SIPField("supported: ", "", "H"),
                   SIPField("timestamp: ", "", "H"),
                   SIPField("to: ", "", "H"),
                   SIPField("unsupported: ", "", "H"),
                   SIPField("user-agent: ", "", "H"),
                   SIPField("via: ", "", "H"),
                   SIPField("warning: ", "", "H"),
                   SIPField("www-authenticate: ", "", "H"),
                   SIPField("refer-to: ", "", "H"),
                   SIPField("history-info: ", "", "H"),
                   SIPField("unknown-header(s): ", "", "H"),
                   SIPMsgField("message-body: ", "")]
# Register the SIP layer on its well-known port 5060, over both TCP and UDP
# and in both directions (matching either the source or the destination port).
bind_layers(TCP, SIP, sport=5060)
bind_layers(TCP, SIP, dport=5060)
bind_layers(UDP, SIP, sport=5060)
bind_layers(UDP, SIP, dport=5060)
| 40.678201 | 76 | 0.451004 | 11,512 | 0.979245 | 0 | 0 | 0 | 0 | 0 | 0 | 4,558 | 0.387717 |
eaff872f4834bf2846f82c2e9bc383d651070c30 | 129 | py | Python | starfish/image/_segmentation/__init__.py | ttung/starfish | 1bd8abf55a335620e4b20abb041f478334714081 | [
"MIT"
] | null | null | null | starfish/image/_segmentation/__init__.py | ttung/starfish | 1bd8abf55a335620e4b20abb041f478334714081 | [
"MIT"
] | null | null | null | starfish/image/_segmentation/__init__.py | ttung/starfish | 1bd8abf55a335620e4b20abb041f478334714081 | [
"MIT"
] | null | null | null | from starfish.pipeline import import_all_submodules
from ._base import Segmentation
import_all_submodules(__file__, __package__)
| 32.25 | 51 | 0.883721 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d800cdce3e86b3fb748785cf5b0ccbdfe3714b0c | 19,335 | py | Python | scanpy/tools/paga.py | LuckyMD/scanpy | 4b38130cb7a76f284058fb788c8279999389e3c5 | [
"BSD-3-Clause"
] | null | null | null | scanpy/tools/paga.py | LuckyMD/scanpy | 4b38130cb7a76f284058fb788c8279999389e3c5 | [
"BSD-3-Clause"
] | null | null | null | scanpy/tools/paga.py | LuckyMD/scanpy | 4b38130cb7a76f284058fb788c8279999389e3c5 | [
"BSD-3-Clause"
] | null | null | null | from collections import namedtuple
import numpy as np
import scipy as sp
from scipy.sparse.csgraph import minimum_spanning_tree
from .. import logging as logg
from ..neighbors import Neighbors
from .. import utils
from .. import settings
def paga(
adata,
groups='louvain',
use_rna_velocity=False,
copy=False):
"""\
Generate cellular maps of differentiation manifolds with complex
topologies [Wolf17i]_.
Partition-based graph abstraction (PAGA) quantifies the connectivities of
partitions of a neighborhood graph of single cells, thereby generating a
much simpler abstracted graph whose nodes label the partitions. Together
with a random walk-based distance measure, this generates a partial
coordinatization of data useful for exploring and explaining its variation.
Parameters
----------
adata : :class:`~scanpy.api.AnnData`
Annotated data matrix.
groups : categorical annotation of observations or 'louvain_groups', optional (default: 'louvain_groups')
Criterion to determine the resulting partitions of the single-cell
graph. 'louvain_groups' uses the Louvain algorithm and optimizes
modularity of the graph. You can also pass your predefined groups by
choosing any categorical annotation of observations (`adata.obs`).
use_rna_velocity : `bool` (default: `False`)
Use RNA velocity to orient edges in the abstracted graph and estimate transitions.
copy : `bool`, optional (default: `False`)
Copy `adata` before computation and return a copy. Otherwise, perform
computation inplace and return `None`.
Returns
-------
Returns or updates `adata` depending on `copy` with
connectivities : np.ndarray (adata.uns['connectivities'])
The full adjacency matrix of the abstracted graph, weights
correspond to connectivities.
confidence : np.ndarray (adata.uns['confidence'])
The full adjacency matrix of the abstracted graph, weights
correspond to confidence in the presence of an edge.
confidence_tree : sc.sparse csr matrix (adata.uns['confidence_tree'])
The adjacency matrix of the tree-like subgraph that best explains
the topology.
"""
if 'neighbors' not in adata.uns:
raise ValueError(
'You need to run `pp.neighbors` first to compute a neighborhood graph.')
adata = adata.copy() if copy else adata
utils.sanitize_anndata(adata)
logg.info('running partition-based graph abstraction (PAGA)', reset=True)
paga = PAGA(adata, groups, use_rna_velocity=use_rna_velocity)
paga.compute()
# only add if not present
if 'paga' not in adata.uns:
adata.uns['paga'] = {}
if not use_rna_velocity:
adata.uns['paga']['connectivities'] = paga.connectivities_coarse
adata.uns['paga']['confidence'] = paga.confidence
adata.uns['paga']['confidence_tree'] = paga.confidence_tree
adata.uns[groups + '_sizes'] = np.array(paga.vc.sizes())
else:
adata.uns['paga']['transitions_confidence'] = paga.transitions_confidence
adata.uns['paga']['transitions_ttest'] = paga.transitions_ttest
adata.uns['paga']['groups'] = groups
logg.info(' finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
if use_rna_velocity:
logg.hint(
'added\n'
' \'paga/transitions_confidence\', confidence adjacency (adata.uns)\n'
' \'paga/transitions_ttest\', confidence subtree (adata.uns)')
else:
logg.hint(
'added\n'
' \'paga/connectivities\', connectivities adjacency (adata.uns)\n'
' \'paga/confidence\', confidence adjacency (adata.uns)\n'
' \'paga/confidence_tree\', confidence subtree (adata.uns)')
return adata if copy else None
class PAGA(Neighbors):
    """Coarse-grain the single-cell neighborhood graph by group membership.

    Builds an abstracted graph whose nodes are the groups in
    ``adata.obs[groups]``; edge weights are either topology-based
    connectivities/confidences or RNA-velocity-based transition scores.
    """
    def __init__(self, adata, groups, use_rna_velocity=False,
                 tree_based_confidence=False):
        super(PAGA, self).__init__(adata)
        self._groups = groups
        self._tree_based_confidence = tree_based_confidence
        self._use_rna_velocity = use_rna_velocity
    def compute(self):
        # Dispatch to the velocity-based or the topology-based pipeline.
        if self._use_rna_velocity:
            self.compute_transitions_coarse()
        else:
            self.compute_connectivities_coarse()
            self.compute_confidence()
    def compute_connectivities_coarse(self):
        # Coarse adjacency: sum of inter-group edges of the unweighted
        # single-cell graph, halved because each edge is counted twice.
        import igraph
        ones = self.connectivities.copy()
        # graph where edges carry weight 1
        ones.data = np.ones(len(ones.data))
        g = utils.get_igraph_from_adjacency(ones)
        self.vc = igraph.VertexClustering(
            g, membership=self._adata.obs[self._groups].cat.codes.values)
        cg = self.vc.cluster_graph(combine_edges='sum')
        self.connectivities_coarse = utils.get_sparse_from_igraph(cg, weight_attr='weight')/2
    def compute_confidence(self):
        """Translates the connectivities_coarse measure into a confidence measure.
        """
        # Invert weights so that the minimum spanning tree keeps the
        # strongest connections.
        pseudo_distance = self.connectivities_coarse.copy()
        pseudo_distance.data = 1./pseudo_distance.data
        connectivities_coarse_tree = minimum_spanning_tree(pseudo_distance)
        connectivities_coarse_tree.data = 1./connectivities_coarse_tree.data
        connectivities_coarse_tree_indices = [
            connectivities_coarse_tree[i].nonzero()[1]
            for i in range(connectivities_coarse_tree.shape[0])]
        # inter- and intra-cluster based confidence
        if not self._tree_based_confidence:
            # Normalize each inter-group edge by the geometric mean of the
            # expected total edge counts of the two groups.
            total_n = self.n_neighbors * np.array(self.vc.sizes())
            # NOTE(review): `maximum` is computed but never used below.
            maximum = self.connectivities_coarse.max()
            confidence = self.connectivities_coarse.copy()  # initializing
            for i in range(self.connectivities_coarse.shape[0]):
                for j in range(i+1, self.connectivities_coarse.shape[1]):
                    if self.connectivities_coarse[i, j] > 0:
                        geom_mean = np.sqrt(total_n[i] * total_n[j])
                        confidence[i, j] = self.connectivities_coarse[i, j] / geom_mean
                        confidence[j, i] = confidence[i, j]
        # tree-based confidence
        else:
            # Edges at least as strong as the median tree edge get full
            # confidence; weaker edges decay exponentially below the median.
            median_connectivities_coarse_tree = np.median(connectivities_coarse_tree.data)
            confidence = self.connectivities_coarse.copy()
            confidence.data[self.connectivities_coarse.data >= median_connectivities_coarse_tree] = 1
            connectivities_coarse_adjusted = self.connectivities_coarse.copy()
            connectivities_coarse_adjusted.data -= median_connectivities_coarse_tree
            connectivities_coarse_adjusted.data = np.exp(connectivities_coarse_adjusted.data)
            index = self.connectivities_coarse.data < median_connectivities_coarse_tree
            confidence.data[index] = connectivities_coarse_adjusted.data[index]
        confidence_tree = self.compute_confidence_tree(
            confidence, connectivities_coarse_tree_indices)
        self.confidence = confidence
        self.confidence_tree = confidence_tree
    def compute_confidence_tree(
            self, confidence, connectivities_coarse_tree_indices):
        # Restrict the confidence matrix to the spanning-tree edges.
        confidence_tree = sp.sparse.lil_matrix(confidence.shape, dtype=float)
        for i, neighbors in enumerate(connectivities_coarse_tree_indices):
            if len(neighbors) > 0:
                confidence_tree[i, neighbors] = confidence[i, neighbors]
        return confidence_tree.tocsr()
    def compute_transitions_coarse(self):
        # analogous code using networkx
        # membership = adata.obs['clusters'].cat.codes.tolist()
        # partition = defaultdict(list)
        # for n, p in zip(list(range(len(G))), membership):
        #     partition[p].append(n)
        # partition = partition.values()
        # g_abstracted = nx.quotient_graph(g, partition, relabel=True)
        # for some reason, though, edges aren't oriented in the quotient
        # graph...
        import igraph
        g = utils.get_igraph_from_adjacency(
            self._adata.uns['velocyto_transitions'], directed=True)
        vc = igraph.VertexClustering(
            g, membership=self._adata.obs[self._groups].cat.codes.values)
        cg_full = vc.cluster_graph(combine_edges=False)
        g_bool = utils.get_igraph_from_adjacency(
            self._adata.uns['velocyto_transitions'].astype('bool'), directed=True)
        vc_bool = igraph.VertexClustering(
            g_bool, membership=self._adata.obs[self._groups].cat.codes.values)
        cg_bool = vc_bool.cluster_graph(combine_edges='sum')  # collapsed version
        transitions_coarse = utils.get_sparse_from_igraph(cg_bool, weight_attr='weight')
        # translate this into a confidence measure
        # the number of outgoing edges
        # total_n = np.zeros(len(vc.sizes()))
        # # (this is not the convention of standard stochastic matrices)
        # total_outgoing = transitions_coarse.sum(axis=1)
        # for i in range(len(total_n)):
        #     total_n[i] = vc.subgraph(i).ecount()
        #     total_n[i] += total_outgoing[i, 0]
        # use the topology based reference, the velocity one might have very small numbers
        total_n = self.n_neighbors * np.array(vc_bool.sizes())
        transitions_ttest = transitions_coarse.copy()
        transitions_confidence = transitions_coarse.copy()
        from scipy.stats import ttest_1samp
        for i in range(transitions_coarse.shape[0]):
            # no symmetry in transitions_coarse, hence we should not restrict to
            # upper triangle
            neighbors = transitions_coarse[i].nonzero()[1]
            for j in neighbors:
                # One-sample t-test on signed edge weights: forward edges
                # count positive, backward edges negative.
                forward = cg_full.es.select(_source=i, _target=j)['weight']
                backward = cg_full.es.select(_source=j, _target=i)['weight']
                # backward direction: add minus sign
                values = np.array(list(forward) + list(-np.array(backward)))
                # require some minimal number of observations
                if len(values) < 5:
                    transitions_ttest[i, j] = 0
                    transitions_ttest[j, i] = 0
                    transitions_confidence[i, j] = 0
                    transitions_confidence[j, i] = 0
                    continue
                t, prob = ttest_1samp(values, 0.0)
                if t > 0:
                    # number of outgoing edges greater than number of ingoing edges
                    # i.e., transition from i to j
                    transitions_ttest[i, j] = -np.log10(max(prob, 1e-10))
                    transitions_ttest[j, i] = 0
                else:
                    transitions_ttest[j, i] = -np.log10(max(prob, 1e-10))
                    transitions_ttest[i, j] = 0
                # geom_mean
                geom_mean = np.sqrt(total_n[i] * total_n[j])
                diff = (len(forward) - len(backward)) / geom_mean
                if diff > 0:
                    transitions_confidence[i, j] = diff
                    transitions_confidence[j, i] = 0
                else:
                    transitions_confidence[j, i] = -diff
                    transitions_confidence[i, j] = 0
        transitions_ttest.eliminate_zeros()
        transitions_confidence.eliminate_zeros()
        # transpose in order to match convention of stochastic matrices
        # entry ij means transition from j to i
        self.transitions_ttest = transitions_ttest.T
        self.transitions_confidence = transitions_confidence.T
def paga_degrees(adata):
    """Compute the degree of each node in the abstracted graph.

    Parameters
    ----------
    adata : AnnData
        Annotated data matrix.

    Returns
    -------
    degrees : list
        List of weighted degrees, one per node.
    """
    import networkx as nx
    abstracted = nx.Graph(adata.uns['paga']['confidence'])
    return [weighted_degree for _node, weighted_degree in abstracted.degree(weight='weight')]
def paga_expression_entropies(adata):
    """Compute the median expression entropy for each node-group.

    Parameters
    ----------
    adata : AnnData
        Annotated data matrix.

    Returns
    -------
    entropies : list
        Entropies of median expressions for each node.
    """
    from scipy.stats import entropy
    _, groups_masks = utils.select_groups(
        adata, key=adata.uns['paga']['groups'])
    entropies = []
    for group_mask in groups_masks:
        median_profile = np.median(adata.X[group_mask], axis=0)
        lowest = np.min(median_profile)
        highest = np.max(median_profile)
        # Min-max normalize the median profile before taking its entropy.
        entropies.append(entropy((median_profile - lowest) / (highest - lowest)))
    return entropies
def paga_compare_paths(adata1, adata2,
                       adjacency_key='confidence', adjacency_key2=None):
    """Compare paths in abstracted graphs in two datasets.
    Compute the fraction of consistent paths between leafs, a measure for the
    topological similarity between graphs.
    By increasing the verbosity to level 4 and 5, the paths that do not agree
    and the paths that agree are written to the output, respectively.
    The PAGA "groups key" needs to be the same in both objects.
    Parameters
    ----------
    adata1, adata2 : AnnData
        Annotated data matrices to compare.
    adjacency_key : str
        Key for indexing the adjacency matrices in `.uns['paga']` to be used in
        adata1 and adata2.
    adjacency_key2 : str, None
        If provided, used for adata2.
    Returns
    -------
    OrderedTuple with attributes ``n_steps`` (total number of steps in paths)
    and ``frac_steps`` (fraction of consistent steps), ``n_paths`` and
    ``frac_paths``.
    """
    import networkx as nx
    g1 = nx.Graph(adata1.uns['paga'][adjacency_key])
    g2 = nx.Graph(adata2.uns['paga'][adjacency_key2 if adjacency_key2 is not None else adjacency_key])
    # Leafs of the reference graph: nodes with exactly one neighbor.
    leaf_nodes1 = [str(x) for x in g1.nodes() if g1.degree(x) == 1]
    logg.msg('leaf nodes in graph 1: {}'.format(leaf_nodes1), v=5, no_indent=True)
    paga_groups = adata1.uns['paga']['groups']
    # Map each group of one dataset to its associated group(s) in the other.
    asso_groups1 = utils.identify_groups(adata1.obs[paga_groups].values,
                                         adata2.obs[paga_groups].values)
    asso_groups2 = utils.identify_groups(adata2.obs[paga_groups].values,
                                         adata1.obs[paga_groups].values)
    orig_names1 = adata1.obs[paga_groups].cat.categories
    orig_names2 = adata2.obs[paga_groups].cat.categories
    import itertools
    n_steps = 0
    n_agreeing_steps = 0
    n_paths = 0
    n_agreeing_paths = 0
    # loop over all pairs of leaf nodes in the reference adata1
    for (r, s) in itertools.combinations(leaf_nodes1, r=2):
        r2, s2 = asso_groups1[r][0], asso_groups1[s][0]
        orig_names = [orig_names1[int(i)] for i in [r, s]]
        orig_names += [orig_names2[int(i)] for i in [r2, s2]]
        logg.msg('compare shortest paths between leafs ({}, {}) in graph1 and ({}, {}) in graph2:'
               .format(*orig_names), v=4, no_indent=True)
        no_path1 = False
        try:
            path1 = [str(x) for x in nx.shortest_path(g1, int(r), int(s))]
        except nx.NetworkXNoPath:
            no_path1 = True
        no_path2 = False
        try:
            path2 = [str(x) for x in nx.shortest_path(g2, int(r2), int(s2))]
        except nx.NetworkXNoPath:
            no_path2 = True
        if no_path1 and no_path2:
            # consistent behavior
            n_paths += 1
            n_agreeing_paths += 1
            n_steps += 1
            n_agreeing_steps += 1
            logg.msg('there are no connecting paths in both graphs', v=5, no_indent=True)
            continue
        elif no_path1 or no_path2:
            # non-consistent result
            n_paths += 1
            n_steps += 1
            continue
        # Compare the shorter path against the longer one mapped through the
        # group association; path_mapped entries are lists of candidate groups.
        if len(path1) >= len(path2):
            path_mapped = [asso_groups1[l] for l in path1]
            path_compare = path2
            path_compare_id = 2
            path_compare_orig_names = [[orig_names2[int(s)] for s in l] for l in path_compare]
            path_mapped_orig_names = [[orig_names2[int(s)] for s in l] for l in path_mapped]
        else:
            path_mapped = [asso_groups2[l] for l in path2]
            path_compare = path1
            path_compare_id = 1
            path_compare_orig_names = [[orig_names1[int(s)] for s in l] for l in path_compare]
            path_mapped_orig_names = [[orig_names1[int(s)] for s in l] for l in path_mapped]
        n_agreeing_steps_path = 0
        # ip_progress tracks how far along path_mapped we have already
        # matched, so the greedy forward scan never goes backwards.
        ip_progress = 0
        for il, l in enumerate(path_compare[:-1]):
            for ip, p in enumerate(path_mapped):
                if ip >= ip_progress and l in p:
                    # check whether we can find the step forward of path_compare in path_mapped
                    if (ip + 1 < len(path_mapped)
                        and
                        path_compare[il + 1] in path_mapped[ip + 1]):
                        # make sure that a step backward leads us to the same value of l
                        # in case we "jumped"
                        logg.msg('found matching step ({} -> {}) at position {} in path{} and position {} in path_mapped'
                               .format(l, path_compare_orig_names[il + 1], il, path_compare_id, ip), v=6)
                        consistent_history = True
                        for iip in range(ip, ip_progress, -1):
                            if l not in path_mapped[iip - 1]:
                                consistent_history = False
                        if consistent_history:
                            # here, we take one step further back (ip_progress - 1); it's implied that this
                            # was ok in the previous step
                            logg.msg('    step(s) backward to position(s) {} in path_mapped are fine, too: valid step'
                                   .format(list(range(ip - 1, ip_progress - 2, -1))), v=6)
                            n_agreeing_steps_path += 1
                            ip_progress = ip + 1
                            break
        n_steps_path = len(path_compare) - 1
        n_agreeing_steps += n_agreeing_steps_path
        n_steps += n_steps_path
        n_paths += 1
        if n_agreeing_steps_path == n_steps_path: n_agreeing_paths += 1
        # only for the output, use original names
        path1_orig_names = [orig_names1[int(s)] for s in path1]
        path2_orig_names = [orig_names2[int(s)] for s in path2]
        logg.msg('      path1 = {},\n'
               'path_mapped = {},\n'
               '      path2 = {},\n'
               '-> n_agreeing_steps = {} / n_steps = {}.'
               .format(path1_orig_names,
                       [list(p) for p in path_mapped_orig_names],
                       path2_orig_names,
                       n_agreeing_steps_path, n_steps_path), v=5, no_indent=True)
    Result = namedtuple('paga_compare_paths_result',
                        ['frac_steps', 'n_steps', 'frac_paths', 'n_paths'])
    return Result(frac_steps=n_agreeing_steps/n_steps if n_steps > 0 else np.nan,
                  n_steps=n_steps if n_steps > 0 else np.nan,
                  frac_paths=n_agreeing_paths/n_paths if n_steps > 0 else np.nan,
                  n_paths=n_paths if n_steps > 0 else np.nan)
| 46.145585 | 121 | 0.617947 | 7,668 | 0.396587 | 0 | 0 | 0 | 0 | 0 | 0 | 6,435 | 0.332816 |
d802143b8c4b9b5183911d466a1c2053a64055aa | 650 | py | Python | sporting_webapp/nfl_package/migrations/0002_auto_20190807_1445.py | plopez9/chipy_sports_app_2.0 | 39b337238d0a55bfe842cb60ea9fe4724426fbf0 | [
"MIT"
] | null | null | null | sporting_webapp/nfl_package/migrations/0002_auto_20190807_1445.py | plopez9/chipy_sports_app_2.0 | 39b337238d0a55bfe842cb60ea9fe4724426fbf0 | [
"MIT"
] | null | null | null | sporting_webapp/nfl_package/migrations/0002_auto_20190807_1445.py | plopez9/chipy_sports_app_2.0 | 39b337238d0a55bfe842cb60ea9fe4724426fbf0 | [
"MIT"
] | 1 | 2019-10-08T17:39:08.000Z | 2019-10-08T17:39:08.000Z | # Generated by Django 2.1.7 on 2019-08-07 19:45
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('nfl_package', '0001_initial'),
]
operations = [
migrations.RenameField(
model_name='nflplayersummary',
old_name='player',
new_name='Name',
),
migrations.RenameField(
model_name='nflplayersummary',
old_name='pos',
new_name='Pos',
),
migrations.RenameField(
model_name='nflplayersummary',
old_name='year',
new_name='Year',
),
]
| 22.413793 | 47 | 0.543077 | 565 | 0.869231 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.252308 |
d802630c9349918a8c83b164182db24aea187fcd | 982 | py | Python | tests/test_player_PlayerList.py | basbloemsaat/dartsense | 3114a3b73861baf9cf0019a9a2454d7f38e67af1 | [
"MIT"
] | null | null | null | tests/test_player_PlayerList.py | basbloemsaat/dartsense | 3114a3b73861baf9cf0019a9a2454d7f38e67af1 | [
"MIT"
] | 5 | 2018-03-16T09:59:05.000Z | 2019-02-10T21:55:03.000Z | tests/test_player_PlayerList.py | basbloemsaat/dartsense | 3114a3b73861baf9cf0019a9a2454d7f38e67af1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os
import pytest
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../lib"))
import dartsense.player
player_list = None
def test_player_list_init(setup_db):
player_list = dartsense.player.PlayerList()
assert isinstance(player_list, dartsense.player.PlayerList)
assert len(player_list) == 5
for player in player_list:
assert isinstance(player, dartsense.player.Player)
def test_player_list_filter(setup_db):
player_list = dartsense.player.PlayerList(
filters={'competition': pytest.setup_vars['testleague1_id']}
)
assert len(player_list) == 4
def test_player_list_search(setup_db):
player_list = dartsense.player.PlayerList(
search='player 3'
)
assert len(player_list) == 1
player_list = dartsense.player.PlayerList(
filters={'competition': pytest.setup_vars['testleague2_id']},
search='player 3'
)
assert len(player_list) == 1
| 22.318182 | 69 | 0.707739 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.10998 |
d802a8d11f27acf3f75ca9383d9b728785c704c9 | 937 | py | Python | tests/test_edit_first_contact.py | ksemish/KseniyaRepository | 80f476c12c5d5412eed31f243fe4de84982eee46 | [
"Apache-2.0"
] | null | null | null | tests/test_edit_first_contact.py | ksemish/KseniyaRepository | 80f476c12c5d5412eed31f243fe4de84982eee46 | [
"Apache-2.0"
] | null | null | null | tests/test_edit_first_contact.py | ksemish/KseniyaRepository | 80f476c12c5d5412eed31f243fe4de84982eee46 | [
"Apache-2.0"
] | null | null | null | from models.contact import Contacts
import random
def test_edit_some_contact(app, db, check_ui):
if len(db.get_contact_list()) == 0:
app.contacts.create_contact(Contacts(lastname="LastNameUser", firstname="User Modify"))
old_contacts = db.get_contact_list()
randomcontact = random.choice(old_contacts)
index = old_contacts.index(randomcontact)
contact = Contacts(id=randomcontact.id, lastname="Lastname", firstname="ModifyFirstname")
app.contacts.test_edit_contact_by_id(randomcontact.id, contact)
new_contacts = db.get_contact_list()
assert len(old_contacts) == len(new_contacts)
old_contacts[index] = contact
# assert sorted(old_contacts, key=Contacts.contact_id_or_max) == sorted(new_contacts, key=Contacts.contact_id_or_max)
if check_ui:
assert sorted(new_contacts, key=Contacts.contact_id_or_max) == sorted(app.contacts.get_contact_list(), key=Contacts.contact_id_or_max) | 55.117647 | 142 | 0.766275 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 171 | 0.182497 |
d802ce41072a86678bbadc7fc6f1d1a6a8d547c5 | 1,875 | py | Python | examples/pcd8544_pillow_demo.py | sommersoft/Adafruit_CircuitPython_PCD8544 | 2ef409f9454461494a997d994c28d62735c5da88 | [
"MIT"
] | null | null | null | examples/pcd8544_pillow_demo.py | sommersoft/Adafruit_CircuitPython_PCD8544 | 2ef409f9454461494a997d994c28d62735c5da88 | [
"MIT"
] | null | null | null | examples/pcd8544_pillow_demo.py | sommersoft/Adafruit_CircuitPython_PCD8544 | 2ef409f9454461494a997d994c28d62735c5da88 | [
"MIT"
] | null | null | null | """
This demo will fill the screen with white, draw a black box on top
and then print Hello World! in the center of the display
This example is for use on (Linux) computers that are using CPython with
Adafruit Blinka to support CircuitPython libraries. CircuitPython does
not support PIL/pillow (python imaging library)!
"""
import board
import busio
import digitalio
from PIL import Image, ImageDraw, ImageFont
import adafruit_pcd8544
# Parameters to Change
BORDER = 5
FONTSIZE = 10
spi = busio.SPI(board.SCK, MOSI=board.MOSI)
dc = digitalio.DigitalInOut(board.D6) # data/command
cs = digitalio.DigitalInOut(board.CE0) # Chip select
reset = digitalio.DigitalInOut(board.D5) # reset
display = adafruit_pcd8544.PCD8544(spi, dc, cs, reset)
# Contrast and Brightness Settings
display.bias = 4
display.contrast = 60
# Turn on the Backlight LED
backlight = digitalio.DigitalInOut(board.D13) # backlight
backlight.switch_to_output()
backlight.value = True
# Clear display.
display.fill(0)
display.show()
# Create blank image for drawing.
# Make sure to create image with mode '1' for 1-bit color.
image = Image.new("1", (display.width, display.height))
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a black background
draw.rectangle((0, 0, display.width, display.height), outline=255, fill=255)
# Draw a smaller inner rectangle
draw.rectangle(
(BORDER, BORDER, display.width - BORDER - 1, display.height - BORDER - 1),
outline=0,
fill=0,
)
# Load a TTF font.
font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", FONTSIZE)
# Draw Some Text
text = "Hello World!"
(font_width, font_height) = font.getsize(text)
draw.text(
(display.width // 2 - font_width // 2, display.height // 2 - font_height // 2),
text,
font=font,
fill=255,
)
# Display image
display.image(image)
display.show()
| 25.337838 | 86 | 0.737067 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 770 | 0.410667 |
d80324dc01f020d815e622c10604165f52290b3e | 75 | py | Python | dummypy/packageA/__init__.py | brett-hosking/dummypy | 07931382d55ffb9875fb0edab01ff5b33a6fa8a3 | [
"MIT"
] | null | null | null | dummypy/packageA/__init__.py | brett-hosking/dummypy | 07931382d55ffb9875fb0edab01ff5b33a6fa8a3 | [
"MIT"
] | null | null | null | dummypy/packageA/__init__.py | brett-hosking/dummypy | 07931382d55ffb9875fb0edab01ff5b33a6fa8a3 | [
"MIT"
] | null | null | null | # Nested package modules
# Expose the nested Atest module and a package-identifying constant.
from . import Atest
__packageAname__ = 'packageA'
d803412163b0b8022ac01ffa7da41a8470d148ab | 1,205 | py | Python | src/waldur_core/media/tests/test_utils.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 26 | 2017-10-18T13:49:58.000Z | 2021-09-19T04:44:09.000Z | src/waldur_core/media/tests/test_utils.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 14 | 2018-12-10T14:14:51.000Z | 2021-06-07T10:33:39.000Z | src/waldur_core/media/tests/test_utils.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 32 | 2017-09-24T03:10:45.000Z | 2021-10-16T16:41:09.000Z | from freezegun import freeze_time
from rest_framework import test
from rest_framework.exceptions import ValidationError
from waldur_core.core.tests.helpers import override_waldur_core_settings
from waldur_core.media.utils import decode_attachment_token, encode_attachment_token
from waldur_core.structure.tests.factories import CustomerFactory, UserFactory
@override_waldur_core_settings(TIME_ZONE='Asia/Muscat')
class TestMediaUtils(test.APITransactionTestCase):
def setUp(self):
self.user = UserFactory()
self.customer = CustomerFactory()
def test_token_encoder(self):
token = encode_attachment_token(self.user.uuid.hex, self.customer, 'image')
user_uuid, content_type, object_id, field = decode_attachment_token(token)
self.assertEqual(self.user.uuid.hex, user_uuid)
self.assertEqual(field, 'image')
self.assertEqual(object_id, self.customer.uuid.hex)
def test_expired_token(self):
with freeze_time('2019-01-01'):
token = encode_attachment_token(self.user.uuid.hex, self.customer, 'image')
with freeze_time('2019-01-02'):
self.assertRaises(ValidationError, decode_attachment_token, token)
| 43.035714 | 87 | 0.760166 | 788 | 0.653942 | 0 | 0 | 844 | 0.700415 | 0 | 0 | 58 | 0.048133 |
d803e1c9556b3090e13cb8ecebad7e5377228653 | 19,620 | py | Python | main.py | Salonee-Jain/Eden | 5283118f75df433b40c649b0dabcd45b84d7ed70 | [
"MIT"
] | 9 | 2021-09-11T16:04:43.000Z | 2022-02-19T06:30:07.000Z | main.py | Salonee-Jain/Eden | 5283118f75df433b40c649b0dabcd45b84d7ed70 | [
"MIT"
] | null | null | null | main.py | Salonee-Jain/Eden | 5283118f75df433b40c649b0dabcd45b84d7ed70 | [
"MIT"
] | 3 | 2021-09-04T19:30:30.000Z | 2022-01-07T17:25:08.000Z | from pygame import mixer
import speech_recognition as sr
import pyttsx3
import pyjokes
import boto3
import pyglet
import winsound
import datetime
import pywhatkit
import datetime
import time
import os
from PIL import Image
import random
import wikipedia
import smtplib, ssl
from mutagen.mp3 import MP3
import requests, json
from bs4 import BeautifulSoup
import geocoder
from geopy.geocoders import Nominatim
import webbrowser
import pymongo
from getmac import get_mac_address as gma
import cv2
import face_recognition
import numpy as np
import smtplib
import datetime
import re, requests, subprocess, urllib.parse, urllib.request
# Shared speech-recognizer instance used by the assistant's listening code.
r = sr.Recognizer()
# Scratch state -- presumably scheduled tasks and downloaded media filenames;
# confirm against the rest of the file.
task={}
filename1=[]
# Browser-like User-Agent so Google serves the desktop weather-widget markup.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
# SECURITY: database credentials are hard-coded in this connection string;
# they should be moved to an environment variable or config file.
client = pymongo.MongoClient("mongodb+srv://karan:123@cluster0.gfuxd.mongodb.net/myFirstDatabase?retryWrites=true&w=majority")
database = client["LocationDatabase"]
table = database["Location"]
def location():
    """Record this machine's current street address in MongoDB.

    The document ``_id`` is the device MAC address with digits and colons
    stripped — the same id the "id" voice command announces — so the
    "location of <id>" command can find the entry later.
    """
    g = geocoder.ip('me')
    latitude = str(g.latlng[0])
    longitude = str(g.latlng[1])
    geolocator = Nominatim(user_agent="geoapiExercises")
    address = geolocator.reverse(latitude + "," + longitude)
    # Sanitised device id; must match the id spoken by the "id" command.
    device_id = ''.join(i for i in gma() if not i.isdigit()).replace(":", "")
    try:
        table.insert_one({"_id": device_id, "location": str(address)})
    except pymongo.errors.DuplicateKeyError:
        # A document already exists for this device: refresh its location.
        # (The old code queried the raw MAC here, which never matched the
        # sanitised _id that was inserted, so updates silently failed.)
        table.update_one({"_id": device_id},
                         {"$set": {"location": str(address)}})
def weather(city):
    """Speak a greeting, the current time, and the temperature for *city*.

    The data is scraped from the Google search results page; the '#wob_*'
    selectors appear to be Google's weather-widget element ids, so this
    breaks whenever Google changes its markup — TODO confirm/monitor.
    """
    city = city.replace(" ", "+")
    res = requests.get(
        f'https://www.google.com/search?q={city}&oq={city}&aqs=chrome.0.35i39l2j0l4j46j69i60.6128j1j7&sourceid=chrome&ie=UTF-8', headers=headers)
    soup = BeautifulSoup(res.text, 'html.parser')
    location = soup.select('#wob_loc')[0].getText().strip()
    info = soup.select('#wob_dc')[0].getText().strip()  # fetched but unused here
    weather = soup.select('#wob_tm')[0].getText().strip()
    greetings=["Hello","Hey","Hi","Greetings","Namaste"]
    # speak() returns the clip length; sleeping that long keeps consecutive
    # clips from talking over each other.
    timewait=speak(greetings[random.randint(0,4)])
    time.sleep(timewait)
    timewait=speak("In "+location)
    time.sleep(timewait)
    # Re-parse HH:MM so strftime can format the 12-hour hour and the minute
    # as separate spoken chunks.
    d = datetime.datetime.strptime(str(datetime.datetime.now().strftime("%H:%M")), "%H:%M")
    timewait=speak("It is "+str(d.strftime("%I")))
    time.sleep(timewait-0.1)
    timewait=speak(str(d.strftime("%M")),"op")
    time.sleep(timewait)
    if(datetime.datetime.now().hour>12):
        timewait=speak("P M","ui")
        time.sleep(timewait-0.1)
    else:
        timewait=speak("A M")
        time.sleep(timewait)
    timewait=speak("The Temperature is "+weather+"Degree Celcius","oi")
    time.sleep(timewait)
def weather_main():
    """Announce the weather for the city geocoded from this machine's IP.

    The spoken temperature comes from weather() (Google scrape); the
    OpenWeatherMap response fetched here supplies only humidity, the
    textual description, and the umbrella / clear-sky advice.
    """
    g = geocoder.ip('me')
    Latitude = str(g.latlng[0])
    Longitude = str(g.latlng[1])
    geolocator = Nominatim(user_agent="geoapiExercises")
    location = geolocator.reverse(Latitude+","+Longitude)
    address = location.raw['address']
    city_name=address['city']
    # Placeholder: a real OpenWeatherMap API key must be supplied, or the
    # request below returns an error payload and the 'main' keys are absent.
    api_key = "Your_OWN_KEY"
    base_url = "http://api.openweathermap.org/data/2.5/weather?"
    complete_url = base_url + "appid=" + api_key + "&q=" + city_name
    response = requests.get(complete_url)
    x = response.json()
    city_name = city_name+" weather"
    weather(city_name)
    if x["cod"] != "404":
        y = x["main"]
        current_temperature = y["temp"]  # fetched but never spoken
        current_humidity = y["humidity"]
        z = x["weather"]
        weather_description = z[0]["description"]
        timewait=speak("Humidity is " +str(current_humidity) + "percentage","op")
        time.sleep(timewait)
        timewait=speak("It's "+str(weather_description)+" Today","ooP")
        time.sleep(timewait)
        # Advice keyed off substrings of OpenWeatherMap's description text.
        if(("thunderstorm" in str(weather_description)) or ("rain" in str(weather_description)) or ("shower" in str(weather_description))):
            timewait=speak("You Might Need An Umbrella!")
            time.sleep(timewait)
        elif(("clear" in str(weather_description)) or ("sunny" in str(weather_description))):
            timewait=speak("We Have A Clear Sky!")
            time.sleep(timewait)
        elif("cloudy" in str(weather_description)):
            timewait=speak("The Sky Might Be Cloudy!")
            time.sleep(timewait)
        timewait=speak("Have a Nice Day")
        time.sleep(timewait+1)
# Default speaker name; overwritten below once face recognition (or the
# voice-enrolment fallback) identifies the user.
name="User"
def speak(text, tp="1", voice="Salli"):
    """Synthesize *text* with AWS Polly, start playback, return clip length.

    Parameters
    ----------
    text : str
        Sentence to speak.
    tp : str
        Suffix appended to the clip filename so two clips generated within
        the same second do not collide.
    voice : str
        Polly voice id.

    Returns
    -------
    float
        Duration of the generated mp3 in seconds; callers sleep this long
        so consecutive clips do not overlap.
    """
    response = polly_client.synthesize_speech(VoiceId=voice,
                                              OutputFormat='mp3',
                                              Text=text)
    date_string = datetime.datetime.now().strftime("%d%m%Y%H%M%S")
    clip_path = 'speech' + date_string + tp + '.mp3'
    # 'with' guarantees the handle is closed even if the write fails.
    with open(clip_path, 'wb') as clip_file:
        clip_file.write(response['AudioStream'].read())
    filename1.append(clip_path)
    # Keep at most ~10 clips on disk by dropping the five oldest.
    # (The old code popped by a running index while iterating, which skipped
    # every other entry and deleted the wrong files, leaving stale names.)
    if len(filename1) > 10:
        for _ in range(5):
            os.remove(filename1.pop(0))
    audio = MP3(clip_path)
    mixer.init()
    mixer.music.load(clip_path)
    mixer.music.play()
    return audio.info.length
# Amazon Polly text-to-speech client used by speak().
# SECURITY: AWS credentials must not live in source control — load them from
# the environment or ~/.aws/credentials instead of these placeholders.
polly_client = boto3.Session(
                aws_access_key_id="Your_OWN_KEY",
                aws_secret_access_key="Your_OWN_KEY",
                region_name='us-west-2').client('polly')
# Identify the user: compare one fresh webcam frame against every stored
# <name>.jpg in the working directory. Any failure (no saved photos, no face
# found -> IndexError from face_locations()[0], camera error, ...) drops into
# the except path, which enrols a new user by voice and saves their photo.
# NOTE(review): the bare 'except:' also hides genuine errors (e.g. a missing
# camera) behind the enrolment flow — consider narrowing it.
try:
    for file in os.listdir("./"):
        filename = os.fsdecode(file)
        if filename.endswith(".jpg"):
            # Reference photo for one known user.
            imgloaded=face_recognition.load_image_file(filename)
            imgloaded=cv2.cvtColor(imgloaded,cv2.COLOR_BGR2RGB)
            # Grab a single frame from the default webcam.
            camera = cv2.VideoCapture(0)
            return_value, image = camera.read()
            cv2.imwrite(os.path.join('./' , 'testimage.jpg'), image)
            imgtest=face_recognition.load_image_file('./testimage.jpg')
            imgtest=cv2.cvtColor(imgtest,cv2.COLOR_BGR2RGB)
            # [0]: assumes at least one face in each image; raises IndexError
            # otherwise (intentionally caught below as "unknown user").
            faceloc=face_recognition.face_locations(imgloaded)[0]
            encodeloaded=face_recognition.face_encodings(imgloaded)[0]
            cv2.rectangle(imgloaded,(faceloc[3],faceloc[0]),(faceloc[1],faceloc[2]),(255,0,255),2)
            faceloctest=face_recognition.face_locations(imgtest)[0]
            encodetest=face_recognition.face_encodings(imgtest)[0]
            # NOTE(review): this rectangle reuses 'faceloc' (the reference
            # photo's box) on the test image instead of 'faceloctest'.
            cv2.rectangle(imgtest,(faceloc[3],faceloc[0]),(faceloc[1],faceloc[2]),(255,0,255),2)
            results=face_recognition.compare_faces([encodeloaded],encodetest)
            if(results[0]):
                # Match: the photo's basename is the user's name.
                name=filename.replace(".jpg","")
                break
except:
    # Enrolment: ask for the user's name by voice, then save a webcam photo
    # named <name>.jpg so the next run recognises them.
    timewait=speak("What's Your Name? ")
    time.sleep(timewait)
    print("Listening")
    with sr.Microphone() as source2:
        r.adjust_for_ambient_noise(source2, duration=0.1)
        audio2 = r.listen(source2)
        name = r.recognize_google(audio2)
    camera = cv2.VideoCapture(0)
    return_value, image = camera.read()
    date_string = datetime.datetime.now().strftime("%d%m%Y%H%M%S")
    cv2.imwrite(os.path.join('./' , name+'.jpg'), image)
onlyonce=0
# Main command loop. Every pass: (1) during wall-clock minute 14, push this
# device's location to MongoDB exactly once; (2) capture one phrase from the
# microphone, transcribe it with Google speech recognition, and dispatch on
# keywords found in the transcript.
while(1):
    try:
        d = datetime.datetime.strptime(str(datetime.datetime.now().strftime("%H:%M")), "%H:%M")
        # 'onlyonce' is a latch so location() fires a single time during
        # minute 14 instead of on every loop iteration of that minute.
        if(str(d.strftime("%M"))=='14' and onlyonce==0):
            onlyonce+=1
            location()
        if(onlyonce>0):
            if(str(d.strftime("%M"))!='14'):
                onlyonce=0
        with sr.Microphone() as source2:
            print("Listening")
            r.adjust_for_ambient_noise(source2, duration=1)
            audio2 = r.listen(source2)
            MyText = r.recognize_google(audio2)
            MyText = MyText.lower()
            print(MyText.title())
            # ---- tell a joke ------------------------------------------
            if("joke" in MyText):
                My_joke = pyjokes.get_joke(language="en", category="all")
                print(My_joke)
                time1 = speak(My_joke,"joke")
                time.sleep(int(time1))
            # ---- greeting: greet the user and read out the weather ----
            elif(("hello" in MyText) or ("update" in MyText) or ("hi" in MyText) or ("hey" in MyText)):
                speak(name,"iu")
                time.sleep(1)
                weather_main()
            # ---- current time (hour, minute, AM/PM as separate clips) -
            elif("time" in MyText):
                speak(name,"iu")
                time.sleep(1)
                speak("The Time Is","O")
                time.sleep(0.7)
                speak(str(datetime.datetime.strptime(str(datetime.datetime.now().strftime("%H:%M")), "%H:%M").strftime("%I")))
                time.sleep(0.5)
                speak(str(datetime.datetime.strptime(str(datetime.datetime.now().strftime("%H:%M")), "%H:%M").strftime("%M")),"o")
                time.sleep(0.8)
                if(datetime.datetime.now().hour>12):
                    timewait=speak("P M","ui")
                    time.sleep(timewait-0.1)
                else:
                    timewait=speak("A M")
                    time.sleep(timewait)
            # ---- today's date -----------------------------------------
            elif("date" in MyText):
                speak(name,"iu")
                time.sleep(1)
                x = datetime.datetime.now()
                speak("It's "+str(x.strftime("%A")))
                time.sleep(0.85)
                # NOTE(review): replace("0","") strips every zero, so e.g.
                # day "20" is spoken as "2", not just de-padded.
                speak(str(x.strftime("%d")).replace("0",""),"i")
                time.sleep(0.8)
                speak(x.strftime("%B"),"P")
                time.sleep(0.8)
                speak(str(x.year),"OP")
                time.sleep(0.8)
            # ---- send an e-mail via Gmail SMTP ------------------------
            # NOTE(review): the password prompt text says "Receiver's" but
            # the value is used to log in as the *sender* account below.
            elif("mail" in MyText):
                port = 587
                smtp_server = "smtp.gmail.com"
                sender_email = "techtrends288@gmail.com"
                speak("What's The Receiver's Mail I D")
                receiver_email = input("Receiver's Mail ID:")
                password = input("Receiver's Your Password: ")
                speak("What's The Subject?")
                time.sleep(2)
                print("Speak Now")
                r.adjust_for_ambient_noise(source2, duration=0.1)
                audio2 = r.listen(source2)
                SUBJECT = r.recognize_google(audio2)
                speak("What Should The Message Say")
                time.sleep(2)
                print("Speak Now")
                r.adjust_for_ambient_noise(source2, duration=0.1)
                audio2 = r.listen(source2)
                message = r.recognize_google(audio2)
                context = ssl.create_default_context()
                with smtplib.SMTP(smtp_server, port) as server:
                    server.ehlo()
                    server.starttls(context=context)
                    server.ehlo()
                    server.login(sender_email, password)
                    message = 'Subject: {}\n\n{}'.format(SUBJECT, message)
                    server.sendmail(sender_email, receiver_email, message)
                speak("Message On Its Way!")
                print("Message Sent!")
            # ---- WhatsApp message via pywhatkit -----------------------
            # NOTE(review): '"whatsapp" and "message" in MyText' parses as
            # '"whatsapp" and ("message" in MyText)'; the left operand is a
            # truthy literal, so only "message" is actually tested.
            elif("whatsapp" and "message" in MyText):
                if("to" in MyText):
                    split_sentence = MyText.split(' ')
                    # NOTE(review): this clobbers the global user 'name'.
                    name=split_sentence[-1]
                    speak("What's "+name+"'s Phone Number? ")
                else:
                    speak("What's Their Phone Number?")
                time.sleep(2)
                print("Speak Now")
                r.adjust_for_ambient_noise(source2, duration=0.1)
                audio2 = r.listen(source2)
                MyText = r.recognize_google(audio2)
                number = MyText.lower().replace(" ", "")
                speak("What's The Message? ")
                time.sleep(2)
                print("Speak Now")
                r.adjust_for_ambient_noise(source2, duration=0.1)
                audio2 = r.listen(source2)
                MyText = r.recognize_google(audio2)
                msg = MyText.lower()
                # Schedule one minute ahead; retry two minutes ahead on
                # failure. NOTE(review): minute+1/+2 can exceed 59 near the
                # end of an hour.
                try:
                    pywhatkit.sendwhatmsg("+91"+number,msg,datetime.datetime.now().hour,datetime.datetime.now().minute+1)
                except:
                    pywhatkit.sendwhatmsg("+91"+number,msg,datetime.datetime.now().hour,datetime.datetime.now().minute+2)
                speak("Message On Its Way!")
                print("Message Sent!")
            # ---- random number ----------------------------------------
            # NOTE(review): same precedence issue as above — only "number"
            # (and, below, only "to") is actually tested.
            elif("random" and "number" in MyText):
                speak(name,"iu")
                time.sleep(1)
                if("from" and "to" in MyText):
                    split_sentence = MyText.split(' ')
                    fromIndex=split_sentence.index('from')
                    toIndex=split_sentence.index('to')
                    speak("Here's Your Random Number "+str(random.randint(int(split_sentence[int(fromIndex)+1]),int(split_sentence[int(toIndex)+1]))))
                else:
                    speak("Here's Your Random Number "+str(random.randint(0,100)))
                time.sleep(3)
            # ---- dictate a note and render it as handwriting ----------
            elif(("note" in MyText) or( "write" in MyText) or( "homework" in MyText)):
                speak("What's The Content? ")
                time.sleep(2)
                print("Speak Now")
                r.adjust_for_ambient_noise(source2, duration=0.1)
                audio2 = r.listen(source2)
                MyText = r.recognize_google(audio2)
                msg = MyText.lower()
                pywhatkit.text_to_handwriting(msg)
                img_path = "pywhatkit.png"
                image1 = Image.open(r'pywhatkit.png')
                im1 = image1.convert('RGB')
                im1.save(r'HandWritten.pdf')
                # NOTE(review): the spoken filename says ".png" but the
                # saved artifact above is HandWritten.pdf.
                speak("Your HomeWork Is Generated As Handwritten dot p n g")
                time.sleep(3)
            # ---- general knowledge via Wikipedia ----------------------
            elif(("do" in MyText) or( "what" in MyText) or ("where" in MyText) or ("who" in MyText)):
                split_sentence = MyText.split(' ')
                # NOTE(review): the '!=' clauses are OR-ed, so this test is
                # always True (any word differs from at least one literal);
                # the else branch below is effectively unreachable.
                if((split_sentence[-2]!="know") or (split_sentence[-2]!="is") or (split_sentence[-2]!="are") or (split_sentence[-2]!="an") or (split_sentence[-2]!="a") or (split_sentence[-2]!="the")):
                    print(wikipedia.summary(split_sentence[-2]+" "+split_sentence[-1],sentences=2))
                    time1=speak(wikipedia.summary(split_sentence[-2]+" "+split_sentence[-1],sentences=2))
                else:
                    print(wikipedia.summary(split_sentence[-1],sentences=2))
                    time1=speak(wikipedia.summary(split_sentence[-1],sentences=2))
                time.sleep(time1)
            # ---- create a named to-do list ----------------------------
            elif(("create" in MyText) and ("list" in MyText)):
                speak(name,"iu")
                time.sleep(1)
                split_sentence = MyText.split(' ')
                # BUG: 'dict' here is the builtin type, so this item
                # assignment raises TypeError as soon as the branch runs.
                dict["new key"]=[]
                # The word spoken just before "list" names the list.
                task[split_sentence[split_sentence.index("list")-1]]=[]
                nameoflist=split_sentence[split_sentence.index("list")-1]
                speak("What Items Do You Want Me To Add?")
                time.sleep(2)
                speak("Please! Add One Item At a time!","p")
                time.sleep(4)
                # Collect items one phrase at a time until "end" is heard.
                while ("end" not in MyText):
                    print("Say Task")
                    time.sleep(1)
                    r.adjust_for_ambient_noise(source2, duration=0.1)
                    audio2 = r.listen(source2)
                    MyText = r.recognize_google(audio2)
                    if("end" in MyText):
                        speak("List Updated")
                    else:
                        task[nameoflist].append(MyText)
                        speak("Next Item?")
                        time.sleep(2)
                print(task)
            # ---- read back the stored lists ---------------------------
            elif(("show" in MyText) and ("list" in MyText)):
                speak(name,"iu")
                time.sleep(1)
                if(task=={}):
                    speak("You Currently Have No Items In The List")
                else:
                    speak("You Have"+str(len(task))+" Items In List")
                    time.sleep(2)
                    for key in task:
                        speak("In "+key+" You Have","o")
                        time.sleep(2)
                        for keys in task[key]:
                            speak(keys,"oo")
                            time.sleep(1)
            # ---- weather ----------------------------------------------
            elif("weather" in MyText):
                speak(name,"iu")
                time.sleep(1)
                weather_main()
            # ---- open a website named in the command ------------------
            elif(("open" in MyText)):
                split_sentence = MyText.split(' ')
                url=""
                for i in split_sentence:
                    if(i=="open"):
                        continue
                    url+=i
                webbrowser.open_new(url)
            # ---- Google + YouTube search ------------------------------
            elif("search" in MyText):
                split_sentence = MyText.split(' ')
                url=""
                for i in split_sentence:
                    if(i=="search"):
                        continue
                    url+=i+"+"
                webbrowser.open("https://www.google.com/search?q={query}".format(query=url))
                webbrowser.open("https://www.youtube.com/results?search_query={query}".format(query=url))
            # ---- easter egg about Siri (the three tests are identical) -
            elif(("siri" in MyText) or ("siri" in MyText) or ("siri" in MyText)):
                comment=["She Seems Clever!","Full Respect, Being An Assistant Is Hardwork","I Know Her, She Is Amazing","You Know Her? That's Great!"]
                timewait=speak(comment[random.randint(0,3)])
                time.sleep(timewait)
            # ---- announce this device's id (sanitised MAC) ------------
            elif("id" in MyText):
                speak(name,"iu")
                time.sleep(1)
                timewait=speak("Please Note Down Your ID ")
                time.sleep(timewait)
                time.sleep(0.5)
                timewait=speak(''.join(i for i in gma() if not i.isdigit()).replace(":",""),"io")
                print(''.join(i for i in gma() if not i.isdigit()).replace(":",""))
                time.sleep(timewait)
            # ---- look up the stored location of a device id -----------
            elif("location" in MyText):
                MyText=MyText.lower()
                split_sentence = MyText.split(' ')
                # Everything after "of" is concatenated into the target id.
                idd=''.join([str(elem) for elem in split_sentence[split_sentence.index("of")+1:]]).lower()
                for x in table.find({"_id":idd},{ "_id": 0, "location": 1}):
                    timewait=speak("Last Updated Location Is "+x["location"])
                    time.sleep(timewait)
            # ---- YouTube search only ----------------------------------
            elif(("youtube" in MyText)):
                split_sentence = MyText.split(' ')
                url=""
                for i in split_sentence:
                    if(i=="youtube"):
                        continue
                    url+=i+"+"
                webbrowser.open("https://www.youtube.com/results?search_query={query}".format(query=url))
            # ---- play the first matching YouTube video ----------------
            elif("play" in MyText):
                split_sentence = MyText.split(' ')
                url=""
                for i in split_sentence:
                    if(i=="play"):
                        continue
                    url+=i+" "
                music_name = url
                query_string = urllib.parse.urlencode({"search_query": music_name})
                formatUrl = urllib.request.urlopen("https://www.youtube.com/results?" + query_string)
                # Video ids are the 11 characters after "watch?v=".
                search_results = re.findall(r"watch\?v=(\S{11})", formatUrl.read().decode())
                clip = requests.get("https://www.youtube.com/watch?v=" + "{}".format(search_results[0]))
                clip2 = "https://www.youtube.com/watch?v=" + "{}".format(search_results[0])
                #os.system("start \"\" {url}".format(url=clip2))
                webbrowser.open(clip2)
    # Recognizer failure modes: API unreachable vs. speech not understood.
    except sr.RequestError as e:
        print("Could not request results; {0}".format(e))
    except sr.UnknownValueError:
        print("Could You Repeat That?")
| 40.287474 | 200 | 0.528746 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,250 | 0.165647 |
d80792f38036130fad8a47258da22e135acbe1f7 | 2,993 | py | Python | tests/test_renderers/test_fixtures_docutils.py | ExecutableBookProject/myst_parser | 4bf38aca204b9643ca5dc84b30bdcad209519428 | [
"MIT"
] | 9 | 2020-03-10T15:52:20.000Z | 2020-04-15T20:55:26.000Z | tests/test_renderers/test_fixtures_docutils.py | ExecutableBookProject/myst_parser | 4bf38aca204b9643ca5dc84b30bdcad209519428 | [
"MIT"
] | 58 | 2020-02-13T06:29:43.000Z | 2020-02-23T18:59:54.000Z | tests/test_renderers/test_fixtures_docutils.py | ExecutableBookProject/MyST-Parser | 75ef9cb7b65c98d969724fc6c096e8d6209c5ea0 | [
"MIT"
] | 6 | 2020-02-28T02:58:17.000Z | 2020-04-22T22:26:56.000Z | """Test fixture files, using the ``DocutilsRenderer``.
Note, the output AST is before any transforms are applied.
"""
import shlex
from io import StringIO
from pathlib import Path
import pytest
from docutils.core import Publisher, publish_doctree
from myst_parser.parsers.docutils_ import Parser
FIXTURE_PATH = Path(__file__).parent.joinpath("fixtures")
@pytest.mark.param_file(FIXTURE_PATH / "docutil_syntax_elements.md")
def test_syntax_elements(file_params, monkeypatch):
    """Check Markdown -> docutils AST conversion, with transforms disabled."""
    # Neutralise docutils transforms so the raw parse tree is compared.
    monkeypatch.setattr(Publisher, "apply_transforms", lambda self: None)
    doctree = publish_doctree(
        file_params.content,
        source_path="notset",
        parser=Parser(),
        settings_overrides={"myst_highlight_code_blocks": False},
    )
    # docutils 0.18 changed footnote ids; normalise before comparing.
    outcome = doctree.pformat().replace('"footnote-reference-1"', '"id1"')
    file_params.assert_expected(outcome, rstrip_lines=True)
@pytest.mark.param_file(FIXTURE_PATH / "docutil_roles.md")
def test_docutils_roles(file_params, monkeypatch):
    """Check role conversion to the docutils AST, with transforms disabled."""
    # Neutralise docutils transforms so the raw parse tree is compared.
    monkeypatch.setattr(Publisher, "apply_transforms", lambda self: None)
    doctree = publish_doctree(
        file_params.content,
        source_path="notset",
        parser=Parser(),
    )
    file_params.assert_expected(doctree.pformat(), rstrip_lines=True)
@pytest.mark.param_file(FIXTURE_PATH / "docutil_directives.md")
def test_docutils_directives(file_params, monkeypatch):
    """Check directive output in the docutils AST, with transforms disabled."""
    if "SKIP" in file_params.description:  # line-block directive not yet supported
        pytest.skip(file_params.description)
    # Neutralise docutils transforms so the raw parse tree is compared.
    monkeypatch.setattr(Publisher, "apply_transforms", lambda self: None)
    doctree = publish_doctree(
        file_params.content,
        source_path="notset",
        parser=Parser(),
    )
    file_params.assert_expected(doctree.pformat(), rstrip_lines=True)
@pytest.mark.param_file(FIXTURE_PATH / "docutil_syntax_extensions.txt")
def test_syntax_extensions(file_params):
    """The fixture description is parsed as a docutils commandline."""
    pub = Publisher(parser=Parser())
    option_parser = pub.setup_option_parser()
    try:
        settings = option_parser.parse_args(
            shlex.split(file_params.description)
        ).__dict__
    except Exception as err:
        # Chain the original error so its traceback is kept as the cause.
        raise AssertionError(
            f"Failed to parse commandline: {file_params.description}\n{err}"
        ) from err
    # Capture docutils warnings instead of letting them hit stderr.
    report_stream = StringIO()
    settings["warning_stream"] = report_stream
    doctree = publish_doctree(
        file_params.content,
        parser=Parser(),
        settings_overrides=settings,
    )
    file_params.assert_expected(doctree.pformat(), rstrip_lines=True)
| 30.85567 | 86 | 0.718009 | 0 | 0 | 0 | 0 | 2,623 | 0.876378 | 0 | 0 | 797 | 0.266288 |
d8087c350c4738b46d10a0e8fed4bf37a3d77723 | 6,095 | py | Python | readthedocs/settings/environment.py | optimizely/readthedocs.org | d63aa9ffeea33e4ce6e7739767ee4378dee971b8 | [
"MIT"
] | null | null | null | readthedocs/settings/environment.py | optimizely/readthedocs.org | d63aa9ffeea33e4ce6e7739767ee4378dee971b8 | [
"MIT"
] | null | null | null | readthedocs/settings/environment.py | optimizely/readthedocs.org | d63aa9ffeea33e4ce6e7739767ee4378dee971b8 | [
"MIT"
] | 1 | 2018-10-12T22:15:39.000Z | 2018-10-12T22:15:39.000Z | import os
import json
from .base import CommunityBaseSettings
class EnvironmentSettings(CommunityBaseSettings):

    """Settings resolved from environment variables.

    Required variables are read with ``os.environ[...]`` so a missing value
    raises KeyError at import time (fail fast); optional ones use ``.get()``.
    """

    DEBUG = os.environ.get('DEBUG') == 'true'
    ALLOW_PRIVATE_REPOS = os.environ['ALLOW_PRIVATE_REPOS'] == 'true'
    PRODUCTION_DOMAIN = os.environ['PROD_HOST']
    WEBHOOK_DOMAIN = os.environ['WEBHOOK_HOST']
    WEBSOCKET_HOST = os.environ['WEBSOCKET_HOST']
    DEFAULT_PRIVACY_LEVEL = os.environ['DEFAULT_PRIVACY_LEVEL']
    PUBLIC_API_URL = PRODUCTION_DOMAIN
    CSRF_TRUSTED_ORIGINS = [PRODUCTION_DOMAIN]

    @property
    def DATABASES(self):  # noqa
        # Built as a property so the environment is read at access time.
        return {
            'default': {
                'ENGINE': 'django.db.backends.postgresql_psycopg2',
                'NAME': os.environ['DB_NAME'],
                'USER': os.environ['DB_USER'],
                'PASSWORD': os.environ['DB_PASS'],
                'HOST': os.environ['DB_HOST'],
                'PORT': os.environ['DB_PORT']
            }
        }

    DONT_HIT_DB = False

    ACCOUNT_EMAIL_VERIFICATION = 'none'
    SESSION_COOKIE_DOMAIN = None
    CACHE_BACKEND = 'dummy://'

    SLUMBER_USERNAME = os.environ['SLUMBER_USER']
    SLUMBER_PASSWORD = os.environ['SLUMBER_PASS']  # noqa: ignore dodgy check
    SLUMBER_API_HOST = os.environ['SLUMBER_HOST']

    # Redis setup.
    REDIS_HOST = os.environ['REDIS_HOST']
    REDIS_PORT = os.environ['REDIS_PORT']
    REDIS_ADDRESS = '{}:{}'.format(REDIS_HOST, REDIS_PORT)

    # Celery broker and result backend share Redis DB 0.
    BROKER_URL = 'redis://{}/0'.format(REDIS_ADDRESS)
    CELERY_RESULT_BACKEND = BROKER_URL
    # Run tasks synchronously unless ASYNC_TASKS is enabled.
    CELERY_ALWAYS_EAGER = os.environ.get('ASYNC_TASKS') != 'true'
    CELERY_RESULT_SERIALIZER = 'json'
    CELERY_TASK_IGNORE_RESULT = False

    # Elastic Search setup. ES_HOSTS is a JSON-encoded list in the env.
    ES_HOSTS = json.loads(os.environ['ES_HOSTS'])
    ES_DEFAULT_NUM_REPLICAS = 0
    ES_DEFAULT_NUM_SHARDS = 5

    HAYSTACK_CONNECTIONS = {
        'default': {
            'ENGINE': 'haystack.backends.simple_backend.SimpleEngine',
        },
    }

    # Mail settings
    # Whether or not to actually use the default email backend.
    if os.environ.get('ENABLE_EMAILS') != 'true':
        EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
    DEFAULT_FROM_EMAIL = os.environ.get('FROM_EMAIL')
    EMAIL_HOST = os.environ.get('EMAIL_HOST')
    EMAIL_HOST_USER = os.environ.get('EMAIL_USER')
    EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_PASS')
    EMAIL_PORT = 587
    EMAIL_USE_TLS = True

    # File Sync -- NOTE: Must be local for single-app hosts.
    FILE_SYNCER = os.environ['FILE_SYNCER']

    # Cors origins. CORS_HOSTS is a JSON-encoded list in the env.
    CORS_ORIGIN_WHITELIST = json.loads(os.environ['CORS_HOSTS'])

    # Social Auth config.
    @property
    def SOCIALACCOUNT_PROVIDERS(self):
        providers = super(EnvironmentSettings, self).SOCIALACCOUNT_PROVIDERS
        # This enables private repositories.
        providers['github']['SCOPE'].append('repo')
        return providers

    ACCOUNT_DEFAULT_HTTP_PROTOCOL = os.environ.get(
        'ACCOUNT_DEFAULT_HTTP_PROTOCOL'
    ) or 'http'

    # Cache backend.
    CACHES = {
        'default': {
            'BACKEND': 'redis_cache.RedisCache',
            'LOCATION': REDIS_ADDRESS,
            'PREFIX': 'docs',
            'OPTIONS': {
                'DB': 1,
                'PARSER_CLASS': 'redis.connection.HiredisParser',
                'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',
                'CONNECTION_POOL_CLASS_KWARGS': {
                    'max_connections': 5,
                    'timeout': 3,
                },
                'MAX_CONNECTIONS': 10,
                'PICKLE_VERSION': -1,
            },
        },
    }

    LOG_FORMAT = "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s"
    LOGGING = {
        'version': 1,
        'disable_existing_loggers': True,
        'formatters': {
            'standard': {
                'format': LOG_FORMAT,
                'datefmt': "%d/%b/%Y %H:%M:%S"
            },
        },
        'filters': {
            'require_debug_false': {
                '()': 'django.utils.log.RequireDebugFalse'
            }
        },
        'handlers': {
            'null': {
                'level': 'DEBUG',
                'class': 'logging.NullHandler',
            },
            'console': {
                # Tuple indexed by the DEBUG bool: DEBUG picks 'DEBUG'.
                'level': ('INFO', 'DEBUG')[DEBUG],
                'class': 'logging.StreamHandler',
                'formatter': 'standard'
            },
        },
        'loggers': {
            'django.db.backends': {
                'handlers': ['console'],
                'level': 'DEBUG',
                'propagate': False,
            },
            'readthedocs.core.views.post_commit': {
                'handlers': ['console'],
                'level': 'DEBUG',
                'propagate': False,
            },
            'core.middleware': {
                'handlers': ['console'],
                'level': 'DEBUG',
                'propagate': False,
            },
            'restapi': {
                'handlers': ['console'],
                'level': 'DEBUG',
                'propagate': False,
            },
            'django.request': {
                'handlers': ['console'],
                'level': 'ERROR',
                'propagate': False,
            },
            'readthedocs.projects.views.public.search': {
                'handlers': ['console'],
                'level': 'DEBUG',
                'propagate': False,
            },
            'search': {
                'handlers': ['console'],
                'level': 'DEBUG',
                'propagate': False,
            },
            'elasticsearch.trace': {
                'level': 'DEBUG',
                'handlers': ['console'],
            },
            # Catch-all root logger.
            '': {
                'handlers': ['console'],
                'level': 'INFO',
            }
        }
    }
# Export the class attributes as module-level settings (load_settings is
# provided by the base settings class).
EnvironmentSettings.load_settings(__name__)

# Optionally layer local overrides on top; absence of the module is fine.
if not os.environ.get('DJANGO_SETTINGS_SKIP_LOCAL', False):
    try:
        # pylint: disable=unused-wildcard-import
        from .local_settings import *  # noqa
    except ImportError:
        pass
| 31.096939 | 80 | 0.528466 | 5,780 | 0.948318 | 0 | 0 | 661 | 0.10845 | 0 | 0 | 2,223 | 0.364725 |
d80aded943ac31da1cf1d442bfacdc0c574d184b | 583 | py | Python | jobs/tests.py | dukedbgroup/BayesianTuner | e74dc61c846c3beab95ca2140c1aab1d5179e208 | [
"Apache-2.0"
] | 13 | 2018-03-10T23:32:16.000Z | 2019-09-10T14:20:46.000Z | jobs/tests.py | dukedbgroup/BayesianTuner | e74dc61c846c3beab95ca2140c1aab1d5179e208 | [
"Apache-2.0"
] | null | null | null | jobs/tests.py | dukedbgroup/BayesianTuner | e74dc61c846c3beab95ca2140c1aab1d5179e208 | [
"Apache-2.0"
] | 1 | 2018-12-12T22:17:51.000Z | 2018-12-12T22:17:51.000Z | from unittest import TestCase
from logger import get_logger
from config import get_config
from .runner import JobsRunner
# Module-level logger and configuration, created once at import time.
logger = get_logger(__name__, log_level=("TEST", "LOGLEVEL"))
config = get_config()
def test_run(self, config, job_config, date):
    # Job callback registered by dotted path "jobs.tests.test_run" in
    # JobRunnerTests.testAddJob; presumably the runner invokes it with a
    # context object as the first argument, hence the 'self'-style
    # parameter on a module-level function — confirm against JobsRunner.
    print("Running test job")
class JobRunnerTests(TestCase):
    """Exercise JobsRunner start/stop and job registration."""

    def setUp(self):
        # Fresh runner per test; the loop is started here and stopped in
        # tearDown so each test sees a live scheduler.
        self.runner = JobsRunner(config)
        self.runner.run_loop()

    def tearDown(self):
        self.runner.stop_loop()

    def testAddJob(self):
        # Register this module's test_run by dotted path; "schedule_at"
        # "01:00" presumably means a daily time-of-day trigger — confirm
        # against the JobsRunner implementation.
        self.runner.add_job("test_job", "jobs.tests.test_run", {"schedule_at": "01:00"})
| 25.347826 | 88 | 0.703259 | 297 | 0.509434 | 0 | 0 | 0 | 0 | 0 | 0 | 85 | 0.145798 |
d80bf67fed1d7c15d54f1dcaa8d2e1e4a8afca9b | 220 | py | Python | wekaScript.py | sebas1208/sentiment-analizer-bi | 188383366373f6d20e04978cb9db520b367bde0a | [
"MIT"
] | null | null | null | wekaScript.py | sebas1208/sentiment-analizer-bi | 188383366373f6d20e04978cb9db520b367bde0a | [
"MIT"
] | null | null | null | wekaScript.py | sebas1208/sentiment-analizer-bi | 188383366373f6d20e04978cb9db520b367bde0a | [
"MIT"
] | null | null | null | import weka.core.jvm as jvm
jvm.start()
from weka.core.converters import Loader, Saver
loader = Loader(classname="weka.core.converters.ArffLoader")
data = loader.load_file("./Listas/train.arff")
print data
jvm.stop() | 20 | 60 | 0.763636 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.245455 |
d80ea4320731a7f5c8a15b5c673de800b94a36ed | 463 | py | Python | underscore/declaration.py | doboy/Underscore | d98273db3144cda79191d2c90f45d81b6d700b1f | [
"MIT"
] | 7 | 2016-09-23T00:44:05.000Z | 2021-10-04T21:19:12.000Z | underscore/declaration.py | jameswu1991/Underscore | d98273db3144cda79191d2c90f45d81b6d700b1f | [
"MIT"
] | 1 | 2016-09-23T00:45:05.000Z | 2019-02-16T19:05:37.000Z | underscore/declaration.py | jameswu1991/Underscore | d98273db3144cda79191d2c90f45d81b6d700b1f | [
"MIT"
] | 3 | 2016-09-23T01:13:15.000Z | 2018-07-20T21:22:17.000Z | # Copyright (c) 2013 Huan Do, http://huan.do
class Declaration(object):
    """A named declaration plus bookkeeping flags.

    Attributes:
        name: identifier this declaration introduces.
        delete: when True the declaration is slated for removal, which also
            forces ``conditional`` to be truthy.
        _conditional: tri-state flag; must be set to a non-None value before
            ``conditional`` is read (enforced by an assert).
    """

    def __init__(self, name):
        self.delete = False
        self._conditional = None
        self.name = name

    @property
    def conditional(self):
        flag = self._conditional
        assert flag is not None
        return self.delete or flag
def generator():
    """Yield Declaration placeholders named '_', '__', '___', ... forever."""
    underscores = '_'
    while True:
        yield Declaration(underscores)
        underscores += '_'
| 23.15 | 47 | 0.593952 | 277 | 0.598272 | 137 | 0.295896 | 129 | 0.278618 | 0 | 0 | 88 | 0.190065 |
d80fc11903913cd937faa887dc7d412f894ad87e | 2,567 | py | Python | mlcomp/contrib/model/video/resnext3d/r2plus1_util.py | megachester/mlcomp | 8d30ba0a52e225144533e68295b71acb49e3c68a | [
"Apache-2.0"
] | 166 | 2019-08-21T20:00:04.000Z | 2020-05-14T16:13:57.000Z | mlcomp/contrib/model/video/resnext3d/r2plus1_util.py | megachester/mlcomp | 8d30ba0a52e225144533e68295b71acb49e3c68a | [
"Apache-2.0"
] | 14 | 2019-08-22T07:58:39.000Z | 2020-04-13T13:59:07.000Z | mlcomp/contrib/model/video/resnext3d/r2plus1_util.py | megachester/mlcomp | 8d30ba0a52e225144533e68295b71acb49e3c68a | [
"Apache-2.0"
] | 22 | 2019-08-23T12:37:20.000Z | 2020-04-20T10:06:29.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import torch.nn as nn
def r2plus1_unit(
    dim_in,
    dim_out,
    temporal_stride,
    spatial_stride,
    groups,
    inplace_relu,
    bn_eps,
    bn_mmt,
    dim_mid=None,
):
    """
    Implementation of `R(2+1)D unit <https://arxiv.org/abs/1711.11248>`_.
    Decompose one 3D conv into one 2D spatial conv and one 1D temporal conv.
    Choose the middle dimensionality so that the total No. of parameters
    in 2D spatial conv and 1D temporal conv is unchanged.

    Args:
        dim_in (int): the channel dimensions of the input.
        dim_out (int): the channel dimension of the output.
        temporal_stride (int): the temporal stride of the bottleneck.
        spatial_stride (int): the spatial_stride of the bottleneck.
        groups (int): number of groups for the convolution.
        inplace_relu (bool): calculate the relu on the original input
            without allocating new memory.
        bn_eps (float): epsilon for batch norm.
        bn_mmt (float): momentum for batch norm. Noted that BN momentum in
            PyTorch = 1 - BN momentum in Caffe2.
        dim_mid (Optional[int]): If not None, use the provided channel dimension
            for the output of the 2D spatial conv. If None, compute the output
            channel dimension of the 2D spatial conv so that the total No. of
            model parameters remains unchanged.
    """
    if dim_mid is None:
        # Match the parameter count of the 3x3x3 conv this unit replaces:
        # params(1x3x3) + params(3x1x1) == params(3x3x3).
        dim_mid = int(
            dim_out * dim_in * 3 * 3 * 3 / (dim_in * 3 * 3 + dim_out * 3))
        # Lazy %-style args: the message is only formatted when INFO logging
        # is enabled (avoids eager string interpolation on every call).
        logging.info(
            "dim_in: %d, dim_out: %d. Set dim_mid to %d",
            dim_in, dim_out, dim_mid,
        )
    # 1x3x3 group conv, BN, ReLU
    conv_middle = nn.Conv3d(
        dim_in,
        dim_mid,
        [1, 3, 3],  # kernel
        stride=[1, spatial_stride, spatial_stride],
        padding=[0, 1, 1],
        groups=groups,
        bias=False,
    )
    conv_middle_bn = nn.BatchNorm3d(dim_mid, eps=bn_eps, momentum=bn_mmt)
    conv_middle_relu = nn.ReLU(inplace=inplace_relu)
    # 3x1x1 group conv
    conv = nn.Conv3d(
        dim_mid,
        dim_out,
        [3, 1, 1],  # kernel
        stride=[temporal_stride, 1, 1],
        padding=[1, 0, 0],
        groups=groups,
        bias=False,
    )
    return nn.Sequential(conv_middle, conv_middle_bn, conv_middle_relu, conv)
| 34.226667 | 80 | 0.61979 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,493 | 0.581613 |
d810e9e8b14b2edde01e890b4f74510b0b268466 | 312 | py | Python | camara_con_kivy.py | Rocha117/Laboratorio_07 | 1d1b646f9665523962a7d8addf16b056f437bc27 | [
"MIT"
] | null | null | null | camara_con_kivy.py | Rocha117/Laboratorio_07 | 1d1b646f9665523962a7d8addf16b056f437bc27 | [
"MIT"
] | null | null | null | camara_con_kivy.py | Rocha117/Laboratorio_07 | 1d1b646f9665523962a7d8addf16b056f437bc27 | [
"MIT"
] | null | null | null | from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
class CamaraWindow(BoxLayout):
    """Root widget of the app; layout/behaviour come from the kv side.

    The previous ``__init__`` only forwarded ``**kwargs`` to
    ``super().__init__`` and was therefore redundant — BoxLayout's
    inherited constructor already accepts the same keyword arguments.
    """
class CamaraApp(App):
    """Kivy application whose root widget is a CamaraWindow."""

    def build(self):
        # Called once by App.run() to create the widget tree.
        return CamaraWindow()
if __name__ == '__main__':
CamaraApp().run() | 22.285714 | 41 | 0.634615 | 175 | 0.560897 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.032051 |
d811f23940af1b60e6df0cb986906b761af79201 | 1,685 | py | Python | test/test_record_count_use_case_interactor.py | takeoverjp/tsdraw | 71e10082696df857c6fcd6fed41a434e33b8645e | [
"MIT"
] | null | null | null | test/test_record_count_use_case_interactor.py | takeoverjp/tsdraw | 71e10082696df857c6fcd6fed41a434e33b8645e | [
"MIT"
] | null | null | null | test/test_record_count_use_case_interactor.py | takeoverjp/tsdraw | 71e10082696df857c6fcd6fed41a434e33b8645e | [
"MIT"
] | null | null | null | import unittest
from datetime import datetime, timezone
from src.entity.count_entity import CountEntity
from src.interface_adapter.in_memory_count_repository import \
InMemoryCountRepository
from src.use_case.record_count_input_data import RecordCountInputData
from src.use_case.record_count_use_case_interactor import \
RecordCountUseCaseInteractor
class TestRecordCountUseCaseInteractor(unittest.TestCase):
    """Unit tests for RecordCountUseCaseInteractor with the in-memory repo."""

    def setUp(self) -> None:
        self.repository = InMemoryCountRepository()
        return super().setUp()

    def test_create(self):
        """The interactor can be constructed with just a repository."""
        # Execute
        RecordCountUseCaseInteractor(self.repository)

    def test_handle_empty(self):
        """Handling input with no counts stores nothing."""
        # Setup
        interactor = RecordCountUseCaseInteractor(self.repository)
        date = datetime(2020, 1, 1, tzinfo=timezone.utc)
        # Renamed from 'input' to stop shadowing the builtin.
        input_data = RecordCountInputData(date, [])
        # Execute
        interactor.handle(input_data)
        # Assert
        counts = self.repository.find_all()
        self.assertEqual(len(counts), 0)

    def test_handle_multi_input(self):
        """Every entity in the input is persisted."""
        # Setup
        interactor = RecordCountUseCaseInteractor(self.repository)
        date = datetime(2020, 1, 1, tzinfo=timezone.utc)
        ent0 = CountEntity(date, "/bin/bash", 3)
        ent1 = CountEntity(date, "/bin/sash", 4)
        ent2 = CountEntity(date, "/bin/cash", 5)
        counts = [ent0, ent1, ent2]
        # Renamed from 'input' to stop shadowing the builtin.
        input_data = RecordCountInputData(date, counts)
        # Execute
        interactor.handle(input_data)
        # Assert
        counts = self.repository.find_all()
        self.assertEqual(len(counts), 3)
        self.assertIn(ent0, counts)
        self.assertIn(ent1, counts)
        self.assertIn(ent2, counts)
| 31.792453 | 69 | 0.678932 | 1,323 | 0.785163 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.053412 |
d81352042fe28274d48c972342192e4cd5a7987b | 120 | py | Python | net/gan/loss/__init__.py | bacTlink/caffe_tmpname | 14f713ec782ec9d6757a55fa1bda0151fe6c0e33 | [
"Intel",
"BSD-2-Clause"
] | 2 | 2018-02-02T07:35:08.000Z | 2018-02-05T09:25:10.000Z | net/gan/loss/__init__.py | bacTlink/caffe_tmpname | 14f713ec782ec9d6757a55fa1bda0151fe6c0e33 | [
"Intel",
"BSD-2-Clause"
] | null | null | null | net/gan/loss/__init__.py | bacTlink/caffe_tmpname | 14f713ec782ec9d6757a55fa1bda0151fe6c0e33 | [
"Intel",
"BSD-2-Clause"
] | null | null | null | all = ['GeneratorLoss',
'ClassfierLoss']
# NOTE(review): the list above is assigned to 'all', which shadows the
# builtin; it was presumably meant to be '__all__' (the export list) —
# confirm and rename. Also note the spelling 'ClassfierLoss' matches the
# actual module/class names below.
from generator import GeneratorLoss
from classfier import ClassfierLoss
| 24 | 35 | 0.775 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.25 |
d813cd01f5b9a14e68558e232020bb8ed1c59d28 | 972 | py | Python | Curso-em-video-Python3-mundo2/ex070.py | bernardombraga/Solucoes-exercicios-cursos-gratuitos | 0347a8325443fce84e0a753c96f523a22858537b | [
"MIT"
] | null | null | null | Curso-em-video-Python3-mundo2/ex070.py | bernardombraga/Solucoes-exercicios-cursos-gratuitos | 0347a8325443fce84e0a753c96f523a22858537b | [
"MIT"
] | null | null | null | Curso-em-video-Python3-mundo2/ex070.py | bernardombraga/Solucoes-exercicios-cursos-gratuitos | 0347a8325443fce84e0a753c96f523a22858537b | [
"MIT"
] | null | null | null | line = '-' * 30
print(line)
print('{:^30}'.format('LOJA SUPER BARATÃO'))
print(line)
total = more1000 = 0
productMaisBarato = ''
priceMaisBarato = 0
primeiraVez = True
while True:
name = str(input('Nome do produto: '))
price = float(input('Preço: R$'))
moreProducts = ' '
while moreProducts not in 'SN':
moreProducts = str(input('Quer continuar? [S/N] ')).strip().upper()[0]
total += price
if price > 1000:
more1000 += 1
if primeiraVez:
primeiraVez = False
priceMaisBarato = price
productMaisBarato = name
else:
if price < priceMaisBarato:
priceMaisBarato = price
productMaisBarato = name
if moreProducts == 'N':
break
print('{:-^30}'.format(' FIM DO PROGRAMA '))
print(f'O total da compra foi de {total}.')
print(f'Temos {more1000} que custa mais de R$1000.00')
print(f'O produto mais barato foi a {productMaisBarato} que custa R${priceMaisBarato:.2f}.')
| 30.375 | 92 | 0.623457 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 295 | 0.302875 |
d813fe7e8c53a4b341a427a7aa1d706dae039f16 | 310 | py | Python | dockernd/python/ND/NostalgiaDrive/setup.py | mchellmer/DockerMongoFceuxPython | f7db8fc976d76046ca03f707f9845d348f9be651 | [
"MIT"
] | null | null | null | dockernd/python/ND/NostalgiaDrive/setup.py | mchellmer/DockerMongoFceuxPython | f7db8fc976d76046ca03f707f9845d348f9be651 | [
"MIT"
] | null | null | null | dockernd/python/ND/NostalgiaDrive/setup.py | mchellmer/DockerMongoFceuxPython | f7db8fc976d76046ca03f707f9845d348f9be651 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(
name='nd',
py_modules=['nd'],
version='1.0.0',
description='user friendly emulation game selection',
license="MIT",
author='Mark Hellmer',
author_email='mchellmer@gmail.com',
install_requires=['tkinter', 'nltk', 'pymongo'],
scripts=[]
)
| 22.142857 | 57 | 0.645161 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 119 | 0.383871 |
d8146baed580c42319f8435585246c29dbec5fcd | 703 | py | Python | migrations/versions/f83defd9c5ed_add_class_of_delete.py | Nyirabazungu/blog-app | 5461027a5d63445d2c3bf1908f119981bc1c49bf | [
"MIT"
] | null | null | null | migrations/versions/f83defd9c5ed_add_class_of_delete.py | Nyirabazungu/blog-app | 5461027a5d63445d2c3bf1908f119981bc1c49bf | [
"MIT"
] | null | null | null | migrations/versions/f83defd9c5ed_add_class_of_delete.py | Nyirabazungu/blog-app | 5461027a5d63445d2c3bf1908f119981bc1c49bf | [
"MIT"
] | null | null | null | """add class of delete
Revision ID: f83defd9c5ed
Revises: 1060ee5817c7
Create Date: 2019-03-04 17:50:54.573744
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'f83defd9c5ed'
down_revision = '1060ee5817c7'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('subscribers', 'username')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('subscribers', sa.Column('username', sa.VARCHAR(length=255), autoincrement=False, nullable=True))
# ### end Alembic commands ###
| 24.241379 | 115 | 0.704125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 412 | 0.58606 |
d815e5a4deb18f61e7e650ac8ee688311cb0ebfc | 57,497 | py | Python | BioSTEAM 2.x.x/biorefineries/oilcane/_uncertainty_plots.py | yoelcortes/Bioindustrial-Complex | d39edfec88e443ef7a62218ca0215e3b105f4b96 | [
"MIT"
] | 2 | 2020-01-03T21:04:41.000Z | 2020-01-09T01:15:48.000Z | BioSTEAM 2.x.x/biorefineries/oilcane/_uncertainty_plots.py | yoelcortes/Bioindustrial-Complex | d39edfec88e443ef7a62218ca0215e3b105f4b96 | [
"MIT"
] | 6 | 2020-01-03T21:31:27.000Z | 2020-02-28T13:53:56.000Z | BioSTEAM 2.x.x/biorefineries/oilcane/_uncertainty_plots.py | yoelcortes/Bioindustrial-Complex | d39edfec88e443ef7a62218ca0215e3b105f4b96 | [
"MIT"
] | 2 | 2020-01-07T14:04:06.000Z | 2020-01-08T23:05:25.000Z | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 5 01:34:00 2021
@author: yrc2
"""
import biosteam as bst
import biorefineries.oilcane as oc
from biosteam.utils import CABBI_colors, colors
from thermosteam.utils import set_figure_size, set_font, roundsigfigs
from thermosteam.units_of_measure import format_units
from colorpalette import Palette
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from warnings import warn
import numpy as np
import pandas as pd
from matplotlib.gridspec import GridSpec
from . import _variable_mockups as variables
from ._variable_mockups import (
tea_monte_carlo_metric_mockups,
tea_monte_carlo_derivative_metric_mockups,
lca_monte_carlo_metric_mockups,
lca_monte_carlo_derivative_metric_mockups,
MFPP, TCI, electricity_production, natural_gas_consumption,
ethanol_production, biodiesel_production,
GWP_ethanol, GWP_biodiesel, GWP_electricity,
GWP_ethanol_allocation, GWP_biodiesel_allocation,
GWP_economic, MFPP_derivative,
TCI_derivative,
ethanol_production_derivative,
biodiesel_production_derivative,
electricity_production_derivative,
natural_gas_consumption_derivative,
GWP_ethanol_derivative,
)
from ._load_data import (
images_folder,
get_monte_carlo,
spearman_file,
)
import os
from._parse_configuration import format_name
# Public API of this plotting module: figure generators for the manuscript
# (Monte Carlo distributions, Spearman rank correlations, cost/GWP breakdowns,
# and kernel-density tradeoff plots), plus the shared area color/hatch maps.
__all__ = (
    'plot_all',
    'plot_montecarlo_main_manuscript',
    'plot_breakdowns',
    'plot_montecarlo_feedstock_comparison',
    'plot_montecarlo_configuration_comparison',
    'plot_montecarlo_agile_comparison',
    'plot_montecarlo_derivative',
    'plot_montecarlo_absolute',
    'plot_spearman_tea',
    'plot_spearman_lca',
    'plot_spearman_tea_short',
    'plot_spearman_lca_short',
    'plot_monte_carlo_across_coordinate',
    'monte_carlo_box_plot',
    'plot_monte_carlo',
    'plot_spearman',
    'plot_configuration_breakdown',
    'plot_TCI_areas_across_oil_content',
    'plot_heatmap_comparison',
    'plot_feedstock_conventional_comparison_kde',
    'plot_feedstock_cellulosic_comparison_kde',
    'plot_configuration_comparison_kde',
    'plot_open_comparison_kde',
    'plot_feedstock_comparison_kde',
    'plot_crude_configuration_comparison_kde',
    'plot_agile_comparison_kde',
    'plot_separated_configuration_comparison_kde',
    'area_colors',
    'area_hatches',
)
# Color assigned to each biorefinery process area in breakdown plots.
# Both short ('EtOH prod.') and long ('Ethanol production') labels map to
# the same color so either naming convention can be used by callers.
area_colors = {
    'Feedstock handling': CABBI_colors.teal,
    'Juicing': CABBI_colors.green_dirty,
    'EtOH prod.': CABBI_colors.blue,
    'Ethanol production': CABBI_colors.blue,
    'Oil ext.': CABBI_colors.brown,
    'Oil extraction': CABBI_colors.brown,
    'Biod. prod.': CABBI_colors.orange,
    'Biodiesel production': CABBI_colors.orange,
    'Pretreatment': CABBI_colors.green,
    'Wastewater treatment': colors.purple,
    'CH&P': CABBI_colors.yellow,
    'Co-Heat and Power': CABBI_colors.yellow,
    'Utilities': colors.red,
    'Storage': CABBI_colors.grey,
    'HXN': colors.orange,
    'Heat exchanger network': colors.orange,
}
# Matplotlib hatch pattern for each process area (parallel to area_colors).
area_hatches = {
    'Feedstock handling': 'x',
    'Juicing': '-',
    'EtOH prod.': '/',
    'Ethanol production': '/',
    'Oil ext.': '\\',
    'Oil extraction': '\\',
    'Biod. prod.': '/|',
    'Biodiesel production': '/|',
    'Pretreatment': '//',
    'Wastewater treatment': r'\\',
    'CH&P': '',
    'Co-Heat and Power': '',
    'Utilities': '\\|',
    'Storage': '',
    'HXN': '+',
    'Heat exchanger network': '+',
}
# Lighten all area colors in place (20% tint) before building the palette.
for i in area_colors: area_colors[i] = area_colors[i].tint(20)
palette = Palette(**area_colors)
# Color used for the bold panel letters (A, B, C, ...) on figures.
letter_color = colors.neutral.shade(25).RGBn
# LaTeX-formatted GWP unit strings (kg and g CO2-eq per liter).
GWP_units_L = '$\\mathrm{kg} \\cdot \\mathrm{CO}_{2}\\mathrm{eq} \\cdot \\mathrm{L}^{-1}$'
GWP_units_L_small = GWP_units_L.replace('kg', 'g')
CABBI_colors.orange_hatch = CABBI_colors.orange.copy(hatch='////')
ethanol_over_biodiesel = bst.MockVariable('Ethanol over biodiesel', 'L/MT', 'Biorefinery')
GWP_ethanol_displacement = variables.GWP_ethanol_displacement
production = (ethanol_production, biodiesel_production)

# Each settings entry maps a key to (metric(s), axis label, scaling factor).
# A scaling factor of None means the values are plotted as-is.
# Settings for absolute Monte Carlo metric plots.
mc_metric_settings = {
    'MFPP': (MFPP, f"MFPP\n[{format_units('USD/MT')}]", None),
    'TCI': (TCI, f"TCI\n[{format_units('10^6*USD')}]", None),
    'production': (production, f"Production\n[{format_units('L/MT')}]", None),
    'electricity_production': (electricity_production, f"Elec. prod.\n[{format_units('kWhr/MT')}]", None),
    'natural_gas_consumption': (natural_gas_consumption, f"NG cons.\n[{format_units('m^3/MT')}]", None),
    'GWP_ethanol_displacement': (GWP_ethanol_displacement, "GWP$_{\\mathrm{displacement}}$" f"\n[{GWP_units_L}]", None),
    'GWP_economic': ((GWP_ethanol, GWP_biodiesel), "GWP$_{\\mathrm{economic}}$" f"\n[{GWP_units_L}]", None),
    'GWP_energy': ((GWP_ethanol_allocation, GWP_biodiesel_allocation), "GWP$_{\\mathrm{energy}}$" f"\n[{GWP_units_L}]", None),
}
# Settings for scenario-difference (comparison) plots; labels carry a delta.
mc_comparison_settings = {
    'MFPP': (MFPP, r"$\Delta$" + f"MFPP\n[{format_units('USD/MT')}]", None),
    'TCI': (TCI, r"$\Delta$" + f"TCI\n[{format_units('10^6*USD')}]", None),
    'production': (production, r"$\Delta$" + f"Production\n[{format_units('L/MT')}]", None),
    'electricity_production': (electricity_production, r"$\Delta$" + f"Elec. prod.\n[{format_units('kWhr/MT')}]", None),
    'natural_gas_consumption': (natural_gas_consumption, r"$\Delta$" + f"NG cons.\n[{format_units('m^3/MT')}]", None),
    'GWP_ethanol_displacement': (GWP_ethanol_displacement, r"$\Delta$" + "GWP$_{\\mathrm{displacement}}$" f"\n[{GWP_units_L}]", None),
    'GWP_economic': (GWP_ethanol, r"$\Delta$" + "GWP$_{\\mathrm{economic}}$" f"\n[{GWP_units_L}]", None),
    'GWP_energy': (GWP_ethanol_allocation, r"$\Delta$" + "GWP$_{\\mathrm{energy}}$" f"\n[{GWP_units_L}]", None),
    'GWP_property_allocation': ((GWP_ethanol, GWP_ethanol_allocation), r"$\Delta$" + f"GWP\n[{GWP_units_L}]", None),
}
# Settings for derivative-with-respect-to-oil-content plots; note the
# GWP entry scales values by 1000 (kg -> g CO2-eq).
mc_derivative_metric_settings = {
    'MFPP': (MFPP_derivative, r"$\Delta$" + format_units(r"MFPP/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('USD/MT')}]", None),
    'TCI': (TCI_derivative,  r"$\Delta$" + format_units(r"TCI/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('10^6*USD')}]", None),
    'production': ((ethanol_production_derivative, biodiesel_production_derivative), r"$\Delta$" + format_units(r"Prod./OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('L/MT')}]", None),
    'electricity_production': (electricity_production_derivative, r"$\Delta$" + format_units(r"EP/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('kWhr/MT')}]", None),
    'natural_gas_consumption': (natural_gas_consumption_derivative, r"$\Delta$" + format_units(r"NGC/OC").replace('cdot', r'cdot \Delta') + f"\n[{format_units('m^3/MT')}]", None),
    'GWP_economic': (GWP_ethanol_derivative, r"$\Delta$" + r"GWP $\cdot \Delta \mathrm{OC}^{-1}$" f"\n[{GWP_units_L_small}]", 1000),
}
# Re-key the settings by metric object for the KDE plot helpers.
kde_metric_settings = {j[0]: j for j in mc_metric_settings.values()}
kde_comparison_settings = {j[0]: j for j in mc_comparison_settings.values()}
kde_derivative_settings = {j[0]: j for j in mc_derivative_metric_settings.values()}
# %% Plots for publication
def plot_all():
    """Generate and save the main publication figures: absolute Monte
    Carlo distributions, TEA and LCA Spearman correlations, and the
    configuration breakdowns. Each callee saves its own image files."""
    # plot_montecarlo_main_manuscript()
    plot_montecarlo_absolute()
    plot_spearman_tea()
    plot_spearman_lca()
    plot_breakdowns()
def plot_montecarlo_main_manuscript():
    """Compose the three-panel main-manuscript Monte Carlo figure:
    (I) feedstock comparison, (II) configuration comparison, and
    (III) agile (oilsorghum) comparison, then save it as
    'montecarlo_main_manuscript.svg/.png' in the images folder."""
    set_font(size=8)
    set_figure_size(aspect_ratio=0.85)
    fig = plt.figure()
    # Master 4x3 grid; the three panels are carved out of it below.
    everything = GridSpec(4, 3, fig, hspace=1.5, wspace=0.7,
                          top=0.90, bottom=0.05,
                          left=0.11, right=0.97)
    def spec2axes(spec, x, y, hspace=0, wspace=0.7, **kwargs):
        # Subdivide a gridspec region into an x-by-y object array of axes.
        subspec = spec.subgridspec(x, y, hspace=hspace, wspace=wspace, **kwargs)
        return np.array([[fig.add_subplot(subspec[i, j]) for j in range(y)] for i in range(x)], object)
    gs_feedstock_comparison = everything[:2, :]
    gs_configuration_comparison = everything[2:, :2]
    gs_agile_comparison = everything[2:, 2]
    axes_feedstock_comparison = spec2axes(gs_feedstock_comparison, 2, 3)
    axes_configuration_comparison = spec2axes(gs_configuration_comparison, 2, 2)
    axes_agile_comparison = spec2axes(gs_agile_comparison, 2, 1)
    plot_montecarlo_feedstock_comparison(axes_feedstock_comparison, letters='ABCDEFG')
    plot_montecarlo_configuration_comparison(axes_configuration_comparison, letters='ABCDEFG')
    plot_montecarlo_agile_comparison(axes_agile_comparison, letters='ABCDEFG')
    def add_title(gs, title):
        # Overlay an invisible axes spanning the gridspec just to hold a title.
        ax = fig.add_subplot(gs)
        ax._frameon = False
        ax.xaxis.set_visible(False)
        ax.yaxis.set_visible(False)
        ax.set_title(
            title, color=letter_color,
            horizontalalignment='center',verticalalignment='center',
            fontsize=12, fontweight='bold', y=1.1
        )
    add_title(gs_feedstock_comparison, '(I) Impact of opting to process oilcane over sugarcane')
    add_title(gs_configuration_comparison, '(II) Impact of cellulosic ethanol integration')
    add_title(gs_agile_comparison, '(III) Impact of\noilsorghum\nintegration')
    plt.show()
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'montecarlo_main_manuscript.{i}')
        plt.savefig(file, transparent=True)
def plot_montecarlo_feedstock_comparison(axes_box=None, letters=None,
                                         single_column=True):
    """Plot Monte Carlo box plots comparing oilcane vs. sugarcane
    ('O1 - S1' and 'O2 - S2') across six metrics.

    Parameters
    ----------
    axes_box : array of axes, optional
        Pre-made axes to draw into (used by the composite manuscript
        figure); when None, a standalone figure is created and saved.
    letters : str, optional
        Panel letters; defaults to 'ABCDEFGH'.
    single_column : bool
        When True, lay out one column (half-width figure); otherwise
        three columns.
    """
    if single_column:
        width = 'half'
        aspect_ratio = 2.25
        ncols = 1
        left = 0.255
        bottom = 0.05
    else:
        width = None
        aspect_ratio = 0.75
        left = 0.105
        bottom = 0.12
        ncols = 3
    if axes_box is None:
        set_font(size=8)
        set_figure_size(width=width, aspect_ratio=aspect_ratio)
    fig, axes = plot_monte_carlo(
        derivative=False, absolute=False, comparison=True,
        tickmarks=None, agile=False, ncols=ncols, axes_box=axes_box,
        labels=[
            'Direct Cogeneration',
            'Integrated Co-Fermentation',
            # 'Direct Cogeneration',
            # 'Integrated Co-Fermentation',
        ],
        comparison_names=['O1 - S1', 'O2 - S2'],
        metrics = ['MFPP', 'TCI', 'production', 'GWP_property_allocation', 
                   'natural_gas_consumption', 'electricity_production'],
        color_wheel = CABBI_colors.wheel([
            'blue_light', 'green_dirty', 'orange', 'green',
            'orange', 'orange_hatch', 'grey', 'brown',
        ])
    )
    # Annotate each panel with its letter in the upper region.
    for ax, letter in zip(axes, 'ABCDEFGH' if letters is None else letters):
        plt.sca(ax)
        ylb, yub = plt.ylim()
        plt.text(1.65, ylb + (yub - ylb) * 0.90, letter, color=letter_color,
                 horizontalalignment='center',verticalalignment='center',
                 fontsize=12, fontweight='bold')
        # if axes_box is None and letter in 'DH':
        #     x = 0.5
        #     plt.text(x, ylb - (yub - ylb) * 0.3, 
        #              'Impact of processing\noilcane over sugarcane', 
        #              horizontalalignment='center',verticalalignment='center',
        #              fontsize=8)
    if axes_box is None:
        plt.subplots_adjust(right=0.96, left=left, wspace=0.38, top=0.98, bottom=bottom)
        for i in ('svg', 'png'):
            file = os.path.join(images_folder, f'montecarlo_feedstock_comparison.{i}')
            plt.savefig(file, transparent=True)
def plot_montecarlo_configuration_comparison(axes_box=None, letters=None,
                                             single_column=True):
    """Plot Monte Carlo box plots of cellulosic-integration impact for
    oilcane ('O2 - O1') across four to six metrics.

    Parameters
    ----------
    axes_box : array of axes, optional
        Pre-made axes to draw into; when None a standalone figure is
        created and saved as 'montecarlo_configuration_comparison'.
    letters : str, optional
        Panel letters; defaults to 'ABCDEF'.
    single_column : bool
        Single-column (six metrics) vs. two-column (four metrics) layout.
    """
    if single_column:
        width = 'half'
        aspect_ratio = 2.25
        ncols = 1
        left = 0.255
        bottom = 0.05
        x = 1.65
        metrics= ['MFPP', 'TCI', 'production', 'GWP_property_allocation', 
                  'natural_gas_consumption', 'electricity_production']
    else:
        width = None
        aspect_ratio = 0.75
        left = 0.105
        bottom = 0.12
        ncols = 2
        x = 0.58
        metrics= ['MFPP', 'TCI', 'production', 'GWP_property_allocation']
    if axes_box is None:
        set_font(size=8)
        set_figure_size(width=width, aspect_ratio=aspect_ratio)
    fig, axes = plot_monte_carlo(
        derivative=False, absolute=False, comparison=True,
        tickmarks=None, agile=False, ncols=ncols, axes_box=axes_box,
        labels=[
            'Oilcane',
            # 'Sugarcane',
        ],
        comparison_names=[
            'O2 - O1', 
            # 'S2 - S1'
        ],
        metrics=metrics,
        color_wheel = CABBI_colors.wheel([
            'blue_light', 'green_dirty', 'orange', 'green',
            'orange', 'orange_hatch',
        ])
    )
    # Annotate each panel with its letter; x position depends on layout.
    for ax, letter in zip(axes, 'ABCDEF' if letters is None else letters):
        plt.sca(ax)
        ylb, yub = plt.ylim()
        plt.text(x, ylb + (yub - ylb) * 0.90, letter, color=letter_color,
                 horizontalalignment='center',verticalalignment='center',
                 fontsize=12, fontweight='bold')
    if axes_box is None:
        plt.subplots_adjust(right=0.96, left=left, wspace=0.38, top=0.98, bottom=bottom)
        for i in ('svg', 'png'):
            file = os.path.join(images_folder, f'montecarlo_configuration_comparison.{i}')
            plt.savefig(file, transparent=True)
def plot_montecarlo_agile_comparison(axes_box=None, letters=None):
    """Plot Monte Carlo box plots of oilsorghum integration impact
    (agile-only scenarios) for MFPP and TCI.

    Parameters
    ----------
    axes_box : array of axes, optional
        Pre-made axes to draw into; when None a standalone figure is
        created and saved as 'montecarlo_agile_comparison'.
    letters : str, optional
        Panel letters; defaults to 'AB'.
    """
    if axes_box is None:
        set_font(size=8)
        set_figure_size(width=3.3071, aspect_ratio=1.0)
    fig, axes = plot_monte_carlo(
        derivative=False, absolute=False, comparison=True,
        tickmarks=None, agile_only=True, ncols=1,
        labels=[
            'Direct Cogeneration',
            'Integrated Co-Fermentation'
        ],
        metrics=['MFPP', 'TCI'],
        axes_box=axes_box,
    )
    for ax, letter in zip(axes, 'AB' if letters is None else letters):
        plt.sca(ax)
        ylb, yub = plt.ylim()
        plt.text(1.65, ylb + (yub - ylb) * 0.90, letter, color=letter_color,
                 horizontalalignment='center',verticalalignment='center',
                 fontsize=12, fontweight='bold')
        # Only the standalone figure gets the explanatory caption below panel B.
        if axes_box is None and letter == 'B':
            plt.text(0.5, ylb - (yub - ylb) * 0.25, 
                     'Impact of integrating oilsorghum\nat an agile oilcane biorefinery', 
                     horizontalalignment='center',verticalalignment='center',
                     fontsize=8)
    if axes_box is None:
        plt.subplots_adjust(right=0.9, left=0.2, wspace=0.5, top=0.98, bottom=0.15)
        for i in ('svg', 'png'):
            file = os.path.join(images_folder, f'montecarlo_agile_comparison.{i}')
            plt.savefig(file, transparent=True)
def plot_montecarlo_derivative():
    """Plot Monte Carlo distributions of metric derivatives with respect
    to oil content (DC and ICF scenarios) and save the figure as
    'montecarlo_derivative.svg/.png'."""
    set_font(size=8)
    set_figure_size(
        aspect_ratio=0.5, 
        # width=3.3071, aspect_ratio=1.85
    )
    fig, axes = plot_monte_carlo(
        derivative=True, absolute=True, 
        comparison=False, agile=False,
        ncols=3,
        # tickmarks=np.array([
        #     [-3, -2, -1, 0, 1, 2, 3, 4, 5],
        #     [-9, -6, -3, 0, 3, 6, 9, 12, 15],
        #     [-2.0, -1.5, -1.0, -0.5, 0, 0.5, 1.0, 1.5, 2],
        #     [-16, -8, 0, 8, 16, 24, 32, 40, 48],
        #     [-400, -300, -200, -100, 0, 100, 200, 300, 400],
        #     [-300, -225, -150, -75, 0, 75, 150, 225, 300]
        # ], dtype=object),
        labels=['DC', 'ICF'],
        color_wheel = CABBI_colors.wheel([
            'blue_light', 'green_dirty', 'orange', 'green', 'grey', 'brown',
            'orange', 
        ])
    )
    # Panel letters.
    for ax, letter in zip(axes, 'ABCDEFGH'):
        plt.sca(ax)
        ylb, yub = plt.ylim()
        plt.text(1.65, ylb + (yub - ylb) * 0.90, letter, color=letter_color,
                 horizontalalignment='center',verticalalignment='center',
                 fontsize=12, fontweight='bold')
    plt.subplots_adjust(
        hspace=0, wspace=0.7,
        top=0.95, bottom=0.1,
        left=0.12, right=0.96
    )
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'montecarlo_derivative.{i}')
        plt.savefig(file, transparent=True)
def plot_montecarlo_absolute():
    """Plot absolute Monte Carlo distributions for all eight scenarios
    (sugarcane/oilcane x DC/ICF x with/without sorghum) and save the
    figure as 'montecarlo_absolute.svg/.png'."""
    set_font(size=8)
    set_figure_size(aspect_ratio=1.05)
    fig, axes = plot_monte_carlo(
        absolute=True, comparison=False, ncols=2,
        expand=0.1, 
        labels=['Sugarcane\nDC', 'Oilcane\nDC',
                'Sugarcane\nICF', 'Oilcane\nICF',
                'Sugarcane &\nSorghum DC', 'Oilcane &\nOil-sorghum DC',
                'Sugarcane &\nSorghum ICF', 'Oilcane &\nOil-sorghum ICF'],
        xrot=90,
        color_wheel = CABBI_colors.wheel([
            'blue_light', 'green_dirty', 'orange', 'green', 'grey', 'brown',
            'orange', 'orange', 'green', 'orange', 'green',
        ])
    )
    # Panel letters.
    for ax, letter in zip(axes, 'ABCDEFGHIJ'):
        plt.sca(ax)
        ylb, yub = plt.ylim()
        plt.text(7.8, ylb + (yub - ylb) * 0.92, letter, color=letter_color,
                 horizontalalignment='center',verticalalignment='center',
                 fontsize=12, fontweight='bold')
    plt.subplots_adjust(left=0.12, right=0.95, wspace=0.40, top=0.98, bottom=0.2)
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'montecarlo_absolute.{i}')
        plt.savefig(file, transparent=True)
def plot_spearman_tea(with_units=None, aspect_ratio=0.8, **kwargs):
    """Plot Spearman rank correlations for TEA metrics across the four
    oilcane scenarios and save as 'spearman_tea.svg/.png'.

    Extra keyword arguments are forwarded to ``plot_spearman``.
    """
    set_font(size=8)
    set_figure_size(aspect_ratio=aspect_ratio)
    plot_spearman(
        configurations=[
            'O1', 'O1*',
            'O2', 'O2*',
        ],
        labels=[
            'DC', 'Oil-sorghum int., DC',
            'ICF', 'Oil-sorghum int., ICF',
        ],
        kind='TEA',
        with_units=with_units,
        cutoff=0.03,
        **kwargs
    )
    plt.subplots_adjust(left=0.45, right=0.975, top=0.98, bottom=0.08)
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'spearman_tea.{i}')
        plt.savefig(file, transparent=True)
def plot_spearman_tea_short(**kwargs):
    """Plot a compact (top-5 parameters, two scenarios) Spearman figure
    for TEA metrics.

    NOTE(review): this saves to 'spearman_tea.{svg,png}' — the same
    filenames used by ``plot_spearman_tea`` — so running both overwrites
    one figure with the other; confirm whether this is intended.
    """
    set_font(size=8)
    set_figure_size(aspect_ratio=0.65, width=6.6142 * 2/3)
    plot_spearman(
        configurations=[
            'O1',
            'O2',
        ],
        labels=[
            'DC',
            'ICF',
        ],
        kind='TEA',
        with_units=False,
        cutoff=0.03,
        top=5,
        legend=True,
        legend_kwargs={'loc': 'upper left'},
        **kwargs
    )
    plt.subplots_adjust(left=0.35, right=0.975, top=0.98, bottom=0.15)
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'spearman_tea.{i}')
        plt.savefig(file, transparent=True)
def plot_spearman_lca_short(with_units=False, aspect_ratio=0.65, **kwargs):
    """Plot a compact (top-5 parameters, two scenarios) Spearman figure
    for LCA metrics.

    NOTE(review): this saves to 'spearman_lca.{svg,png}' — the same
    filenames used by ``plot_spearman_lca`` — so running both overwrites
    one figure with the other; confirm whether this is intended.
    """
    set_font(size=8)
    set_figure_size(aspect_ratio=aspect_ratio, width=6.6142 * 2/3)
    plot_spearman(
        configurations=[
            'O1',
            'O2', 
        ],
        labels=[
            'DC',
            'ICF', 
        ],
        kind='LCA',
        with_units=with_units,
        cutoff=0.03,
        top=5,
        legend=False,
        **kwargs
    )
    plt.subplots_adjust(left=0.35, right=0.975, top=0.98, bottom=0.15)
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'spearman_lca.{i}')
        plt.savefig(file, transparent=True)
def plot_spearman_lca(with_units=None, aspect_ratio=0.65, **kwargs):
    """Plot Spearman rank correlations for LCA metrics across the four
    oilcane scenarios and save as 'spearman_lca.svg/.png'.

    Extra keyword arguments are forwarded to ``plot_spearman``.
    """
    set_font(size=8)
    set_figure_size(aspect_ratio=aspect_ratio)
    plot_spearman(
        configurations=[
            'O1', 'O1*',
            'O2', 'O2*', 
        ],
        labels=[
            'DC', 'Oil-sorghum int., DC',
            'ICF', 'Oil-sorghum int., ICF', 
        ],
        kind='LCA',
        with_units=with_units,
        cutoff=0.03,
        **kwargs
    )
    plt.subplots_adjust(left=0.45, right=0.975, top=0.98, bottom=0.10)
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'spearman_lca.{i}')
        plt.savefig(file, transparent=True)
def plot_breakdowns():
    """Plot side-by-side cost/impact breakdowns for the O1 (Direct
    Cogeneration) and O2 (Integrated Co-Fermentation) configurations
    and save the figure as 'breakdowns.svg/.png'."""
    set_font(size=8)
    set_figure_size(aspect_ratio=0.68)
    fig, axes = plt.subplots(nrows=1, ncols=2)
    plt.sca(axes[0])
    plot_configuration_breakdown('O1', ax=axes[0], legend=False)
    plt.sca(axes[1])
    plot_configuration_breakdown('O2', ax=axes[1], legend=True)
    # Blank out the right panel's y tick labels; panels share the y axis label.
    yticks = axes[1].get_yticks()
    plt.yticks(yticks, ['']*len(yticks))
    plt.ylabel('')
    plt.subplots_adjust(left=0.09, right=0.96, wspace=0., top=0.84, bottom=0.31)
    # Panel titles above each subplot.
    for ax, letter in zip(axes, ['(A) Direct Cogeneration', '(B) Integrated Co-Fermentation']):
        plt.sca(ax)
        ylb, yub = plt.ylim()
        xlb, xub = plt.xlim()
        plt.text((xlb + xub) * 0.5, ylb + (yub - ylb) * 1.2, letter, color=letter_color,
                 horizontalalignment='center',verticalalignment='center',
                 fontsize=12, fontweight='bold')
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'breakdowns.{i}')
        plt.savefig(file, transparent=True)
# %% Heatmap
def get_fraction_in_same_direction(data, direction):
    """Return the fraction of samples in *data* whose sign matches
    *direction* (zeros count as matching); summed along axis 0 and
    normalized by the total number of elements."""
    matches = direction * data >= 0.
    return matches.sum(axis=0) / data.size
def get_median(data):
    """Return the median (50th percentile) of *data* along axis 0,
    rounded to significant figures via ``roundsigfigs``."""
    median = np.percentile(data, 50, axis=0)
    return roundsigfigs(median)
def plot_heatmap_comparison(comparison_names=None, xlabels=None):
    """Plot a heatmap of Monte Carlo scenario comparisons: cell color is
    the percent of samples agreeing with the median's sign, and the cell
    label is the rounded median value.

    Parameters
    ----------
    comparison_names : list of str, optional
        Comparison scenario names; defaults to ``oc.comparison_names``.
    xlabels : list of str, optional
        Column labels; defaults to the formatted comparison names.
    """
    if comparison_names is None: comparison_names = oc.comparison_names
    columns = comparison_names
    if xlabels is None: xlabels = [format_name(i).replace(' ', '') for i in comparison_names]
    def get_data(metric, name):
        # Fetch Monte Carlo samples for one metric of one comparison.
        df = get_monte_carlo(name, metric)
        values = df.values
        return values
    
    GWP_economic, GWP_ethanol, GWP_biodiesel, GWP_electricity, GWP_crude_glycerol, = lca_monte_carlo_metric_mockups
    MFPP, TCI, ethanol_production, biodiesel_production, electricity_production, natural_gas_consumption = tea_monte_carlo_metric_mockups
    GWP_ethanol_displacement = variables.GWP_ethanol_displacement
    GWP_ethanol_allocation = variables.GWP_ethanol_allocation
    # Heatmap rows: TEA metrics first, then the three GWP allocation methods.
    rows = [
        MFPP,
        TCI,
        ethanol_production,
        biodiesel_production,
        electricity_production,
        natural_gas_consumption,
        GWP_ethanol_displacement,
        GWP_ethanol_allocation,
        GWP_ethanol, # economic
    ]
    ylabels = [
        f"MFPP\n[{format_units('USD/MT')}]",
        f"TCI\n[{format_units('10^6*USD')}]",
        f"Ethanol production\n[{format_units('L/MT')}]",
        f"Biodiesel production\n[{format_units('L/MT')}]",
        f"Elec. prod.\n[{format_units('kWhr/MT')}]",
        f"NG cons.\n[{format_units('m^3/MT')}]",
        "GWP$_{\\mathrm{displacement}}$" f"\n[{GWP_units_L}]",
        "GWP$_{\\mathrm{energy}}$" f"\n[{GWP_units_L}]",
        "GWP$_{\\mathrm{economic}}$" f"\n[{GWP_units_L}]",
    ]
    N_rows = len(rows)
    N_cols = len(comparison_names)
    # data[i, j] holds the Monte Carlo sample array for row metric i, column j.
    data = np.zeros([N_rows, N_cols], dtype=object)
    data[:] = [[get_data(i, j) for j in columns] for i in rows]
    medians = np.zeros_like(data, dtype=float)
    fractions = medians.copy()
    for i in range(N_rows):
        for j in range(N_cols):
            medians[i, j] = x = get_median(data[i, j])
            # Fraction of samples sharing the median's sign.
            fractions[i, j] = get_fraction_in_same_direction(data[i, j], 1 if x > 0 else -1)
    fig, ax = plt.subplots()
    mbar = bst.plots.MetricBar(
        'Fraction in the same direction [%]', ticks=[-100, -75, -50, -25, 0, 25, 50, 75, 100],
        cmap=plt.cm.get_cmap('RdYlGn')
    )
    im, cbar = bst.plots.plot_heatmap(
        100 * fractions, vmin=0, vmax=100, ax=ax, cell_labels=medians,
        metric_bar=mbar, xlabels=xlabels, ylabels=ylabels,
    )
    cbar.ax.set_ylabel(mbar.title, rotation=-90, va="bottom")
    plt.sca(ax)
    ax.spines[:].set_visible(False)
    plt.grid(True, 'major', 'both', lw=1, color='w', ls='-')
# %% KDE
def plot_kde(name, metrics=(GWP_ethanol, MFPP), xticks=None, yticks=None,
             xbox_kwargs=None, ybox_kwargs=None, top_left='',
             top_right='Tradeoff', bottom_left='Tradeoff',
             bottom_right='', fx=None, fy=None):
    """Plot a 2D kernel density of two Monte Carlo metrics for one
    scenario comparison, with marginal box plots and quadrant labels,
    then adjust subplot spacing.

    Parameters
    ----------
    name : str
        Scenario or comparison name passed to ``oc.get_monte_carlo``
        (e.g. 'O1 - S1').
    metrics : tuple
        Two metric mockups; the first goes on x, the second on y. Their
        labels and default scaling come from ``kde_comparison_settings``.
    xticks, yticks : list, optional
        Tick positions for each axis.
    xbox_kwargs, ybox_kwargs : dict, optional
        Keyword arguments for the marginal box plots; default to CABBI
        orange (x) and blue (y) color schemes.
    top_left, top_right, bottom_left, bottom_right : str
        Quadrant labels. A label ending in '()' is prefixed with the
        percent of samples falling in that quadrant.
    fx, fy : float, optional
        Scaling factors applied to the x and y samples. Default to the
        factors registered in ``kde_comparison_settings``. (Previously
        these keywords were not accepted, so callers passing ``fx``
        raised a TypeError.)
    """
    set_font(size=8)
    set_figure_size(width='half', aspect_ratio=1.20)
    Xi, Yi = [i.index for i in metrics]
    df = oc.get_monte_carlo(name, metrics)
    y = df[Yi].values
    x = df[Xi].values
    sX, sY = [kde_comparison_settings[i] for i in metrics]
    _, xlabel, fx_default = sX
    _, ylabel, fy_default = sY
    # Fall back to the registered scaling factors unless explicitly given.
    if fx is None: fx = fx_default
    if fy is None: fy = fy_default
    if fx: x *= fx
    if fy: y *= fy
    ax = bst.plots.plot_kde(
        y=y, x=x, xticks=xticks, yticks=yticks,
        xticklabels=True, yticklabels=True,
        xbox_kwargs=xbox_kwargs or dict(light=CABBI_colors.orange.RGBn, dark=CABBI_colors.orange.shade(60).RGBn),
        ybox_kwargs=ybox_kwargs or dict(light=CABBI_colors.blue.RGBn, dark=CABBI_colors.blue.shade(60).RGBn),
    )
    plt.sca(ax)
    plt.xlabel(xlabel.replace('\n', ' '))
    plt.ylabel(ylabel.replace('\n', ' '))
    bst.plots.plot_quadrants()
    xlb, xub = plt.xlim()
    ylb, yub = plt.ylim()
    # Fractional positioning helpers within the current axis limits.
    xpos = lambda x: xlb + (xub - xlb) * x
    # xlpos = lambda x: xlb * (1 - x)
    ypos = lambda y: ylb + (yub - ylb) * y
    y_mt_0 = y > 0
    y_lt_0 = y < 0
    x_mt_0 = x > 0
    x_lt_0 = x < 0
    xleft = 0.02
    xright = 0.98
    ytop = 0.94
    ybottom = 0.02
    # For each quadrant that is visible, draw its label; labels ending in
    # '()' get the percent of samples in that quadrant prepended.
    if yub > 0. and xlb < 0.:
        if top_left.endswith('()'):
            p = (y_mt_0 & x_lt_0).sum() / y.size
            top_left = f"{p:.0%} {top_left.strip('()')}"
        plt.text(xpos(xleft), ypos(ytop), top_left, color=CABBI_colors.teal.shade(50).RGBn,
                 horizontalalignment='left', verticalalignment='top',
                 fontsize=10, fontweight='bold', zorder=10)
    if ylb < 0. and xlb < 0.:
        if bottom_left.endswith('()'):
            p = (y_lt_0 & x_lt_0).sum() / y.size
            bottom_left = f"{p:.0%} {bottom_left.strip('()')}"
        plt.text(xpos(xleft), ypos(ybottom), bottom_left, color=CABBI_colors.grey.shade(75).RGBn,
                 horizontalalignment='left', verticalalignment='bottom',
                 fontsize=10, fontweight='bold', zorder=10)
    if yub > 0. and xub > 0.:
        if top_right.endswith('()'):
            p = (y_mt_0 & x_mt_0).sum() / y.size
            top_right = f"{p:.0%} {top_right.strip('()')}"
        plt.text(xpos(xright), ypos(ytop), top_right, color=CABBI_colors.grey.shade(75).RGBn,
                 horizontalalignment='right', verticalalignment='top',
                 fontsize=10, fontweight='bold', zorder=10)
    if ylb < 0. and xub > 0.:
        if bottom_right.endswith('()'):
            p = (y_lt_0 & x_mt_0).sum() / y.size
            bottom_right = f"{p:.0%} {bottom_right.strip('()')}"
        plt.text(xpos(xright), ypos(ybottom), bottom_right, color=colors.red.shade(50).RGBn,
                 horizontalalignment='right', verticalalignment='bottom',
                 fontsize=10, fontweight='bold', zorder=10)
    plt.subplots_adjust(
        hspace=0.05, wspace=0.05,
        top=0.98, bottom=0.15,
        left=0.15, right=0.98,
    )
def plot_kde_2d(name, metrics=(GWP_ethanol, MFPP), xticks=None, yticks=None,
                top_left='', top_right='Tradeoff', bottom_left='Tradeoff',
                bottom_right='', xbox_kwargs=None, ybox_kwargs=None, titles=None):
    """Plot side-by-side 2D kernel densities of two Monte Carlo metrics
    for multiple scenarios (one column each), with quadrant labels and
    optional panel titles.

    Parameters
    ----------
    name : str or tuple of str
        Scenario/comparison name(s) for ``oc.get_monte_carlo``; a single
        string is wrapped into a one-element tuple.
    metrics : tuple
        Two metric mockups; the first goes on x, the second on y. Labels
        and scaling factors come from ``kde_comparison_settings``.
    xticks, yticks : list of lists, optional
        Per-panel tick positions.
    top_left, top_right, bottom_left, bottom_right : str
        Quadrant labels. A label ending in '()' is prefixed with the
        percent of samples in that quadrant; after the first panel in
        which it is drawn, the label resets to '()' so subsequent panels
        show only their own percentage.
    xbox_kwargs, ybox_kwargs : dict, optional
        Keyword arguments for the marginal box plots.
    titles : list of str, optional
        Panel titles drawn above the top row of axes.
    """
    set_font(size=8)
    set_figure_size(aspect_ratio=0.65)
    if isinstance(name, str): name = (name,)
    Xi, Yi = [i.index for i in metrics]
    dfs = [oc.get_monte_carlo(i, metrics) for i in name]
    sX, sY = [kde_comparison_settings[i] for i in metrics]
    _, xlabel, fx = sX
    _, ylabel, fy = sY
    xs = np.array([[df[Xi] for df in dfs]])
    ys = np.array([[df[Yi] for df in dfs]])
    # Apply the registered scaling factors (None/0 means no scaling).
    if fx: xs *= fx
    if fy: ys *= fy
    axes = bst.plots.plot_kde_2d(
        xs=xs, ys=ys,
        xticks=xticks, yticks=yticks,
        xticklabels=[True, True], yticklabels=[True, True],
        xbox_kwargs=2*[xbox_kwargs or dict(light=CABBI_colors.orange.RGBn, dark=CABBI_colors.orange.shade(60).RGBn)],
        ybox_kwargs=[ybox_kwargs or dict(light=CABBI_colors.blue.RGBn, dark=CABBI_colors.blue.shade(60).RGBn)],
    )
    M, N = axes.shape
    xleft = 0.02
    xright = 0.98
    ytop = 0.94
    ybottom = 0.02
    for i in range(M):
        for j in range(N):
            ax = axes[i, j]
            plt.sca(ax)
            # Only the bottom row gets an x label; only the first column a y label.
            if i == M - 1: plt.xlabel(xlabel.replace('\n', ' '))
            if j == 0: plt.ylabel(ylabel.replace('\n', ' '))
            bst.plots.plot_quadrants()
            xlb, xub = plt.xlim()
            ylb, yub = plt.ylim()
            xpos = lambda x: xlb + (xub - xlb) * x
            # xlpos = lambda x: xlb * (1 - x)
            ypos = lambda y: ylb + (yub - ylb) * y
            df = dfs[j]
            x = df[Xi]
            y = df[Yi]
            y_mt_0 = y > 0
            y_lt_0 = y < 0
            x_mt_0 = x > 0
            x_lt_0 = x < 0
            # Draw each visible quadrant label once with its percentage,
            # then reduce the label to '()' for the remaining panels.
            if yub > 0. and xlb < 0. and top_left:
                if top_left.endswith('()'):
                    p = (y_mt_0 & x_lt_0).sum() / y.size
                    top_left = f"{p:.0%} {top_left.strip('()')}"
                    replacement = '()'
                else:
                    replacement = None
                plt.text(xpos(xleft), ypos(ytop), top_left, color=CABBI_colors.teal.shade(50).RGBn,
                         horizontalalignment='left', verticalalignment='top',
                         fontsize=10, fontweight='bold', zorder=10)
                top_left = replacement
            if ylb < 0. and xlb < 0. and bottom_left:
                if bottom_left.endswith('()'):
                    p = (y_lt_0 & x_lt_0).sum() / y.size
                    bottom_left = f"{p:.0%} {bottom_left.strip('()')}"
                    replacement = '()'
                else:
                    replacement = None
                plt.text(xpos(xleft), ypos(ybottom), bottom_left, color=CABBI_colors.grey.shade(75).RGBn,
                         horizontalalignment='left', verticalalignment='bottom',
                         fontsize=10, fontweight='bold', zorder=10)
                bottom_left = replacement
            if yub > 0. and xub > 0. and top_right:
                if top_right.endswith('()'):
                    p = (y_mt_0 & x_mt_0).sum() / y.size
                    top_right = f"{p:.0%} {top_right.strip('()')}"
                    replacement = '()'
                else:
                    replacement = None
                plt.text(xpos(xright), ypos(ytop), top_right, color=CABBI_colors.grey.shade(75).RGBn,
                         horizontalalignment='right', verticalalignment='top',
                         fontsize=10, fontweight='bold', zorder=10)
                top_right = replacement
            if ylb < 0. and xub > 0. and bottom_right:
                if bottom_right.endswith('()'):
                    p = (y_lt_0 & x_mt_0).sum() / y.size
                    bottom_right = f"{p:.0%} {bottom_right.strip('()')}"
                    replacement = '()'
                else:
                    replacement = None
                plt.text(xpos(xright), ypos(ybottom), bottom_right, color=colors.red.shade(50).RGBn,
                         horizontalalignment='right', verticalalignment='bottom',
                         fontsize=10, fontweight='bold', zorder=10)
                bottom_right = replacement
    plt.subplots_adjust(
        hspace=0, wspace=0,
        top=0.98, bottom=0.15,
        left=0.1, right=0.98,
    )
    if titles:
        plt.subplots_adjust(
            top=0.90,
        )
        for ax, letter in zip(axes[0, :], titles):
            plt.sca(ax)
            ylb, yub = plt.ylim()
            xlb, xub = plt.xlim()
            plt.text((xlb + xub) * 0.5, ylb + (yub - ylb) * 1.17, letter, color=letter_color,
                     horizontalalignment='center', verticalalignment='center',
                     fontsize=12, fontweight='bold')
def plot_feedstock_conventional_comparison_kde():
    """Plot and save the oilcane vs. sugarcane (direct cogeneration,
    'O1 - S1') GWP/MFPP tradeoff KDE figure."""
    plot_kde(
        'O1 - S1',
        yticks=[-20, -10, 0, 10, 20, 30, 40],
        xticks=[-0.12, -0.09, -0.06, -0.03, 0, 0.03, 0.06],
        top_left='Oilcane Favored',
        bottom_right='Sugarcane\nFavored',
        top_right='GWP\nTradeoff()',
        bottom_left='MFPP\nTradeoff()',
    )
    basename = 'feedstock_conventional_comparison_kde'
    for extension in ('svg', 'png'):
        target = os.path.join(images_folder, f'{basename}.{extension}')
        plt.savefig(target, transparent=True)
def plot_feedstock_cellulosic_comparison_kde():
    """Plot and save the oilcane vs. sugarcane (integrated
    co-fermentation, 'O2 - S2') GWP/MFPP tradeoff KDE figure.

    NOTE(review): ``fx=1000.`` is passed below, but ``plot_kde`` as
    defined in this module accepts no ``fx`` keyword, so this call
    raises ``TypeError`` — either ``plot_kde`` should accept an ``fx``
    scaling override or this argument should be removed; confirm the
    intended x-axis scale (g vs. kg CO2-eq).
    """
    plot_kde(
        'O2 - S2',
        yticks=[-40, -20, 0, 20, 40, 60, 80],
        xticks=[-5, -4, -3, -2, -1, 0],
        top_left='Oilcane Favored',
        bottom_right='Sugarcane Favored',
        top_right='GWP\nTradeoff()',
        bottom_left='MFPP\nTradeoff()',
        fx=1000.,
    )
    for i in ('svg', 'png'):
        file = os.path.join(images_folder, f'feedstock_cellulosic_comparison_kde.{i}')
        plt.savefig(file, transparent=True)
def plot_feedstock_comparison_kde():
    """Plot and save the two-panel oilcane vs. sugarcane tradeoff KDE
    figure ('O1 - S1' for DC, 'O2 - S2' for ICF)."""
    plot_kde_2d(
        ('O1 - S1', 'O2 - S2'),
        yticks=[[-10, 0, 10, 20, 30, 40, 50, 60]],
        xticks=[[-0.12, -0.09, -0.06, -0.03, 0, 0.03, 0.06],
                [-2.0, -1.5, -1, -0.5, 0., 0.5, 1.0]],
        top_right='GWP\nTradeoff()',
        bottom_left='MFPP\nTradeoff()',
        top_left='Oilcane\nFavored()',
        bottom_right='\nSugarcane\nFavored()',
        titles=['(A) Direct Cogeneration', '(B) Integrated Co-Fermentation'],
    )
    # Remove horizontal gap between the two panels.
    plt.subplots_adjust(wspace=0)
    basename = 'feedstock_comparison_kde'
    for extension in ('svg', 'png'):
        target = os.path.join(images_folder, f'{basename}.{extension}')
        plt.savefig(target, transparent=True)
def plot_configuration_comparison_kde():
    """Plot the O1 vs. O2 configuration comparison KDE (direct cogeneration
    vs. integrated co-fermentation) and save it as SVG and PNG."""
    plot_kde(
        'O1 - O2',
        yticks=[-20, 0, 20, 40, 60],
        xticks=[-2, -1.5, -1, -0.5, 0, 0.5, 1],
        top_right='GWP\nTradeoff()',
        bottom_left='MFPP\nTradeoff()',
        top_left='DC Favored()',
        bottom_right='ICF\nFavored()',
    )
    for ext in ('svg', 'png'):
        target = os.path.join(
            images_folder, f'configuration_comparison_kde.{ext}'
        )
        plt.savefig(target, transparent=True)
def plot_separated_configuration_comparison_kde():
    """Plot the O1 and O2 configurations as separate KDE panels (absolute
    values rather than differences) and save as SVG and PNG."""
    plot_kde_2d(
        ('O1', 'O2'),
        yticks=[[-20, 0, 20, 40, 60]],
        xticks=[
            [0, 0.5, 1, 1.5],
            [0, 2, 4, 6, 8, 10]
        ],
        top_right='GWP\nTradeoff()',
        bottom_left='MFPP\nTradeoff()',
        top_left='DC Favored()',
        bottom_right='ICF\nFavored()',
    )
    for ext in ('svg', 'png'):
        target = os.path.join(
            images_folder, f'separated_configuration_comparison_kde.{ext}'
        )
        plt.savefig(target, transparent=True)
def plot_crude_configuration_comparison_kde():
    """Plot the biodiesel-production vs. crude-oil-production comparison KDE
    (O1 - O3 and O2 - O4) and save as SVG and PNG."""
    plot_kde_2d(
        ('O1 - O3', 'O2 - O4'),
        yticks=[[-12, 0, 12, 24, 36, 48]],
        xticks=[
            [-0.5, -0.4, -0.3, -0.2, -0.1, 0],
            [-1, -0.8, -0.6, -0.4, -0.2, 0]
        ],
        top_right='GWP\nTradeoff()',
        bottom_left='MFPP\nTradeoff()',
        top_left='Biodiesel\nProduction Favored()',
        bottom_right='Crude Oil\nProduction Favored()',
        titles=['(A) Direct Cogeneration', '(B) Integrated Co-Fermentation'],
    )
    for ext in ('svg', 'png'):
        target = os.path.join(
            images_folder, f'crude_configuration_comparison_kde.{ext}'
        )
        plt.savefig(target, transparent=True)
def plot_agile_comparison_kde():
    """Plot the sorghum-integration (agile) vs. cane-only comparison KDE
    (O1* - O1 and O2* - O2, TCI vs. MFPP) and save as SVG and PNG."""
    plot_kde_2d(
        ('O1* - O1', 'O2* - O2'),
        metrics=[TCI, MFPP],
        yticks=[[0, 3, 6, 9, 12, 15]],
        xticks=2*[[-150, -125, -100, -75, -50, -25, 0]],
        top_right='TCI-Tradeoff()',
        bottom_left='MFPP\nTradeoff()',
        top_left='Sorghum\nIntegration Favored()',
        bottom_right='Cane-only\nFavored()',
        xbox_kwargs=dict(light=CABBI_colors.green_dirty.RGBn,
                         dark=CABBI_colors.green_dirty.shade(60).RGBn),
        titles=['(A) Direct Cogeneration', '(B) Integrated Co-Fermentation'],
    )
    for ext in ('svg', 'png'):
        target = os.path.join(
            images_folder, f'agile_conventional_comparison_kde.{ext}'
        )
        plt.savefig(target, transparent=True)
def plot_open_comparison_kde(overlap=False):
    """Plot MFPP vs. TCI Monte Carlo KDEs for the four base configurations.

    When `overlap` is True, each panel overlays the conventional and
    cellulosic distributions (tuples of data sets per cell); otherwise the
    two technologies get their own row of panels.
    """
    metrics = [MFPP, TCI, GWP_ethanol, biodiesel_production]
    # Monte Carlo tables per configuration: O = oilcane, S = sugarcane;
    # 1 = conventional (DC), 2 = cellulosic (ICF).
    df_conventional_oc = oc.get_monte_carlo('O1', metrics)
    df_cellulosic_oc = oc.get_monte_carlo('O2', metrics)
    df_conventional_sc = oc.get_monte_carlo('S1', metrics)
    df_cellulosic_sc = oc.get_monte_carlo('S2', metrics)
    MFPPi = MFPP.index
    TCIi = TCI.index
    if overlap:
        # One row of panels; each cell holds a tuple of two distributions
        # (DC, ICF) so `plot_kde_2d` draws them overlaid. dtype=object keeps
        # the tuples from being merged into a single numeric array.
        ys = np.zeros([1, 2], dtype=object)
        xs = np.zeros([1, 2], dtype=object)
        ys[0, 0] = (df_conventional_oc[MFPPi], df_cellulosic_oc[MFPPi])
        ys[0, 1] = (df_conventional_sc[MFPPi], df_cellulosic_sc[MFPPi])
        xs[0, 0] = (df_conventional_oc[TCIi], df_cellulosic_oc[TCIi])
        xs[0, 1] = (df_conventional_sc[TCIi], df_cellulosic_sc[TCIi])
        yticks = [[-30, -15, 0, 15, 30, 45, 60, 75]]
        xticks = 2*[[200, 300, 400, 500, 600]]
    else:
        # 2x2 grid: rows are technologies (DC, ICF), columns are feedstocks
        # (oilcane, sugarcane).
        ys = np.array([
            [df_conventional_oc[MFPPi], df_conventional_sc[MFPPi]],
            [df_cellulosic_oc[MFPPi], df_cellulosic_sc[MFPPi]]
        ])
        xs = np.array([
            [df_conventional_oc[TCIi], df_conventional_sc[TCIi]],
            [df_cellulosic_oc[TCIi], df_cellulosic_sc[TCIi]]
        ])
        yticks = 2*[[-30, -15, 0, 15, 30, 45, 60, 75]]
        xticks = 2*[[200, 300, 400, 500, 600]]
    bst.plots.plot_kde_2d(
        ys=ys, xs=xs, xticks=xticks, yticks=yticks,
        xbox_kwargs=[dict(position=1), dict(position=1)],
        ybox_kwargs=[dict(position=0), dict(position=0)],
    )
#%% General Monte Carlo box plots
def plot_monte_carlo_across_coordinate(coordinate, data, color_wheel):
    """Plot Monte Carlo percentiles across `coordinate`.

    Recurses over lists of data sets, drawing each entry with the next
    color drawn from `color_wheel`.
    """
    if not isinstance(data, list):
        shade = color_wheel.next()
        return bst.plots.plot_montecarlo_across_coordinate(
            coordinate, data,
            light_color=shade.tint(50).RGBn,
            dark_color=shade.shade(50).RGBn,
        )
    return [
        plot_monte_carlo_across_coordinate(coordinate, entry, color_wheel)
        for entry in data
    ]
def monte_carlo_box_plot(data, positions, light_color, dark_color, width=None,
                         hatch=None, outliers=False, **kwargs):
    """Draw a single styled Monte Carlo box plot on the current axes.

    Parameters
    ----------
    data : array-like
        Distribution(s) to plot.
    positions : list
        X positions of the box(es).
    light_color, dark_color :
        Face color and edge/median color, respectively.
    width : float, optional
        Box width; defaults to 0.8.
    hatch : str, optional
        Hatch pattern applied to the box faces.
    outliers : bool, optional
        Show outlier fliers as small diamonds; hidden by default.

    Returns
    -------
    dict
        The artist dictionary returned by ``plt.boxplot``.
    """
    if width is None: width = 0.8
    if outliers:
        flierprops = {'marker':'D',
                      'markerfacecolor': light_color,
                      'markeredgecolor': dark_color,
                      'markersize':3}
    else:
        # Empty marker hides the fliers entirely.
        flierprops = {'marker':''}
    bp = plt.boxplot(
        x=data, positions=positions, patch_artist=True,
        widths=width, whis=[5, 95],
        boxprops={'facecolor':light_color,
                  'edgecolor':dark_color},
        medianprops={'color':dark_color,
                     'linewidth':1.5},
        flierprops=flierprops,
        **kwargs
    )
    if hatch:
        for box in bp['boxes']:
            box.set(hatch = hatch)
    # Previously the artist dict was dropped; callers (e.g. the `plot`
    # closure in `plot_monte_carlo`) store the return value, so expose it.
    return bp
def plot_monte_carlo(derivative=False, absolute=True, comparison=True,
                     configuration_names=None, comparison_names=None,
                     metrics=None, labels=None, tickmarks=None, agile=True,
                     ncols=1, expand=None, step_min=None,
                     agile_only=False, xrot=None,
                     color_wheel=None, axes_box=None):
    """Draw a grid of Monte Carlo box plots.

    One row of axes per metric; one box (or group of boxes) per
    configuration/comparison column. When both `absolute` and `comparison`
    are True, comparison columns are appended after the configuration
    columns and shaded.

    Parameters
    ----------
    derivative : bool
        Use the derivative-metric settings instead of the plain metrics.
    absolute, comparison : bool
        Include absolute configurations and/or pairwise comparisons.
    configuration_names, comparison_names, metrics, labels : list, optional
        Override the defaults derived from the module-level settings.
    tickmarks : list, optional
        Per-row tick values; computed from the data when omitted.
    agile, agile_only : bool
        Keep (or keep only) the agile ('*') configurations.
    ncols : int
        Number of axis columns the metric rows are wrapped into.
    expand, step_min :
        Forwarded to ``bst.plots.rounded_tickmarks_from_data``.
    xrot : float, optional
        Rotation of the x tick labels.
    color_wheel, axes_box : optional
        Reuse an existing color wheel / existing axes grid.

    Returns
    -------
    (fig, axes)
    """
    if derivative:
        default_configuration_names = ['O1', 'O2']
        default_comparison_names = ['O2 - O1']
        metric_info = mc_derivative_metric_settings
        default_metrics = list(metric_info)
    else:
        default_configuration_names = oc.configuration_names[:-2]
        default_comparison_names = oc.comparison_names
        if comparison:
            metric_info = mc_comparison_settings
        else:
            metric_info = mc_metric_settings
        if agile_only:
            default_configuration_names = [i for i in default_configuration_names if '*' in i]
            default_comparison_names = [i for i in default_comparison_names if '*' in i]
            default_metrics = ['MFPP', 'TCI', 'production']
        else:
            default_metrics = list(metric_info)
    if configuration_names is None: configuration_names = default_configuration_names
    if comparison_names is None: comparison_names = default_comparison_names
    if metrics is None: metrics = default_metrics
    combined = absolute and comparison
    if agile_only:
        configuration_names = [i for i in configuration_names if '*' in i]
        comparison_names = [i for i in comparison_names if '*' in i]
    elif not agile:
        configuration_names = [i for i in configuration_names if '*' not in i]
        comparison_names = [i for i in comparison_names if '*' not in i]
    if combined:
        columns = configurations = configuration_names + comparison_names
    elif absolute:
        columns = configurations = configuration_names
    elif comparison:
        columns = configurations = comparison_names
    else:
        columns = configurations = []
    # Each metric-info entry is (registry key, axis label, unit factor).
    rows, ylabels, factors = zip(*[metric_info[i] for i in metrics])
    factors = [(i, j) for i, j in enumerate(factors) if j is not None]
    if color_wheel is None: color_wheel = CABBI_colors.wheel()
    N_rows = len(rows)
    if axes_box is None:
        fig, axes_box = plt.subplots(ncols=ncols, nrows=int(round(N_rows / ncols)))
        plt.subplots_adjust(wspace=0.45)
    else:
        fig = None
    axes = axes_box.transpose()
    axes = axes.flatten()
    N_cols = len(columns)
    xtext = labels or [format_name(i).replace(' ', '') for i in configurations]
    N_marks = len(xtext)
    xticks = tuple(range(N_marks))
    def get_data(metric, name):
        # Fall back to a zero placeholder when Monte Carlo results are
        # missing/unreadable so the rest of the figure can still be drawn.
        # `except Exception` (not a bare except) so KeyboardInterrupt and
        # SystemExit still propagate.
        try:
            df = get_monte_carlo(name, metric)
        except Exception:
            return np.zeros([1, 1])
        else:
            values = df.values
            return values
    def plot(arr, position):
        # 2-D arrays hold several scenarios per column; spread their boxes
        # around `position`.
        if arr.ndim == 2:
            N = arr.shape[1]
            width = 0.618 / N
            boxwidth = 0.618 / (N + 1/N)
            plots = []
            for i in range(N):
                color = color_wheel.next()
                boxplot = monte_carlo_box_plot(
                    data=arr[:, i], positions=[position + (i-(N-1)/2)*width],
                    light_color=color.RGBn,
                    dark_color=color.shade(60).RGBn,
                    width=boxwidth,
                    hatch=getattr(color, 'hatch', None),
                )
                plots.append(boxplot)
            return plots
        else:
            color = color_wheel.next()
            return monte_carlo_box_plot(
                data=arr, positions=[position],
                light_color=color.RGBn,
                dark_color=color.shade(60).RGBn,
                width=0.618,
            )
    data = np.zeros([N_rows, N_cols], dtype=object)
    data[:] = [[get_data(i, j) for j in columns] for i in rows]
    # Apply unit-conversion factors row-wise.
    for i, j in factors: data[i, :] *= j
    if tickmarks is None:
        tickmarks = [
            bst.plots.rounded_tickmarks_from_data(
                i, step_min=step_min, N_ticks=8, lb_max=0, center=0,
                f=roundsigfigs, expand=expand,
                f_min=lambda x: np.percentile(x, 5),
                f_max=lambda x: np.percentile(x, 95),
            )
            for i in data
        ]
    x0 = len(configuration_names) - 0.5
    xf = len(columns) - 0.5
    for i in range(N_rows):
        ax = axes[i]
        plt.sca(ax)
        if combined:
            # Shade and delimit the comparison columns.
            bst.plots.plot_vertical_line(x0)
            ax.axvspan(x0, xf, color=colors.purple_tint.tint(60).RGBn)
        plt.xlim(-0.5, xf)
    for j in range(N_cols):
        color_wheel.restart()
        for i in range(N_rows):
            ax = axes[i]
            plt.sca(ax)
            plot(data[i, j], j)
            plt.ylabel(ylabels[i])
    for i in range(N_rows):
        ax = axes[i]
        plt.sca(ax)
        yticks = tickmarks[i]
        # NOTE(review): limits are set from the first TWO tick values; if
        # `tickmarks[i]` has more than two entries this clips the axis to the
        # first interval — confirm `yticks[-1]` was not intended.
        plt.ylim([yticks[0], yticks[1]])
        if yticks[0] < 0.:
            bst.plots.plot_horizontal_line(0, color=CABBI_colors.black.RGBn, lw=0.8, linestyle='--')
        # `axes_box` may be 1-D (membership test raises); fall back to the
        # row index to decide which axes get x tick labels.
        try:
            xticklabels = xtext if ax in axes_box[-1] else []
        except Exception:
            xticklabels = xtext if i == N_rows - 1 else []
        bst.plots.style_axis(ax,
            xticks = xticks,
            yticks = yticks,
            xticklabels= xticklabels,
            ytick0=False,
            ytickf=False,
            offset_xticks=True,
            xrot=xrot,
        )
    if fig is None:
        fig = plt.gcf()
    else:
        plt.subplots_adjust(hspace=0)
    fig.align_ylabels(axes)
    return fig, axes
#%% Spearman
def plot_spearman(configurations, labels=None, metric=None,
                  kind=None, with_units=None, legend=None, legend_kwargs=None, **kwargs):
    """Plot Spearman rank correlations of model parameters against a metric.

    Coefficients are loaded from each configuration's spearman file;
    parameters that do not apply to a configuration (or to the analysis
    `kind`) are zeroed out so they do not show.

    Parameters
    ----------
    configurations : list[str]
        Configuration names (e.g. 'O1', 'S2*').
    labels : list[str], optional
        Legend labels; defaults to the formatted configuration names.
    metric : optional
        Metric object, or 'MFPP'/'GWP'; defaults by `kind`.
    kind : {'TEA', 'LCA'}, optional
        Default 'TEA'; also selects which parameter groups are masked.
    with_units : bool, optional
        Keep the bracketed units in the parameter labels (default True).
    legend : bool, optional
        Draw a legend (default True).
    legend_kwargs : dict, optional
        Matplotlib legend options; defaults to lower-left placement.

    Returns
    -------
    (fig, ax)
    """
    if kind is None: kind = 'TEA'
    if with_units is None: with_units = True
    if legend is None: legend = True
    if metric is None:
        if kind == 'TEA':
            metric = MFPP
            metric_name = metric.name
        elif kind == 'LCA':
            metric = GWP_economic
            metric_name = r'GWP$_{\mathrm{economic}}$'
        else:
            raise ValueError(f"invalid kind '{kind}'")
    else:
        if metric == 'MFPP':
            metric = MFPP
        elif metric == 'GWP':
            metric = GWP_economic
        metric_name = metric.name
    # Pre-formatted unit strings used in the parameter labels below.
    stream_price = format_units('USD/L')
    USD_MT = format_units('USD/MT')
    ng_price = format_units('USD/m^3')
    electricity_price = format_units('USD/kWhr')
    operating_days = format_units('day/yr')
    capacity = format_units('10^6 MT/yr')
    titer = format_units('g/L')
    productivity = format_units('g/L/hr')
    material_GWP = '$\\mathrm{kg} \\cdot \\mathrm{CO}_{2}\\mathrm{eq} \\cdot \\mathrm{kg}^{-1}$'
    feedstock_GWP = '$\\mathrm{g} \\cdot \\mathrm{CO}_{2}\\mathrm{eq} \\cdot \\mathrm{kg}^{-1}$'
    # Each entry pairs a parameter label with the configurations for which
    # that parameter is irrelevant (its coefficient is zeroed out).
    index, ignored_list = zip(*[
        ('Crushing mill oil recovery [60 $-$ 95 %]', ['S2', 'S1', 'S2*', 'S1*']),
        ('Saccharification oil recovery [70 $-$ 95 %]', ['S2', 'S1', 'S2*', 'S1*', 'O1', 'O1*']),
        (f'Cane operating days [120 $-$ 180 {operating_days}]', []),
        (f'Sorghum operating days [30 $-$ 60 {operating_days}]', ['S2', 'S1', 'O1', 'O2']),
        (f'Crushing capacity [1.2 $-$ 2.0 {capacity}]', []),
        (f'Ethanol price [0.269, 0.476, 0.758 {stream_price}]', []),
        (f'Relative biodiesel price [0.0819, 0.786, 1.09 {stream_price}]', []),
        (f'Natural gas price [0.105, 0.122, 0.175 {ng_price}]', ['S1', 'O1', 'S1*', 'O1*']),
        (f'Electricity price [0.0583, 0.065, 0.069 {electricity_price}]', ['S2', 'O2', 'S2*', 'O2*']),
        ('IRR [10 $-$ 15 %]', []),
        (f'Crude glycerol price [100 $-$ 220 {USD_MT}]', ['S2', 'S1', 'S2*', 'S1*']),
        (f'Pure glycerol price [488 $-$ 812 {USD_MT}]', ['S2', 'S1', 'S2*', 'S1*']),
        ('Saccharification reaction time [54 $-$ 90 hr]', ['S1', 'O1', 'S1*', 'O1*']),
        (f'Cellulase price [159 $-$ 265 {USD_MT}]', ['S1', 'O1', 'S1*', 'O1*']),
        ('Cellulase loading [1.5 $-$ 2.5 wt. % cellulose]', ['S1', 'O1', 'S1*', 'O1*']),
        ('PTRS base cost [14.9 $-$ 24.7 MMUSD]', ['S1', 'O1', 'S1*', 'O1*']),
        # ('Pretreatment reactor system base cost [14.9 $-$ 24.7 MMUSD]', ['S1', 'O1', 'S1*', 'O1*']),
        ('Cane glucose yield [85 $-$ 97.5 %]', ['S1', 'O1', 'S1*', 'O1*']),
        ('Sorghum glucose yield [85 $-$ 97.5 %]', ['S1', 'O1', 'S1*', 'O1*']),
        ('Cane xylose yield [65 $-$ 97.5 %]', ['S1', 'O1', 'S1*', 'O1*']),
        ('Sorghum xylose yield [65 $-$ 97.5 %]', ['S1', 'O1', 'S1*', 'O1*']),
        ('Glucose to ethanol yield [90 $-$ 95 %]', ['S1', 'O1', 'S1*', 'O1*']),
        ('Xylose to ethanol yield [50 $-$ 95 %]', ['S1', 'O1', 'S1*', 'O1*']),
        (f'Titer [65 $-$ 130 {titer}]', ['S1', 'O1', 'S1*', 'O1*']),
        (f'Productivity [1.0 $-$ 2.0 {productivity}]', ['S1', 'O1', 'S1*', 'O1*']),
        ('Cane PL content [7.5 $-$ 12.5 %]', ['S2', 'S1', 'S2*', 'S1*']),
        ('Sorghum PL content [7.5 $-$ 12.5 %]', ['S2', 'S1', 'S2*', 'S1*']),
        ('Cane FFA content [7.5 $-$ 12.5 %]', ['S2', 'S1', 'S2*', 'S1*']),
        ('Sorghum FFA content [7.5 $-$ 12.5 %]', ['S2', 'S1', 'S2*', 'S1*']),
        ('Cane oil content [5 $-$ 15 dry wt. %]', ['S2', 'S1', 'S2*', 'S1*']),
        ('Relative sorghum oil content [-3 $-$ 0 dry wt. %]', ['S2', 'S1', 'S2*', 'S1*', 'O2', 'O1']),
        ('TAG to FFA conversion [17.25 $-$ 28.75 % theoretical]', ['S1', 'O1', 'S1*', 'O1*']),
        # TODO: change lower upper values to baseline +- 10%
        (f'Feedstock GWPCF [26.3 $-$ 44.0 {feedstock_GWP}]', ['S1', 'S2', 'S1*', 'S2*']),
        (f'Methanol GWPCF [0.338 $-$ 0.563 {material_GWP}]', ['S1', 'S2', 'S1*', 'S2*']),
        (f'Pure glycerine GWPCF [1.25 $-$ 2.08 {material_GWP}]', ['S1', 'S2', 'S1*', 'S2*']),
        (f'Cellulase GWPCF [6.05 $-$ 10.1 {material_GWP}]', ['S1', 'O1', 'S1*', 'O1*']),
        (f'Natural gas GWPCF [0.297 $-$ 0.363 {material_GWP}]', ['S1', 'O1', 'S1*', 'O1*']),
    ])
    if not with_units: index = [i.split(' [')[0] for i in index]
    ignored_dct = {
        'S1': [],
        'O1': [],
        'S2': [],
        'O2': [],
        'S1*': [],
        'O1*': [],
        'S2*': [],
        'O2*': [],
    }
    for i, ignored in enumerate(ignored_list):
        for name in ignored: ignored_dct[name].append(i)
        index_name = index[i]
        # Mask economic parameters for LCA plots and GWP parameters for TEA
        # plots, across all configurations.
        if kind == 'LCA':
            for term in ('cost', 'price', 'IRR', 'time', 'capacity'):
                if term in index_name:
                    for name in ignored_dct: ignored_dct[name].append(i)
                    break
        elif kind == 'TEA':
            if 'GWP' in index_name:
                for name in ignored_dct: ignored_dct[name].append(i)
        else:
            raise ValueError(f"invalid kind '{kind}'")
    rhos = []
    for name in configurations:
        file = spearman_file(name)
        # `except Exception` (not a bare except): warn and skip a
        # configuration whose spearman file cannot be read, without
        # swallowing KeyboardInterrupt/SystemExit.
        try:
            df = pd.read_excel(file, header=[0, 1], index_col=[0, 1])
        except Exception:
            warning = RuntimeWarning(f"file '{file}' not found")
            warn(warning)
            continue
        s = df[metric.index]
        # Zero out masked parameters so they plot as no correlation.
        s.iloc[ignored_dct[name]] = 0.
        rhos.append(s)
    color_wheel = [CABBI_colors.orange, CABBI_colors.green_soft, CABBI_colors.blue, CABBI_colors.brown]
    fig, ax = bst.plots.plot_spearman_2d(rhos, index=index,
                                         color_wheel=color_wheel,
                                         name=metric_name,
                                         **kwargs)
    if legend:
        if legend_kwargs is None:
            legend_kwargs = {'loc': 'lower left'}
        plt.legend(
            handles=[
                mpatches.Patch(
                    color=color_wheel[i].RGBn,
                    label=labels[i] if labels else format_name(configurations[i])
                )
                for i in range(len(configurations))
            ],
            **legend_kwargs,
        )
    return fig, ax
# %% Other
def plot_configuration_breakdown(name, across_coordinate=False, **kwargs):
    """Plot the unit-group (process area) breakdown for configuration `name`.

    With `across_coordinate`, plots the breakdown across feedstock oil
    content; otherwise plots a single stacked breakdown.

    NOTE: the single-breakdown branch renames `oc.unit_groups` entries and
    their metrics in place — a module-level side effect that persists after
    this call.
    """
    oc.load(name)
    if across_coordinate:
        return bst.plots.plot_unit_groups_across_coordinate(
            oc.set_cane_oil_content,
            [5, 7.5, 10, 12.5],
            'Feedstock oil content [dry wt. %]',
            oc.unit_groups,
            colors=[area_colors[i.name].RGBn for i in oc.unit_groups],
            hatches=[area_hatches[i.name] for i in oc.unit_groups],
            **kwargs,
        )
    else:
        def format_total(x):
            # Small totals keep 3 significant figures; larger totals are
            # rounded to 3 leading digits and comma-grouped.
            if x < 1e3:
                return format(x, '.3g')
            else:
                x = int(x)
                n = 10 ** (len(str(x)) - 3)
                value = int(round(x / n) * n)
                return format(value, ',')
        # Expand abbreviated group/metric names for presentation (in place).
        for i in oc.unit_groups:
            if i.name == 'EtOH prod.':
                i.name = 'Ethanol production'
            elif i.name == 'Oil ext.':
                i.name = 'Oil extraction'
            elif i.name == 'Biod. prod.':
                i.name = 'Biodiesel production'
            i.metrics[0].name = 'Inst. eq.\ncost'
            i.metrics[3].name = 'Elec.\ncons.'
            i.metrics[4].name = 'Mat.\ncost'
        return bst.plots.plot_unit_groups(
            oc.unit_groups,
            colors=[area_colors[i.name].RGBn for i in oc.unit_groups],
            hatches=[area_hatches[i.name] for i in oc.unit_groups],
            format_total=format_total,
            fraction=True,
            legend_kwargs=dict(
                loc='lower center',
                ncol=4,
                bbox_to_anchor=(0, -0.52),
                labelspacing=1.5, handlelength=2.8,
                handleheight=1, scale=0.8,
            ),
            **kwargs,
        )
def plot_TCI_areas_across_oil_content(configuration='O2'):
    """Simulate `configuration` across 5-15 dry wt. % cane oil content and
    plot the change in installed equipment cost of the areas whose cost
    rises ('Oil & fiber areas') versus those whose cost falls
    ('Sugar areas')."""
    oc.load(configuration)
    installed_costs = {group.name: [] for group in oc.unit_groups}
    oil_contents = np.linspace(5, 15, 10)
    for oil_content in oil_contents:
        oc.set_cane_oil_content(oil_content)
        oc.sys.simulate()
        for group in oc.unit_groups:
            installed_costs[group.name].append(group.get_installed_cost())
    rising, falling = [], []
    for costs in installed_costs.values():
        first, *_, last = costs
        (rising if last > first else falling).append(costs)
    rising_totals = np.sum(rising, axis=0)
    rising_totals -= rising_totals[0]
    falling_totals = np.sum(falling, axis=0)
    falling_totals -= falling_totals[-1]
    plt.plot(oil_contents, rising_totals, label='Oil & fiber areas')
    plt.plot(oil_contents, falling_totals, label='Sugar areas')
# def plot_monte_carlo_across_oil_content(kind=0, derivative=False):
# MFPP, TCI, *production, electricity_production, natural_gas_consumption = tea_monte_carlo_metric_mockups
# rows = [MFPP, TCI, production]
# if kind == 0:
# columns = across_oil_content_names
# elif kind == 1:
# columns = across_oil_content_agile_names
# elif kind == 2:
# columns = across_oil_content_comparison_names
# elif kind == 3:
# columns = across_oil_content_agile_comparison_names
# elif kind == 4:
# columns = across_oil_content_agile_direct_comparison_names
# else:
# raise NotImplementedError(str(kind))
# if derivative:
# x = 100 * (oil_content[:-1] + np.diff(oil_content) / 2.)
# ylabels = [
# f"MFPP der. [{format_units('USD/MT')}]",
# f"TCI der. [{format_units('10^6*USD')}]",
# f"Production der. [{format_units('L/MT')}]"
# ]
# else:
# x = 100 * oil_content
# ylabels = [
# f"MFPP$\backprime$ [{format_units('USD/MT')}]",
# f"TCI [{format_units('10^6*USD')}]",
# f"Production [{format_units('L/MT')}]"
# ]
# N_cols = len(columns)
# N_rows = len(rows)
# fig, axes = plt.subplots(ncols=N_cols, nrows=N_rows)
# data = np.zeros([N_rows, N_cols], dtype=object)
# def get_data(metric, name):
# if isinstance(metric, bst.Variable):
# return get_monte_carlo_across_oil_content(name, metric, derivative)
# else:
# return [get_data(i, name) for i in metric]
# data = np.array([[get_data(i, j) for j in columns] for i in rows])
# tickmarks = [None] * N_rows
# get_max = lambda x: max([i.max() for i in x]) if isinstance(x, list) else x.max()
# get_min = lambda x: min([i.min() for i in x]) if isinstance(x, list) else x.min()
# N_ticks = 5
# for r in range(N_rows):
# lb = min(min([get_min(i) for i in data[r, :]]), 0)
# ub = max([get_max(i) for i in data[r, :]])
# diff = 0.1 * (ub - lb)
# ub += diff
# if derivative:
# lb = floor(lb)
# ub = ceil(ub)
# step = (ub - lb) / (N_ticks - 1)
# tickmarks[r] = [0, 1] if step == 0 else [int(lb + step * i) for i in range(N_ticks)]
# else:
# if rows[r] is MFPP:
# if kind == 0 or kind == 1:
# tickmarks[r] = [-20, 0, 20, 40, 60]
# elif kind == 2:
# tickmarks[r] = [-20, -10, 0, 10, 20]
# elif kind == 3:
# tickmarks[r] = [-10, 0, 10, 20, 30]
# elif kind == 4:
# tickmarks[r] = [-5, 0, 5, 10, 15]
# continue
# lb = floor(lb / 15) * 15
# ub = ceil(ub / 15) * 15
# step = (ub - lb) / (N_ticks - 1)
# tickmarks[r] = [0, 1] if step == 0 else [int(lb + step * i) for i in range(N_ticks)]
# color_wheel = CABBI_colors.wheel()
# for j in range(N_cols):
# color_wheel.restart()
# for i in range(N_rows):
# arr = data[i, j]
# ax = axes[i, j]
# plt.sca(ax)
# percentiles = plot_monte_carlo_across_coordinate(x, arr, color_wheel)
# if i == 0: ax.set_title(format_name(columns[j]))
# xticklabels = i == N_rows - 1
# yticklabels = j == 0
# if xticklabels: plt.xlabel('Oil content [dry wt. %]')
# if yticklabels: plt.ylabel(ylabels[i])
# bst.plots.style_axis(ax,
# xticks = [5, 10, 15],
# yticks = tickmarks[i],
# xticklabels= xticklabels,
# yticklabels= yticklabels,
# ytick0=False)
# for i in range(N_cols): fig.align_ylabels(axes[:, i])
# plt.subplots_adjust(hspace=0.1, wspace=0.1) | 40.778014 | 196 | 0.569404 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 15,384 | 0.267562 |
d81681f0c3e8533953487db033ce07fd94815535 | 103 | py | Python | custom_components/yoosee/const.py | shaonianzhentan/ha_yoosee_camera | 63e75b1285c03da75f98264dfbab01280b20f3c0 | [
"MIT"
] | null | null | null | custom_components/yoosee/const.py | shaonianzhentan/ha_yoosee_camera | 63e75b1285c03da75f98264dfbab01280b20f3c0 | [
"MIT"
] | null | null | null | custom_components/yoosee/const.py | shaonianzhentan/ha_yoosee_camera | 63e75b1285c03da75f98264dfbab01280b20f3c0 | [
"MIT"
] | null | null | null | DOMAIN = "yoosee"
PLATFORMS = ["camera"]
DEFAULT_NAME = "Yoosee摄像头"
VERSION = "1.1"
SERVICE_PTZ = 'ptz' | 20.6 | 26 | 0.68932 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.394495 |
d81683ba8142b60ee8046ff652ca3747377a9744 | 4,729 | py | Python | generate_data.py | StanfordASL/Adaptive-Control-Oriented-Meta-Learning | 093d2764314bbfccc3a804fb9e737a10d08a1eb5 | [
"MIT"
] | 24 | 2021-03-14T19:00:49.000Z | 2022-03-23T14:31:33.000Z | generate_data.py | wuyou33/Adaptive-Control-Oriented-Meta-Learning | 093d2764314bbfccc3a804fb9e737a10d08a1eb5 | [
"MIT"
] | 1 | 2021-06-07T09:57:26.000Z | 2021-06-12T19:57:00.000Z | generate_data.py | wuyou33/Adaptive-Control-Oriented-Meta-Learning | 093d2764314bbfccc3a804fb9e737a10d08a1eb5 | [
"MIT"
] | 3 | 2021-06-14T09:05:27.000Z | 2021-12-22T19:31:15.000Z | """
TODO description.
Author: Spencer M. Richards
Autonomous Systems Lab (ASL), Stanford
(GitHub: spenrich)
"""
if __name__ == "__main__":
    import pickle
    import jax
    import jax.numpy as jnp
    from jax.experimental.ode import odeint
    from utils import spline, random_ragged_spline
    from dynamics import prior, plant, disturbance
    # Seed random numbers
    seed = 0
    key = jax.random.PRNGKey(seed)
    # Generate smooth trajectories
    num_traj = 500
    T = 30
    num_knots = 6
    poly_orders = (9, 9, 6)
    deriv_orders = (4, 4, 2)
    # Per-dimension knot step/value bounds for (x, y, ϕ); the heading ϕ is
    # restricted to ±π/3.
    min_step = jnp.array([-2., -2., -jnp.pi/6])
    max_step = jnp.array([2., 2., jnp.pi/6])
    min_knot = jnp.array([-jnp.inf, -jnp.inf, -jnp.pi/3])
    max_knot = jnp.array([jnp.inf, jnp.inf, jnp.pi/3])
    key, *subkeys = jax.random.split(key, 1 + num_traj)
    subkeys = jnp.vstack(subkeys)
    # vmap over the PRNG keys only (one random spline per trajectory).
    in_axes = (0, None, None, None, None, None, None, None, None)
    t_knots, knots, coefs = jax.vmap(random_ragged_spline, in_axes)(
        subkeys, T, num_knots, poly_orders, deriv_orders,
        min_step, max_step, min_knot, max_knot
    )
    # x_coefs, y_coefs, ϕ_coefs = coefs
    r_knots = jnp.dstack(knots)
    # Sampled-time simulator
    # NOTE(review): `jax.partial` (an alias of functools.partial) was removed
    # in newer JAX releases — confirm the pinned jax version, or switch to
    # functools.partial.
    @jax.partial(jax.vmap, in_axes=(None, 0, 0, 0))
    def simulate(ts, w, t_knots, coefs,
                 plant=plant, prior=prior, disturbance=disturbance):
        """TODO: docstring."""
        # Construct spline reference trajectory
        def reference(t):
            x_coefs, y_coefs, ϕ_coefs = coefs
            x = spline(t, t_knots, x_coefs)
            y = spline(t, t_knots, y_coefs)
            ϕ = spline(t, t_knots, ϕ_coefs)
            ϕ = jnp.clip(ϕ, -jnp.pi/3, jnp.pi/3)
            r = jnp.array([x, y, ϕ])
            return r
        # Required derivatives of the reference trajectory
        def ref_derivatives(t):
            ref_vel = jax.jacfwd(reference)
            ref_acc = jax.jacfwd(ref_vel)
            r = reference(t)
            dr = ref_vel(t)
            ddr = ref_acc(t)
            return r, dr, ddr
        # Feedback linearizing PD controller
        def controller(q, dq, r, dr, ddr):
            kp, kd = 10., 0.1
            e, de = q - r, dq - dr
            dv = ddr - kp*e - kd*de
            # τ = H(q)·dv + C(q,dq)·dq + g(q), inverted through the input
            # matrix B to get the actuator command.
            H, C, g, B = prior(q, dq)
            τ = H@dv + C@dq + g
            u = jnp.linalg.solve(B, τ)
            return u, τ
        # Closed-loop ODE for `x = (q, dq)`, with a zero-order hold on
        # the controller
        def ode(x, t, u, w=w):
            q, dq = x
            f_ext = disturbance(q, dq, w)
            ddq = plant(q, dq, u, f_ext)
            dx = (dq, ddq)
            return dx
        # Simulation loop
        def loop(carry, input_slice):
            t_prev, q_prev, dq_prev, u_prev = carry
            t = input_slice
            # Integrate one sample period with the previous (held) input.
            qs, dqs = odeint(ode, (q_prev, dq_prev), jnp.array([t_prev, t]),
                             u_prev)
            q, dq = qs[-1], dqs[-1]
            r, dr, ddr = ref_derivatives(t)
            u, τ = controller(q, dq, r, dr, ddr)
            carry = (t, q, dq, u)
            output_slice = (q, dq, u, τ, r, dr)
            return carry, output_slice
        # Initial conditions
        t0 = ts[0]
        r0, dr0, ddr0 = ref_derivatives(t0)
        q0, dq0 = r0, dr0
        u0, τ0 = controller(q0, dq0, r0, dr0, ddr0)
        # Run simulation loop
        carry = (t0, q0, dq0, u0)
        carry, output = jax.lax.scan(loop, carry, ts[1:])
        q, dq, u, τ, r, dr = output
        # Prepend initial conditions
        q = jnp.vstack((q0, q))
        dq = jnp.vstack((dq0, dq))
        u = jnp.vstack((u0, u))
        τ = jnp.vstack((τ0, τ))
        r = jnp.vstack((r0, r))
        dr = jnp.vstack((dr0, dr))
        return q, dq, u, τ, r, dr
    # Sample wind velocities from the training distribution
    w_min = 0. # minimum wind velocity in inertial `x`-direction
    w_max = 6. # maximum wind velocity in inertial `x`-direction
    a = 5.     # shape parameter `a` for beta distribution
    b = 9.     # shape parameter `b` for beta distribution
    key, subkey = jax.random.split(key, 2)
    w = w_min + (w_max - w_min)*jax.random.beta(subkey, a, b, (num_traj,))
    # Simulate tracking for each `w`
    dt = 0.01
    t = jnp.arange(0, T + dt, dt)  # same times for each trajectory
    q, dq, u, τ, r, dr = simulate(t, w, t_knots, coefs)
    data = {
        'seed': seed, 'prng_key': key,
        't': t, 'q': q, 'dq': dq,
        'u': u, 'r': r, 'dr': dr,
        't_knots': t_knots, 'r_knots': r_knots,
        'w': w, 'w_min': w_min, 'w_max': w_max,
        'beta_params': (a, b),
    }
    with open('training_data.pkl', 'wb') as file:
        pickle.dump(data, file)
| 33.06993 | 76 | 0.534574 | 0 | 0 | 0 | 0 | 2,572 | 0.541702 | 0 | 0 | 970 | 0.204297 |
d816ebec6670bc97c3cfcc6d198d67b571f9d900 | 674 | py | Python | backend/model/views.py | princesinghtomar/BTP | 44bf84db09637453b1e107bfdd305a47610b81f2 | [
"MIT"
] | null | null | null | backend/model/views.py | princesinghtomar/BTP | 44bf84db09637453b1e107bfdd305a47610b81f2 | [
"MIT"
] | null | null | null | backend/model/views.py | princesinghtomar/BTP | 44bf84db09637453b1e107bfdd305a47610b81f2 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from rest_framework.parsers import JSONParser
from django.http.response import JsonResponse
import base64
from numpy import random
# Create your views here.
@csrf_exempt
def algo(req):
    """Toy scoring endpoint: accepts a JSON body with a base64 ``audioData``
    payload via POST and returns randomly chosen positive/negative feedback.

    NOTE(review): the decoded audio is never used and the verdict is random —
    presumably a stub until the real model is wired in; confirm.
    """
    if req.method == "POST":
        data = JSONParser().parse(req)
        # Skips the first 22 characters before decoding — presumably a
        # data-URL prefix such as "data:audio/...;base64,"; confirm against
        # the client payload.
        audioData = base64.b64decode(data["audioData"][22:].encode('ascii'))
        prob = random.uniform(0, 1)
        if prob > 0.5:
            return JsonResponse({"output": "Nice!!", "feedback": "This is pos feedback"})
        return JsonResponse({"output": "Oh No!!", "feedback": "This is neg feedback"})
    # Previously non-POST requests fell through and returned None, which
    # Django reports as a server error; reply explicitly instead.
    return JsonResponse({"error": "method not allowed"}, status=405)
| 35.473684 | 90 | 0.675074 | 0 | 0 | 0 | 0 | 426 | 0.632047 | 0 | 0 | 146 | 0.216617 |
d8189813d74db9ea651f07f400203fcf191b7cd7 | 1,024 | py | Python | scvi/data/anndata/_constants.py | Semih-Kurt/scvi-tools | 1bea2af8cc99e11d55a6925f09d978de5f6994fb | [
"BSD-3-Clause"
] | null | null | null | scvi/data/anndata/_constants.py | Semih-Kurt/scvi-tools | 1bea2af8cc99e11d55a6925f09d978de5f6994fb | [
"BSD-3-Clause"
] | null | null | null | scvi/data/anndata/_constants.py | Semih-Kurt/scvi-tools | 1bea2af8cc99e11d55a6925f09d978de5f6994fb | [
"BSD-3-Clause"
] | null | null | null | from typing import NamedTuple
# scVI Manager Store Constants
# ----------------------------
# Keys for UUIDs used for referencing model class manager stores.
_SCVI_UUID_KEY = "_scvi_uuid"
_SOURCE_SCVI_UUID_KEY = "_source_scvi_uuid"
# scVI Registry Constants
# -----------------------
# Keys used in the scVI registry.
_SCVI_VERSION_KEY = "scvi_version"
_MODEL_NAME_KEY = "model_name"
_SETUP_KWARGS_KEY = "setup_kwargs"
_FIELD_REGISTRIES_KEY = "field_registries"
_DATA_REGISTRY_KEY = "data_registry"
_STATE_REGISTRY_KEY = "state_registry"
_SUMMARY_STATS_KEY = "summary_stats"
# scVI Data Registry Constants
# ----------------------------
# Keys used in the data registry.
_DR_ATTR_NAME = "attr_name"
_DR_ATTR_KEY = "attr_key"
# AnnData Object Constants
# ------------------------
# AnnData object attribute names.
class _ADATA_ATTRS_NT(NamedTuple):
    """Canonical names of the AnnData attributes data is read from.

    Field order is significant: as a ``NamedTuple``, positional
    construction and iteration follow this declaration order.
    """
    X: str = "X"
    LAYERS: str = "layers"
    OBS: str = "obs"
    OBSM: str = "obsm"
    VAR: str = "var"
    VARM: str = "varm"
# Singleton instance used by the rest of the package.
_ADATA_ATTRS = _ADATA_ATTRS_NT()
| 23.272727 | 65 | 0.668945 | 166 | 0.162109 | 0 | 0 | 0 | 0 | 0 | 0 | 575 | 0.561523 |
d8195b25d16e501245f75d758ee63f9c330bba60 | 1,994 | py | Python | hummingbot/connector/derivative/dydx_perpetual/dydx_perpetual_utils.py | coreydemarse/hummingbot | 48dd45b103622b198ca8e833ed9de7d0ad573ed9 | [
"Apache-2.0"
] | 11 | 2020-09-15T08:21:59.000Z | 2022-03-19T05:06:59.000Z | hummingbot/connector/derivative/dydx_perpetual/dydx_perpetual_utils.py | coreydemarse/hummingbot | 48dd45b103622b198ca8e833ed9de7d0ad573ed9 | [
"Apache-2.0"
] | null | null | null | hummingbot/connector/derivative/dydx_perpetual/dydx_perpetual_utils.py | coreydemarse/hummingbot | 48dd45b103622b198ca8e833ed9de7d0ad573ed9 | [
"Apache-2.0"
] | 5 | 2020-09-18T12:59:31.000Z | 2021-06-27T01:46:16.000Z | from hummingbot.client.config.config_var import ConfigVar
from hummingbot.client.config.config_methods import using_exchange
CENTRALIZED = True
EXAMPLE_PAIR = "BTC-USD"
DEFAULT_FEES = [0.05, 0.2]


def _dydx_perpetual_config_var(key, prompt):
    # Every dydx perpetual credential shares the same ConfigVar settings:
    # required only when the connector is in use, stored encrypted, and
    # requested during connection setup.
    return ConfigVar(key=key,
                     prompt=prompt,
                     required_if=using_exchange("dydx_perpetual"),
                     is_secure=True,
                     is_connect_key=True)


# Credential config variables, keyed by their config name.
KEYS = {
    key: _dydx_perpetual_config_var(key, prompt)
    for key, prompt in (
        ("dydx_perpetual_api_key",
         "Enter your dydx Perpetual API key >>> "),
        ("dydx_perpetual_api_secret",
         "Enter your dydx Perpetual API secret >>> "),
        ("dydx_perpetual_passphrase",
         "Enter your dydx Perpetual API passphrase >>> "),
        ("dydx_perpetual_account_number",
         "Enter your dydx Perpetual API account_number >>> "),
        ("dydx_perpetual_stark_private_key",
         "Enter your stark private key >>> "),
        ("dydx_perpetual_ethereum_address",
         "Enter your ethereum wallet address >>> "),
    )
}
| 38.346154 | 77 | 0.605817 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 714 | 0.358074 |
d81a7d31cbd3b0514e3ad7982c9d385703974bfc | 546 | py | Python | sodp/reports/migrations/0017_auto_20210728_2110.py | ElHombreMorado8/sodp | e4a05620b633d261b22025af1d488cf767ba2e30 | [
"Apache-2.0"
] | null | null | null | sodp/reports/migrations/0017_auto_20210728_2110.py | ElHombreMorado8/sodp | e4a05620b633d261b22025af1d488cf767ba2e30 | [
"Apache-2.0"
] | 2 | 2021-07-15T10:13:58.000Z | 2022-03-30T14:20:03.000Z | sodp/reports/migrations/0017_auto_20210728_2110.py | ElHombreMorado8/sodp | e4a05620b633d261b22025af1d488cf767ba2e30 | [
"Apache-2.0"
] | 3 | 2021-07-03T07:13:48.000Z | 2021-08-10T19:28:20.000Z | # Generated by Django 3.1.12 on 2021-07-28 21:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds a single-column (user) index and a composite
    (user, project) index to the Report model."""
    dependencies = [
        ('reports', '0016_auto_20210727_2156'),
    ]
    operations = [
        migrations.AddIndex(
            model_name='report',
            index=models.Index(fields=['user'], name='reports_user_index'),
        ),
        migrations.AddIndex(
            model_name='report',
            index=models.Index(fields=['user', 'project'], name='reports_project_index'),
        ),
    ]
| 24.818182 | 89 | 0.600733 | 452 | 0.827839 | 0 | 0 | 0 | 0 | 0 | 0 | 162 | 0.296703 |
d81a9d62719f07472e189017fffe17520e8b2f77 | 150 | py | Python | mfsapiapp/apps.py | lupamo3/django-points | 7dc943521c73de1728c5f96d529b16ae51de98e5 | [
"MIT"
] | null | null | null | mfsapiapp/apps.py | lupamo3/django-points | 7dc943521c73de1728c5f96d529b16ae51de98e5 | [
"MIT"
] | null | null | null | mfsapiapp/apps.py | lupamo3/django-points | 7dc943521c73de1728c5f96d529b16ae51de98e5 | [
"MIT"
] | 1 | 2021-09-21T06:20:28.000Z | 2021-09-21T06:20:28.000Z | from django.apps import AppConfig
class MfsapiappConfig(AppConfig):
    """Django app configuration for the ``mfsapiapp`` application."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'mfsapiapp'
| 21.428571 | 56 | 0.766667 | 113 | 0.753333 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.28 |
d81bf2a69ea31df5d99253836392c80819c38099 | 5,633 | py | Python | pkg/msgapi/mqtt/models/mqmsg.py | ToraNova/rapidflask | f42354b296659dac5be904d7bb68076b9458f79a | [
"MIT"
] | null | null | null | pkg/msgapi/mqtt/models/mqmsg.py | ToraNova/rapidflask | f42354b296659dac5be904d7bb68076b9458f79a | [
"MIT"
] | null | null | null | pkg/msgapi/mqtt/models/mqmsg.py | ToraNova/rapidflask | f42354b296659dac5be904d7bb68076b9458f79a | [
"MIT"
] | null | null | null | #--------------------------------------------------
# mqtt_control.py
# MQTT_Control is a database model to control subscriptions
# and publications
# introduced in u8
# ToraNova
#--------------------------------------------------
from pkg.resrc import res_import as r
from pkg.system.database import dbms
Base = dbms.msgapi.base
class MQTT_Msg(Base):
# PERMA : DO NOT CHANGE ANYTHING HERE UNLESS NECESSARY
__tablename__ = "MQTT_Msgs" #Try to use plurals here (i.e car's')
id = r.Column(r.Integer, primary_key=True)
def __repr__(self):
return '<%r %r>' % (self.__tablename__,self.id)
#---------------------------------------------------------
######################################################################################################
# EDITABLE ZONE
######################################################################################################
# TODO: DEFINE LIST OF COLUMNS
# the string topic of the topic to subscribe to
topic = r.Column(r.String(r.lim.MAX_MQTT_TOPIC_SIZE), nullable=False)
tlink = r.Column(r.Integer, nullable=True) #links to one of our subscribed topic
msg = r.Column(r.String(r.lim.MAX_MQTT_MSGCT_SIZE), nullable=False)
timev0 = r.Column(r.DateTime, nullable=False) #insertion time
timed0 = r.Column(r.DateTime, nullable=True) #deletion time (msg to be kept until)
pflag0 = r.Column(r.Boolean, nullable=False) #flag to check if the msg has been processed
pflag1 = r.Column(r.Boolean, nullable=False) #flag to check if the msg has been processed successfully
delonproc = r.Column(r.Boolean, nullable=False) #flag to check if this message should be delete on process
# TODO: DEFINE THE RLIST
# CHANGED ON U6 : RLISTING NOW MERGED WITH RLINKING : see 'RLINKING _ HOW TO USE:'
# The following is for r-listing (as of u6, rlinking as well) (resource listing)
# the values in the rlist must be the same as the column var name
rlist = r.OrderedDict([
("Topic","topic"),
("Linked (description)","__link__/tlink/MQTT_Subs/id:description"),
("Content","msg"),
("Received","__time__/%b-%d-%Y %H:%M:%S/timev0"),
("Delete on","__time__/%b-%d-%Y %H:%M:%S/timed0"),
("Processed?","pflag0"),
("Process OK?","pflag1")
]) #header,row data
# RLINKING _ HOW TO USE :
# using the __link__ keyword, seperate the arguments with /
# The first argument is the local reference, the field in which we use to refer
# the second argument is the foreign table
# the third argument is the foreign table Primary key
# the fourth argument is the field we want to find from the foreign table
# NOTICE that the fourth table uses ':' instead of /.
# Example
# "RPi id":"__link__/rpi_id/RPi/id:rpi_name"
# for the display of RPi id, we link to a foreign table that is called RPi
# we use the rpi_id foreign key on this table, to locate the id on the foreign table
# then we query for the field rpi_name
# TODO: DEFINE THE priKey and display text
#this primary key is used for rlisting/adding and mod.
rlist_priKey = "id"
rlist_dis = "MQTT Message Stack" #display for r routes
def get_onrecv(self):
# get the name of the process used on this msg
from pkg.msgapi.mqtt.models import MQTT_Sub
t = MQTT_Sub.query.filter( MQTT_Sub.id == self.tlink ).first()
if( t is not None ):
return t.onrecv
# TODO: CONSTRUCTOR DEFINES, PLEASE ADD IN ACCORDING TO COLUMNS
# the key in the insert_list must be the same as the column var name
def __init__(self,insert_list):
'''requirements in insert_list
@param tlink - link to the mqtt sub record
@param topic - the topic string (incase linking failed)
@param msg - the msg content'''
from pkg.msgapi.mqtt.models import MQTT_Sub
from pkg.system.servlog import srvlog
import datetime
from datetime import timedelta
# find links
self.tlink = r.checkNull( insert_list, "tlink")
self.topic = insert_list["topic"]
self.msg = insert_list["msg"]
self.timev0 = datetime.datetime.now()
self.pflag0 = insert_list["pflag0"]
self.pflag1 = insert_list["pflag1"]
submaster = MQTT_Sub.query.filter( MQTT_Sub.id == self.tlink ).first()
if(submaster is not None):
if( submaster.stordur is None):
self.timed0 = None #store forever
else:
self.timed0 = self.timev0 + timedelta( seconds= submaster.stordur)
self.delonproc = submaster.delonproc #inherits from the topic master
else:
srvlog["oper"].warning("MQTT message added to unknown link topic:"+self.topic+
" id="+int(self.tlink))
self.timed0 = r.lim.DEF_MQTT_MSGST_DURA
self.delonproc = True
def default_add_action(self):
# This will be run when the table is added via r-add
# may do some imports here i.e (from pkg.database.fsqlite import db_session)
# TODO add a MQTT restart function here
pass
def default_mod_action(self):
# This will be run when the table is added modified via r-mod
# may do some imports here i.e (from pkg.database.fsqlite import db_session)
pass
def default_del_action(self):
# This will be run when the table is deleted
# may do some imports here i.e (from pkg.database.fsqlite import db_session)
pass
######################################################################################################
| 46.172131 | 110 | 0.607314 | 5,299 | 0.940707 | 0 | 0 | 0 | 0 | 0 | 0 | 3,236 | 0.574472 |
d81c2a498b63c4f8d2b42b07b7d7874f56faeea8 | 799 | py | Python | Raspberry_Pi_Animated_Gif_Player/code.py | albinger/Adafruit_Learning_System_Guides | 4fe2da261fe5d1ca282b86bd3b93ee1466346fa7 | [
"MIT"
] | null | null | null | Raspberry_Pi_Animated_Gif_Player/code.py | albinger/Adafruit_Learning_System_Guides | 4fe2da261fe5d1ca282b86bd3b93ee1466346fa7 | [
"MIT"
] | null | null | null | Raspberry_Pi_Animated_Gif_Player/code.py | albinger/Adafruit_Learning_System_Guides | 4fe2da261fe5d1ca282b86bd3b93ee1466346fa7 | [
"MIT"
] | null | null | null | # SPDX-FileCopyrightText: 2021 Melissa LeBlanc-Williams for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import usb_cdc
import rotaryio
import board
import digitalio
serial = usb_cdc.data
encoder = rotaryio.IncrementalEncoder(board.ROTA, board.ROTB)
button = digitalio.DigitalInOut(board.SWITCH)
button.switch_to_input(pull=digitalio.Pull.UP)
last_position = None
button_state = False
while True:
position = encoder.position
if last_position is None or position != last_position:
serial.write(bytes(str(position) + ",", "utf-8"))
last_position = position
print(button.value)
if not button.value and not button_state:
button_state = True
if button.value and button_state:
serial.write(bytes("click,", "utf-8"))
button_state = False
| 27.551724 | 79 | 0.737171 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 135 | 0.168961 |
d81d21cec7557d8a1961e62b91b95e81583ae2fb | 2,753 | py | Python | test.py | bing1100/DeepLabV3Plus-Pytorch | 8267a8f1f2cfbcb58b8884c1bc48d09b04ffd795 | [
"MIT"
] | null | null | null | test.py | bing1100/DeepLabV3Plus-Pytorch | 8267a8f1f2cfbcb58b8884c1bc48d09b04ffd795 | [
"MIT"
] | null | null | null | test.py | bing1100/DeepLabV3Plus-Pytorch | 8267a8f1f2cfbcb58b8884c1bc48d09b04ffd795 | [
"MIT"
] | null | null | null | import fiona
import rasterio
import rasterio.plot
import matplotlib as mpl
import matplotlib.pyplot
from descartes import PolygonPatch
from shapely.geometry import LineString
import numpy as np
import sys
from multiprocessing import Pool
np.set_printoptions(threshold=np.inf)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from PIL import Image
import math
import pickle
from multiprocessing import Pool
from skimage import io
ROADS = "/media/bhux/ssd/oilwell/deeplab_data/"
SAVE_LOC = "/media/bhux/ssd/oilwell/deeplab_data/"
FILELOCATION = '/media/bhux/ssd/oilwell/images/'
LABELLOCATION = '/media/bhux/ssd/oilwell/labels/all/'
CENTERLOCATION = '/media/bhux/ssd/oilwell/labels/converted/'
SAVELOCATION = '/media/bhux/ssd/oilwell/deeplab_data/'
SAT = False
GT = True
# Normalize bands into 0.0 - 1.0 scale
def normalize(array):
array_min, array_max = array.min(), array.max()
return (array - array_min) / (array_max - array_min)
def func(i):
print("starting " + str(i))
fileName = FILELOCATION + str(i) + '.tif'
src = rasterio.open(fileName)
fig, ax = mpl.pyplot.subplots(1, figsize=(12,12))
mpl.pyplot.axis('off')
if SAT:
fir = src.read(5)
nir = src.read(4)
red = src.read(3)
green = src.read(2)
blue = src.read(1)
# Normalize band DN
fir_norm = normalize(fir)
nir_norm = normalize(nir)
red_norm = normalize(red)
green_norm = normalize(green)
blue_norm = normalize(blue)
# Stack bands
nrg = np.dstack((red_norm, green_norm, blue_norm))
mpl.pyplot.imshow(nrg)
saveName = SAVELOCATION + "/region_" + str(i) + "_sat_rgb.png"
fig.savefig(saveName, bbox_inches='tight', transparent=True, pad_inches=0)
# Stack bands
nrg = np.dstack((red_norm, nir_norm, fir_norm))
mpl.pyplot.imshow(nrg)
saveName = SAVELOCATION + "/region_" + str(i) + "_sat.png"
fig.savefig(saveName, bbox_inches='tight', transparent=True, pad_inches=0)
fig.clf()
mpl.pyplot.close()
def func1(idx):
print("Processing ", idx)
image = io.imread(SAVELOCATION + "/region_" + str(idx) + "_sat_rgb.png")
_ = plt.hist(image.ravel(), bins = 256, color = 'orange', )
_ = plt.hist(image[:, :, 0].ravel(), bins = 256, color = 'red', alpha = 0.5)
_ = plt.hist(image[:, :, 1].ravel(), bins = 256, color = 'Green', alpha = 0.5)
_ = plt.hist(image[:, :, 2].ravel(), bins = 256, color = 'Blue', alpha = 0.5)
_ = plt.xlabel('Intensity Value')
_ = plt.ylabel('Count')
_ = plt.legend(['Total', 'Red_Channel', 'Green_Channel', 'Blue_Channel'])
plt.show()
with Pool(12) as p:
p.map(func1, range(1))
| 31.284091 | 82 | 0.645478 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 529 | 0.192154 |
d81db9c64184b7bbb4f7d92a6d33f2986503acaf | 5,895 | py | Python | alveus/models/LayeredModel.py | levifussell/Alveus | 730f06d39dfd3f761cfecc4cc2834d79a11f3845 | [
"MIT"
] | 2 | 2018-04-14T19:04:00.000Z | 2019-03-22T23:11:32.000Z | alveus/models/LayeredModel.py | levifussell/alveus | 730f06d39dfd3f761cfecc4cc2834d79a11f3845 | [
"MIT"
] | null | null | null | alveus/models/LayeredModel.py | levifussell/alveus | 730f06d39dfd3f761cfecc4cc2834d79a11f3845 | [
"MIT"
] | null | null | null | import numpy as np
from ..layers.Layer import LayerTrainable
class LayeredModel(object):
def __init__(self, layers):
"""
layers : a list of layers. Treated as a feed-forward model
"""
assert len(layers) > 0, "Model layers must be non-empty"
# check that the output of each layer is the same size as the input of
# the next layer
#for l1, l2 in zip(layers[:-1], layers[1:]):
# print(l1.output_size, l2.input_size)
for l1, l2 in zip(layers[:-1], layers[1:]):
#print(l1,l2)
#print(l1.output_size,l2.input_size)
assert l1.output_size == l2.input_size, "layers do not match input to output in the model"
self.layers = layers
def reset(self):
for l in self.layers:
l.reset()
def forward(self, x, end_layer=None):
"""
x : data to push through the network
end_layer : the layer to stop the forward movement of the data. Used for training. (default=None)
"""
x = x.squeeze()
assert (self.layers[0].input_size == 1 and x.shape == ()) or len(x) == self.layers[0].input_size, "unexpected input dimensionality (check bias)"
# if an end layer has not been named, feedforward the entire model
if end_layer is None:
f_layers = self.layers
else:
f_layers = self.layers[:end_layer]
# for l in f_layers:
# x = np.array(l.forward(x))
for l in f_layers:
#print(l.info())
x = l.forward(x)
return x
def train(self, X, y, warmup_timesteps=100, data_repeats=1):
"""
x : input data to train on
y : output data to train on
warmup_timesteps : number of timesteps to run the data before training (default=100)
"""
assert isinstance(self.layers[-1], LayerTrainable), "This model cannot be trained because the final layer of type {} is not trainable".format(type(self.layers[-1]))
# TODO: for now we assume ONLY the last layer can be trained
# warmup stage
# for x in X[:warmup_timesteps]:
# # some function that allows us to display
# self.display()
# _ = self.forward(x, len(self.layers)-1)
# # training stage
# y_forward = np.zeros((np.shape(X[warmup_timesteps:])[0],
# self.layers[-1].input_size))
# for idx, x in enumerate(X[warmup_timesteps:]):
# # some function that allows us to display
# self.display()
# y_p = self.forward(x, len(self.layers)-1)
# y_forward[idx, :] = y_p
# y_nonwarmup = y[warmup_timesteps:]
y_forward = np.zeros((np.shape(X)[0] - data_repeats*warmup_timesteps,
self.layers[-1].input_size))
y_nonwarmup = np.zeros((np.shape(y)[0] - data_repeats*warmup_timesteps,
np.shape(y)[1]))
y_idx = 0
data_rate = np.shape(X)[0] / data_repeats
# print(data_rate)
# print(X[:10])
# print(X[data_rate:(data_rate+10)])
for idx,x in enumerate(X):
# some function that allows us to display
self.display()
# if idx % data_rate == 0:
# print(x)
# self.reset()
if idx % data_rate < warmup_timesteps:
_ = self.forward(x, len(self.layers)-1)
else:
y_p = self.forward(x, len(self.layers)-1)
y_forward[y_idx, :] = y_p
y_nonwarmup[y_idx, :] = y[idx, :]
y_idx += 1
# training stage
# y_forward = np.zeros((np.shape(X[warmup_timesteps:])[0],
# self.layers[-1].input_size))
# for idx, x in enumerate(X[warmup_timesteps:]):
# # some function that allows us to display
# self.display()
# y_p = self.forward(x, len(self.layers)-1)
# y_forward[idx, :] = y_p
# y_nonwarmup = y[warmup_timesteps:]
self.layers[-1].train(y_forward, y_nonwarmup)
def generate(self, x_data, count, reset_increment=-1, warmup_timesteps=0):
"""
Given a single datapoint, the model will feed this back into itself
to produce generative output data.
x_data : data to generate from (the first data point will be used unless reset_increment != -1)
count : number of times to run the generative process
reset_increment : how often to feed the generator the 'real' data value (default=-1 <= no reset)
"""
# y_outputs = []
y_outputs = np.zeros(count)
# x = np.array(x_data[0])
x = x_data[0]
for e in range(-warmup_timesteps, count, 1):
# some function that allows us to display
self.display()
# if we enable reseting, feed the 'real' data in (e == 0) is for warm-up swap
if e == 0 or (reset_increment != -1 and e % reset_increment == 0):
assert e < len(x_data), "generating data is less than the specified count"
x = x_data[e + warmup_timesteps]
# forward generating without 'warmup'
if e >= 0:
x = self.forward(x)
y_outputs[e] = x
x = np.hstack((x, 1))
# forward generating with 'warmup'
else:
_ = self.forward(x_data[e + warmup_timesteps])
# return np.array(y_outputs).squeeze()
return y_outputs.squeeze()
def get_output_size(self):
return self.layers[-1].output_size
def get_input_size(self):
return self.layers[0].input_size
def display(self):
pass
| 36.165644 | 172 | 0.547074 | 5,830 | 0.988974 | 0 | 0 | 0 | 0 | 0 | 0 | 2,882 | 0.488889 |
d81df0fb17b9790a2714ea5d69265b8729cbc1bc | 6,269 | py | Python | sandbox/02_v4l2_common_feed_pipes.py | Zalewa/voyandz | e5da27ea073bd69055021454aa020fb8fa77775a | [
"MIT"
] | null | null | null | sandbox/02_v4l2_common_feed_pipes.py | Zalewa/voyandz | e5da27ea073bd69055021454aa020fb8fa77775a | [
"MIT"
] | null | null | null | sandbox/02_v4l2_common_feed_pipes.py | Zalewa/voyandz | e5da27ea073bd69055021454aa020fb8fa77775a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from flask import Flask, send_file, make_response, Response, g, request, stream_with_context
from io import BytesIO
import atexit
import errno
import os
import subprocess
import threading
INPUT = '/dev/video0'
FFMPEG = "/home/test/ffmpeg-nvenc/ffmpeg"
app = Flask(__name__)
@app.route('/pic')
def pic():
cmd = [FFMPEG, '-s', 'uhd2160', '-i', INPUT,
'-vframes', '1', '-vcodec', 'png', '-f', 'image2pipe', '-']
app.logger.debug('exec: {}'.format(' '.join(cmd)))
p = subprocess.Popen(cmd, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
stdout, stderr = p.communicate()
ec = p.wait()
if ec == 0:
return send_file(BytesIO(stdout), mimetype="image/png")
else:
return make_response("<pre>{}</pre>".format(stderr.decode('utf-8', 'replace')), 500)
@app.route('/mpjpeg')
def mpjpeg():
cmd = [FFMPEG, '-s', 'uhd2160', '-i', INPUT,
'-f', 'mpjpeg', '-s', 'hd720',
'-qmin', '1', '-qmax', '6', '-r', '15', '-']
return Response(_stream(cmd), mimetype="multipart/x-mixed-replace;boundary=ffserver")
@app.route('/ts')
def ts():
cmd = [FFMPEG, '-s', 'uhd2160', '-i', INPUT,
'-f', 'mpegts', '-s', 'hd720',
'-vcodec', 'h264_nvenc', '-qp', '23',
'-g', '30', '-bf', '0', '-zerolatency', '1',
'-strict_gop', '1', '-sc_threshold', '0', '-']
return Response(_stream(cmd), mimetype="video/ts")
@atexit.register
def teardown(*args):
app.logger.debug('teardown')
app.logger.debug(global_ctx)
global_ctx.close()
def _stream(cmd):
app.logger.debug('stream: {}'.format(' '.join(cmd)))
def generate():
with global_ctx.feed(cmd) as feed:
rpipe = feed.new_reader()
try:
while True:
chunk = os.read(rpipe, 10240)
if not chunk:
break
yield chunk
finally:
os.close(rpipe)
return stream_with_context(generate())
class _GlobalContext:
def __init__(self):
app.logger.debug('_GlobalContext')
self._feeds = {}
self._feed_lock = threading.Lock()
def feed(self, cmd):
with self._feed_lock:
feed_id = ' '.join(cmd)
feed = self._feeds.get(feed_id)
if feed is None:
feed = _Feed(cmd)
self._feeds[feed_id] = feed
return feed
def close(self):
with self._feed_lock:
for feed in self._feeds.values():
feed._close()
self._feeds = {}
class _Feed:
def __init__(self, cmd):
self._acquired = 0
self._lock = threading.Lock()
self._process = None
self._rpipe = None
self._cmd = cmd
self._buffer = None
self._thread = None
self._closed = False
def new_reader(self):
app.logger.debug("feed new reader")
return self._buffer.new_reader()
def _open(self):
app.logger.debug("feed open")
self._closed = False
self._buffer = _MultiClientBuffer()
self._rpipe, wpipe = os.pipe()
try:
try:
self._process = subprocess.Popen(self._cmd, stdin=None, stdout=wpipe, stderr=subprocess.DEVNULL, close_fds=True)
finally:
os.close(wpipe)
thread = threading.Thread(target=self._buffer_loop)
thread.daemon = True
thread.start()
self._thread = thread
except:
if self._rpipe is not None:
os.close(self._rpipe)
self._rpipe = None
self._closed = True
raise
def _close(self):
app.logger.debug("feed close")
self._buffer.close()
self._closed = True
p = self._process
if p:
p.terminate()
try:
p.wait(1.0)
except subprocess.TimeoutExpired:
p.kill()
p.wait()
self._process = None
if self._rpipe:
os.close(self._rpipe)
self._rpipe = None
thread = self._thread
self._thread = None
if thread:
thread.join()
def _buffer_loop(self):
while not self._closed:
chunk = os.read(self._rpipe, 10240)
if not chunk:
break
self._buffer.write(chunk)
def __enter__(self):
with self._lock:
if self._acquired == 0:
self._open()
self._acquired += 1
app.logger.debug("feed enter {}".format(self._acquired))
return self
def __exit__(self, *args):
with self._lock:
app.logger.debug("feed exit {}".format(self._acquired))
self._acquired -= 1
if self._acquired <= 0:
self._close()
class _MultiClientBuffer:
def __init__(self):
self._pipes = []
self._pipes_lock = threading.Lock()
self._closed = False
def new_reader(self):
with self._pipes_lock:
if self._closed:
raise IOError(errno.EIO, "already closed")
rpipe, wpipe = os.pipe()
self._pipes.append((rpipe, wpipe))
return rpipe
def write(self, chunk):
if self._closed:
return
pipes_to_del = []
try:
with self._pipes_lock:
pipes = list(self._pipes)
for idx, (_, wpipe) in enumerate(pipes):
try:
os.write(wpipe, chunk)
except BrokenPipeError:
pipes_to_del.append(idx)
os.close(wpipe)
except Exception:
pipes_to_del = range(len(pipes))
raise
finally:
with self._pipes_lock:
for pipe_idx in reversed(pipes_to_del):
del self._pipes[pipe_idx]
def close(self):
with self._pipes_lock:
self._closed = True
for _, wpipe in self._pipes:
os.close(wpipe)
self._pipes = []
global_ctx = _GlobalContext()
| 28.756881 | 128 | 0.525762 | 4,169 | 0.665018 | 463 | 0.073855 | 1,282 | 0.204498 | 0 | 0 | 600 | 0.095709 |
d81e111335bb2b0eceb3190733e973e3515afc1a | 252 | py | Python | code-ch02/test_ne.py | kcalvinalvin/editedProgrammingBitcoin | 9680a92cbacdd226cd143fac46d935d00109c902 | [
"MIT"
] | null | null | null | code-ch02/test_ne.py | kcalvinalvin/editedProgrammingBitcoin | 9680a92cbacdd226cd143fac46d935d00109c902 | [
"MIT"
] | null | null | null | code-ch02/test_ne.py | kcalvinalvin/editedProgrammingBitcoin | 9680a92cbacdd226cd143fac46d935d00109c902 | [
"MIT"
] | null | null | null | from unittest import TestCase
from eccCh02 import Point
class PointTest(TestCase):
def test_ne(self):
a = Point(x=3, y=-7, a=5, b=7)
b = Point(x=18, y=77, a=5, b=7)
self.assertTrue(a != b)
self.assertFalse(a != a)
| 22.909091 | 39 | 0.583333 | 194 | 0.769841 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d81f398ba332a8ddc4a369fda6218312310af6ed | 33 | py | Python | test/regression/features/imports/fromImport.py | ppelleti/berp | 30925288376a6464695341445688be64ac6b2600 | [
"BSD-3-Clause"
] | 90 | 2015-02-03T23:56:30.000Z | 2022-02-10T03:55:32.000Z | test/regression/features/imports/fromImport.py | ppelleti/berp | 30925288376a6464695341445688be64ac6b2600 | [
"BSD-3-Clause"
] | 4 | 2015-04-01T13:49:13.000Z | 2019-07-09T19:28:56.000Z | test/regression/features/imports/fromImport.py | bjpop/berp | 30925288376a6464695341445688be64ac6b2600 | [
"BSD-3-Clause"
] | 8 | 2015-04-25T03:47:52.000Z | 2019-07-27T06:33:56.000Z | from DefinesX import x
print(x)
| 8.25 | 22 | 0.757576 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d81f6f0578f05c3ef2f5009ba322576a6f13de5b | 220 | py | Python | src/dht11.py | Brumawen/temperature | e8743c641961a5e5c512f24239ec4c7755db55be | [
"MIT"
] | null | null | null | src/dht11.py | Brumawen/temperature | e8743c641961a5e5c512f24239ec4c7755db55be | [
"MIT"
] | null | null | null | src/dht11.py | Brumawen/temperature | e8743c641961a5e5c512f24239ec4c7755db55be | [
"MIT"
] | null | null | null | import Adafruit_DHT
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT11, 17)
if humidity is not None and temperature is not None:
print(str(temperature) + "," + str(humidity))
else:
print('-1,-1') | 31.428571 | 71 | 0.731818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.045455 |
d82025e227ac9b101a17b6ebb0619fb765a8ed84 | 3,067 | py | Python | utils/losses/hybird.py | wufanyou/WRL-Agriculture-Vision | 5e1617d9b8b39dbf9e2cfccb2987b1dbb7fbd43a | [
"MIT"
] | 5 | 2021-06-15T06:06:20.000Z | 2022-03-18T09:19:29.000Z | utils/losses/hybird.py | wufanyou/WRL-Agriculture-Vision | 5e1617d9b8b39dbf9e2cfccb2987b1dbb7fbd43a | [
"MIT"
] | null | null | null | utils/losses/hybird.py | wufanyou/WRL-Agriculture-Vision | 5e1617d9b8b39dbf9e2cfccb2987b1dbb7fbd43a | [
"MIT"
] | null | null | null | import torch.nn as nn
from torch import Tensor
from typing import Optional
from .lovasz_loss import CustomizeLovaszLoss, LovaszLoss
from .binary_cross_entropy import (
MaskBinaryCrossEntropyIgnoreIndex,
MaskBinaryCrossEntropy,
)
from .dice_loss import CustomizeDiceLoss
from .jaccard import CustomiseJaccardLoss
from .focal_loss import CustomizeFocalLoss
__ALL__ = ["Hybird", "HybirdV3", "HybirdV4"]
class Hybird(nn.Module):
def __init__(self, l1: float = 1.0, weight: Optional = None, **kwargs):
super(Hybird, self).__init__()
self.BCE = MaskBinaryCrossEntropyIgnoreIndex(weight=weight)
self.lovasz_loss = CustomizeLovaszLoss()
self.l1 = l1
def forward(self, pred: Tensor, target: Tensor, mask: Tensor) -> Tensor:
N, C, W, H = pred.shape
mask = mask[:, None].expand([N, C, W, H])
target[mask == 0] = 255
loss = self.BCE(pred, target) + self.l1 * self.lovasz_loss(pred, target)
loss /= 1 + self.l1
return loss
class HybirdV3(nn.Module):
def __init__(self, l1: float = 1.0, weight: Optional = None, **kwargs):
super(HybirdV3, self).__init__()
self.lovasz_loss = LovaszLoss(mode="multiclass", ignore_index=255)
self.ce = nn.CrossEntropyLoss(ignore_index=255)
self.l1 = l1
def forward(self, pred: Tensor, target: Tensor, mask: Tensor) -> Tensor:
target = target.argmax(1)
target[mask == 0] = 255
loss = self.ce(pred, target) + self.l1 * self.lovasz_loss(pred, target)
loss /= 1 + self.l1
return loss
class HybirdV4(nn.Module):
def __init__(self, l1: float = 1.0, weight: Optional = None, **kwargs):
super(HybirdV4, self).__init__()
self.bce = MaskBinaryCrossEntropy(weight=weight)
self.jaccard = CustomiseJaccardLoss(**kwargs)
self.l1 = l1
def forward(self, pred: Tensor, target: Tensor, mask: Tensor) -> Tensor:
loss = self.bce(pred, target, mask) + self.l1 * self.jaccard(pred, target, mask)
loss /= 1 + self.l1
return loss
class HybirdV5(nn.Module):
def __init__(self, l1: float = 1.0, weight: Optional = None, **kwargs):
super(HybirdV5, self).__init__()
self.bce = MaskBinaryCrossEntropy(weight=weight)
self.dice = CustomizeDiceLoss(**kwargs)
self.l1 = l1
def forward(self, pred: Tensor, target: Tensor, mask: Tensor) -> Tensor:
loss = self.bce(pred, target, mask) + self.l1 * self.dice(pred, target, mask)
loss /= 1 + self.l1
return loss
class HybirdV6(nn.Module):
def __init__(self, l1: float = 1.0, weight: Optional = None, **kwargs):
super(HybirdV6, self).__init__()
self.focal = CustomizeFocalLoss(**kwargs)
self.jaccard = CustomiseJaccardLoss(**kwargs)
self.l1 = l1
def forward(self, pred: Tensor, target: Tensor, mask: Tensor) -> Tensor:
loss = self.focal(pred, target, mask) + self.l1 * self.jaccard(
pred, target, mask
)
loss /= 1 + self.l1
return loss
| 35.662791 | 88 | 0.638735 | 2,643 | 0.861754 | 0 | 0 | 0 | 0 | 0 | 0 | 40 | 0.013042 |
d820b4f0770506dfe9510b1820590790869fb745 | 247 | py | Python | apollo/embeds/__init__.py | rpetti/apollo | 1304d8623e6dfe8c9b269b7e90611b3688c0c61e | [
"MIT"
] | null | null | null | apollo/embeds/__init__.py | rpetti/apollo | 1304d8623e6dfe8c9b269b7e90611b3688c0c61e | [
"MIT"
] | null | null | null | apollo/embeds/__init__.py | rpetti/apollo | 1304d8623e6dfe8c9b269b7e90611b3688c0c61e | [
"MIT"
] | null | null | null | from .about_embed import AboutEmbed
from .event_embed import EventEmbed
from .help_embed import HelpEmbed
from .select_channel_embed import SelectChannelEmbed
from .start_time_embed import StartTimeEmbed
from .time_zone_embed import TimeZoneEmbed
| 35.285714 | 52 | 0.878543 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d820d8a909532943b52458c6fb765ad9ddb0d579 | 4,180 | py | Python | sapai/foods.py | clsandoval/sapai | 7d27e35e6554c62d3e0fa5a0a6377d1838a061e6 | [
"MIT"
] | null | null | null | sapai/foods.py | clsandoval/sapai | 7d27e35e6554c62d3e0fa5a0a6377d1838a061e6 | [
"MIT"
] | null | null | null | sapai/foods.py | clsandoval/sapai | 7d27e35e6554c62d3e0fa5a0a6377d1838a061e6 | [
"MIT"
] | null | null | null |
#%%
import numpy as np
from sapai.data import data
from sapai.rand import MockRandomState
#%%
class Food():
def __init__(self,
name="food-none",
shop=None,
team=[],
seed_state = None):
"""
Food class definition the types of interactions that food undergoes
"""
if len(name) != 0:
if not name.startswith("food-"):
name = "food-{}".format(name)
self.eaten = False
self.shop = shop
self.seed_state = seed_state
if self.seed_state != None:
self.rs = np.random.RandomState()
self.rs.set_state(self.seed_state)
else:
### Otherwise, set use
self.rs = MockRandomState()
self.attack = 0
self.health = 0
self.base_attack = 0
self.base_health = 0
self.status = "none"
self.effect = "none"
self.fd = {}
self.name = name
if name not in data["foods"]:
raise Exception("Food {} not found".format(name))
fd = data["foods"][name]["ability"]
self.fd = fd
self.attack = 0
self.health = 0
self.effect = fd["effect"]["kind"]
if "attackAmount" in fd["effect"]:
self.attack = fd["effect"]["attackAmount"]
self.base_attack = fd["effect"]["attackAmount"]
if "healthAmount" in fd["effect"]:
self.health = fd["effect"]["healthAmount"]
self.base_health = fd["effect"]["healthAmount"]
if "status" in fd["effect"]:
self.status = fd["effect"]["status"]
def apply(self, pet=None):
"""
Serve the food object to the input pet
"""
if self.eaten == True:
raise Exception("This should not be possible")
if self.name == "food-canned-food":
self.shop.can += self.attack
return
pet.attack += self.attack
pet.health += self.health
if self.effect == "ModifyStats":
### Done
return pet
elif self.effect == "ApplyStatus":
pet.status = self.status
def copy(self):
copy_food = Food(self.name, self.shop)
for key,value in self.__dict__.items():
### Although this approach will copy the internal dictionaries by
### reference rather than copy by value, these dictionaries will
### never be modified anyways.
### All integers and strings are copied by value automatically with
### Python, therefore, this achieves the correct behavior
copy_food.__dict__[key] = value
return copy_food
@property
def state(self):
#### Ensure that state can be JSON serialized
if getattr(self, "rs", False):
if type(self.rs).__name__ == "MockRandomState":
seed_state = None
else:
seed_state = list(self.rs.get_state())
seed_state[1] = seed_state[1].tolist()
else:
seed_state = None
state_dict = {
"type": "Food",
"name": self.name,
"eaten": self.eaten,
"attack": self.attack,
"health": self.health,
"seed_state": seed_state
}
return state_dict
@classmethod
def from_state(cls, state):
food = cls(name=state["name"])
food.attack = state["attack"]
food.health = state["health"]
food.eaten = state["eaten"],
### Supply seed_state in state dict should be optional
if "seed_state" in state:
if state["seed_state"] != None:
food.seed_state = state["seed_state"]
food.rs = np.random.RandomState()
food.rs.set_state(state["seed_state"])
return food
def __repr__(self):
return "< {} {}-{} {} >".format(
self.name, self.attack, self.health, self.status)
# %%
| 30.071942 | 79 | 0.510048 | 4,065 | 0.972488 | 0 | 0 | 1,163 | 0.27823 | 0 | 0 | 1,095 | 0.261962 |
d8215c6350572765b0f96735318eeda8369f7f6b | 459 | py | Python | git_lint_branch/single/__init__.py | juped/git-lint-branch | 7b4a89d2f707025671ec642919f83b38094f9300 | [
"0BSD"
] | 2 | 2020-11-12T03:34:38.000Z | 2021-02-20T01:34:00.000Z | git_lint_branch/single/__init__.py | juped/git-lint-branch | 7b4a89d2f707025671ec642919f83b38094f9300 | [
"0BSD"
] | 11 | 2020-07-13T18:38:05.000Z | 2020-07-17T14:44:52.000Z | git_lint_branch/single/__init__.py | MLH-Fellowship/git-lint-branch | 7b4a89d2f707025671ec642919f83b38094f9300 | [
"0BSD"
] | 1 | 2020-07-30T11:10:04.000Z | 2020-07-30T11:10:04.000Z | from pygit2 import Commit
from git_lint_branch.linter_output import *
from git_lint_branch.single.example_linter import *
from git_lint_branch.single.regex_linter import *
from git_lint_branch.single.diff_size_linter import diff_size_linter
from git_lint_branch.single.tense_linter import *
from git_lint_branch.single.backwards_merge_linter import *
single_linters = [
regex_linter,
diff_size_linter,
tense_linter,
backwards_merge_linter,
]
| 30.6 | 68 | 0.834423 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
d821891ab4fa48d558099ae464e5ae75e623dfe7 | 271 | py | Python | src/pyNonin/pynonin/__init__.py | Hammit/wais-pop | fdf1a7da38759d5d082b95c82dd883aa56df6816 | [
"MIT"
] | 1 | 2020-12-30T03:25:13.000Z | 2020-12-30T03:25:13.000Z | src/pyNonin/pynonin/__init__.py | Hammit/wais-pop | fdf1a7da38759d5d082b95c82dd883aa56df6816 | [
"MIT"
] | null | null | null | src/pyNonin/pynonin/__init__.py | Hammit/wais-pop | fdf1a7da38759d5d082b95c82dd883aa56df6816 | [
"MIT"
] | null | null | null | """
pyNonin package initalization file
(c) Charles Fracchia 2013
charlesfracchia@gmail.com
Permission granted for experimental and personal use;
license for commercial sale available from the author.
"""
#Import main Device base class
from pynonin.packet import Packet
| 22.583333 | 54 | 0.811808 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.867159 |
d8224ebaa753522d6abba555327e1a47576b6f4d | 6,004 | py | Python | ltcl/modules/birnn.py | anonymous-authors-iclr2022-481/ltcl | 0d8902228fa6c37f875bb60c4d16988462a9655a | [
"MIT"
] | 8 | 2021-10-16T08:35:37.000Z | 2022-02-10T09:25:50.000Z | leap/modules/birnn.py | weirayao/leap | 8d10b8413d02d3be49d5c02a13a0aa60a741d8da | [
"MIT"
] | null | null | null | leap/modules/birnn.py | weirayao/leap | 8d10b8413d02d3be49d5c02a13a0aa60a741d8da | [
"MIT"
] | 1 | 2021-11-30T04:06:43.000Z | 2021-11-30T04:06:43.000Z | # Require: input_dim, z_dim, hidden_dim, lag
# Input: {f_i}_i=1^T: [BS, len=T, dim=8]
# Output: {z_i}_i=1^T: [BS, len=T, dim=8]
# Bidirectional GRU/LSTM (1 layer)
# Sequential sampling & reparameterization
import pyro
import torch
import ipdb as pdb
import numpy as np
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import pyro.distributions as dist
from collections import defaultdict
from torch.autograd import Variable
def kaiming_init(m):
    """Initialize a module in place.

    Linear/Conv2d layers get Kaiming-normal weights and a zero bias;
    BatchNorm1d/BatchNorm2d layers get unit weight and zero bias.
    Modules of any other type are left untouched, so this function can be
    applied blindly over every submodule of a network.
    """
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        # kaiming_normal_ is the in-place, non-deprecated spelling of the
        # old init.kaiming_normal (removed in recent PyTorch releases).
        init.kaiming_normal_(m.weight)
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
        # BatchNorm built with affine=False has no weight/bias parameters.
        if m.weight is not None:
            m.weight.data.fill_(1)
        if m.bias is not None:
            m.bias.data.fill_(0)
def normal_init(m, mean, std):
    """Apply normal-distribution initialisation to a single module, in place.

    Linear/Conv2d weights are drawn from N(mean, std) with zero bias;
    BatchNorm layers get unit weight and zero bias. Other module types
    are left untouched.

    Args:
        m: an ``nn.Module`` (typically passed via ``model.apply``).
        mean: mean of the weight distribution.
        std: standard deviation of the weight distribution.
    """
    if isinstance(m, (nn.Linear, nn.Conv2d)):
        m.weight.data.normal_(mean, std)
        # Guard on m.bias, not m.bias.data: layers built with bias=False
        # have m.bias is None, and None.data raises AttributeError.
        if m.bias is not None:
            m.bias.data.zero_()
    elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
        m.weight.data.fill_(1)
        if m.bias is not None:
            m.bias.data.zero_()
class Inference_Net(nn.Module):
    """Bidirectional-RNN inference network.

    Encodes a feature sequence {f_i}_{i=1..T} with a bidirectional LSTM,
    then sequentially samples latent states {z_i} via the
    reparameterisation trick, conditioning each z_t on the previous
    ``lag`` latents plus the RNN output at step t.
    """
    def __init__(self, input_dim=8, z_dim=8, hidden_dim=128, lag=2):
        """Build the RNN and the mu/var heads.

        Args:
            input_dim: dimensionality of the raw observations (only used
                by the commented-out encoder/decoder below).
            z_dim: latent dimensionality per time step.
            hidden_dim: width of the hidden layers in the mu/var MLPs.
            lag: number of past latents each new latent conditions on.
        """
        super(Inference_Net, self).__init__()
        self.lag = lag
        self.z_dim = z_dim
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        # Both RNNs are constructed but only the LSTM is used in forward().
        self.lstm = nn.LSTM(z_dim, z_dim, num_layers=1, batch_first=True, bidirectional=True)
        self.gru = nn.GRU(z_dim, z_dim, num_layers=1, batch_first=True, bidirectional=True)
        '''
        # 1. encoder & decoder (weiran parts)
        # input: {xi}_{i=1}^T; output: {fi}_{i=1}^T
        # input: {zi}_{i=1}^T; output: {recon_xi}_{i=1}^T
        self.encoder = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_dim, z_dim),
            nn.LeakyReLU(0.2)
        )
        self.decoder = nn.Sequential(
            nn.LeakyReLU(0.2),
            nn.Linear(z_dim, hidden_dim),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_dim, input_dim)
        )
        '''
        # Heads mapping [previous latents, RNN output] -> mu_t / sigma_t.
        # NOTE(review): the input width 3*z_dim looks inconsistent with the
        # concatenation built in forward() (lag latents + a bidirectional
        # output of width 2*z_dim) — confirm the intended widths.
        self.mu_sample = nn.Sequential(
            nn.Linear(3*z_dim, hidden_dim),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_dim, z_dim),
            nn.LeakyReLU(0.2)
        )
        self.var_sample = nn.Sequential(
            nn.Linear(3*z_dim, hidden_dim),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_dim, hidden_dim),
            nn.LeakyReLU(0.2),
            nn.Linear(hidden_dim, z_dim),
            # NOTE(review): nn.Softmax expects a dimension index; the 0.2
            # argument looks like a slope meant for Softplus/LeakyReLU and
            # would fail at call time — confirm the intended activation.
            nn.Softmax(0.2),
        )
    def weight_init(self):
        # Apply kaiming_init (module-level helper) to every submodule of
        # every registered child container.
        for block in self._modules:
            for m in self._modules[block]:
                kaiming_init(m)
    def sample_latent(self, mu, sigma, sample):
        """Reparameterised sample z = mu + eps * exp(sigma / 2).

        ``sigma`` is interpreted as a log-variance. When ``sample`` is
        falsy, returns ``mu`` (deterministic mode).
        """
        if sample:
            std = sigma.div(2).exp()
            eps = Variable(std.data.new(std.size()).normal_())
            latent = mu + eps*std
            return latent
        else:
            return mu.contiguous()
    def forward(self, ft, sample=True):
        """Return (latent, mu, sigma), each stacked over the sequence axis.

        Args:
            ft: feature sequence, assumed (batch, seq_len, z_dim) since the
                RNNs were built with batch_first=True — TODO confirm.
            sample: if True, draw latents stochastically; else use mu.
        """
        '''
        ## encoder (weiran part)
        # input: xt(batch, seq_len, z_dim)
        # output: ft(seq_len, batch, z_dim)
        _, length, _ = xt.shape
        ft = self.encoder(xt.view(-1, self.z_dim))
        ft = ft.view(-1, length, self.z_dim)
        '''
        ## bidirectional lstm/gru
        # input: ft(seq_len, batch, z_dim)
        # output: beta(batch, seq_len, z_dim)
        hidden = None
        beta, hidden = self.lstm(ft, hidden)
        # beta, hidden = self.gru(ft, hidden)
        ## sequential sampling & reparametrization
        ## transition: p(zt|z_tau)
        latent = []; mu = []; sigma = []
        # Seed the history with `lag` zero latents taken from a zero tensor
        # shaped like the RNN output.
        init = torch.zeros(beta.shape)
        for i in range(self.lag):
            latent.append(init[:,i,:])
        for i in range(beta.shape[1]):
            # Concatenate the last `lag` latents...
            mid = torch.cat([latent[-self.lag], latent[-self.lag+1]], dim=1)
            for j in range(1, self.lag-1): # assert self.lag > 1
                mid = torch.cat([mid, latent[-self.lag+j+1]], dim=1)
            # ...then append the RNN output for step i and run both heads.
            input = torch.cat([mid, beta[:,i,:]], dim=1)
            mut = self.mu_sample(input)
            sigmat = self.var_sample(input)
            latentt = self.sample_latent(mut, sigmat, sample)
            latent.append(latentt)
            mu.append(mut); sigma.append(sigmat)
        # Stack per-step tensors along a new time axis (includes the zero
        # seed latents in `latent`).
        latent = torch.squeeze(torch.stack(latent, dim=1))
        mu = torch.squeeze(torch.stack(mu, dim=1))
        sigma = torch.squeeze(torch.stack(sigma, dim=1))
        '''
        ## decoder (weiran part)
        # input: latent(batch, seq_len, z_dim)
        # output: recon_xt(batch, seq_len, z_dim)
        recon_xt = self.decoder(latent.view(-1, self.z_dim))
        recon_xt = recon_xt.view(-1, length, self.z_dim)
        '''
        return latent, mu, sigma
| 38.987013 | 93 | 0.485676 | 4,874 | 0.811792 | 0 | 0 | 0 | 0 | 0 | 0 | 2,186 | 0.364091 |
d823ea6e9ab0742d603b2efd793e728e192e4ac9 | 4,103 | py | Python | examples/kalman/gnss_kf.py | karlamnordstrom/laika3 | a3fc4d6a6292cd0862451f67ad3f8db8f9d701de | [
"MIT"
] | 3 | 2019-01-03T04:44:05.000Z | 2019-04-20T06:39:19.000Z | examples/kalman/gnss_kf.py | karlamnordstrom/laika3 | a3fc4d6a6292cd0862451f67ad3f8db8f9d701de | [
"MIT"
] | null | null | null | examples/kalman/gnss_kf.py | karlamnordstrom/laika3 | a3fc4d6a6292cd0862451f67ad3f8db8f9d701de | [
"MIT"
] | 1 | 2018-12-23T18:01:37.000Z | 2018-12-23T18:01:37.000Z | #!/usr/bin/env python
import numpy as np
from kalman_helpers import ObservationKind
from ekf_sym import EKF_sym
from laika.raw_gnss import GNSSMeasurement
def parse_prr(m):
  """Unpack one raw GNSS measurement row for a pseudorange-rate update.

  Returns (z_i, R_i, sat_pos_vel_i): the observed pseudorange rate, its
  variance as a 2D array, and the concatenated satellite
  position/velocity vector.
  """
  sat_state = np.concatenate((m[GNSSMeasurement.SAT_POS],
                              m[GNSSMeasurement.SAT_VEL]))
  observation = m[GNSSMeasurement.PRR]
  variance = np.atleast_2d(m[GNSSMeasurement.PRR_STD]**2)
  return observation, variance, sat_state
def parse_pr(m):
  """Unpack one raw GNSS measurement row for a pseudorange update.

  Returns (z_i, R_i, sat_pos_freq_i): the observed pseudorange as a 1D
  array, its variance as a 2D array, and the satellite position with the
  GLONASS frequency number appended.
  """
  sat_pos_freq = np.concatenate((m[GNSSMeasurement.SAT_POS],
                                 np.array([m[GNSSMeasurement.GLONASS_FREQ]])))
  observation = np.atleast_1d(m[GNSSMeasurement.PR])
  variance = np.atleast_2d(m[GNSSMeasurement.PR_STD]**2)
  return observation, variance, sat_pos_freq
class States(object):
  """Index layout of the 11-element state vector used by GNSSKalman.

  Each constant is a slice into the state / covariance arrays. Clock
  terms are expressed in light-meters (time multiplied by the speed of
  light) so they share units with the ECEF position.
  """
  ECEF_POS = slice(0,3) # x, y and z in ECEF in meters
  ECEF_VELOCITY = slice(3,6)  # vx, vy, vz in ECEF in m/s
  CLOCK_BIAS = slice(6, 7)  # clock bias in light-meters,
  CLOCK_DRIFT = slice(7, 8)  # clock drift in light-meters/s,
  CLOCK_ACCELERATION = slice(8, 9)  # clock acceleration in light-meters/s**2
  GLONASS_BIAS = slice(9, 10)  # GLONASS inter-system bias term (see next line), in m
  GLONASS_FREQ_SLOPE = slice(10, 11)  # GLONASS bias in m expressed as bias + freq_num*freq_slope
class GNSSKalman(object):
  """Extended Kalman filter over raw GNSS observations.

  Wraps an EKF_sym filter whose 11-dimensional state is laid out by the
  States class above (ECEF position/velocity, receiver clock terms and
  GLONASS bias terms). Measurements are pseudoranges and pseudorange
  rates produced by parse_pr / parse_prr.
  """
  def __init__(self, N=0, max_tracks=3000):
    # NOTE(review): N and max_tracks are currently unused here.
    # Initial state: a fixed ECEF position with zero velocity and clock terms.
    x_initial = np.array([-2712700.6008, -4281600.6679, 3859300.1830,
                          0, 0, 0,
                          0, 0, 0,
                          0, 0])

    # state covariance
    P_initial = np.diag([10000**2, 10000**2, 10000**2,
                         10**2, 10**2, 10**2,
                         (2000000)**2, (100)**2, (0.5)**2,
                         (10)**2, (1)**2])

    # process noise
    Q = np.diag([0.3**2, 0.3**2, 0.3**2,
                 3**2, 3**2, 3**2,
                 (.1)**2, (0)**2, (0.01)**2,
                 .1**2, (.01)**2])

    self.dim_state = x_initial.shape[0]

    # mahalanobis outlier rejection
    maha_test_kinds = []#ObservationKind.PSEUDORANGE_RATE, ObservationKind.PSEUDORANGE, ObservationKind.PSEUDORANGE_GLONASS]

    name = 'gnss'

    # init filter
    self.filter = EKF_sym(name, Q, x_initial, P_initial, self.dim_state, self.dim_state, maha_test_kinds=maha_test_kinds)

  @property
  def x(self):
    # Current filter state vector.
    return self.filter.state()

  @property
  def P(self):
    # Current state covariance matrix.
    return self.filter.covs()

  def predict(self, t):
    # Propagate the state to time t without incorporating a measurement.
    return self.filter.predict(t)

  def rts_smooth(self, estimates):
    # Rauch-Tung-Striebel backward smoothing over a batch of estimates
    # (no quaternion normalisation — the state contains none).
    return self.filter.rts_smooth(estimates, norm_quats=False)

  def init_state(self, state, covs_diag=None, covs=None, filter_time=None):
    """Reset the filter state, optionally with a new covariance.

    Priority: covs_diag (diagonal) > covs (full matrix) > keep current.
    """
    if covs_diag is not None:
      P = np.diag(covs_diag)
    elif covs is not None:
      P = covs
    else:
      P = self.filter.covs()
    self.filter.init_state(state, P, filter_time)

  def predict_and_observe(self, t, kind, data):
    """Dispatch a batch of measurements of the given ObservationKind.

    NOTE(review): `r` is unbound if `kind` matches none of the branches
    below — confirm callers only pass pseudorange / pseudorange-rate kinds.
    """
    if len(data) > 0:
      data = np.atleast_2d(data)
    if kind == ObservationKind.PSEUDORANGE_GPS or kind == ObservationKind.PSEUDORANGE_GLONASS:
      r = self.predict_and_update_pseudorange(data, t, kind)
    elif kind == ObservationKind.PSEUDORANGE_RATE_GPS or kind == ObservationKind.PSEUDORANGE_RATE_GLONASS:
      r = self.predict_and_update_pseudorange_rate(data, t, kind)
    return r

  def predict_and_update_pseudorange(self, meas, t, kind):
    """Stack parsed pseudorange measurements and run one batch update."""
    R = np.zeros((len(meas), 1, 1))
    sat_pos_freq = np.zeros((len(meas), 4))
    z = np.zeros((len(meas), 1))
    for i, m in enumerate(meas):
      z_i, R_i, sat_pos_freq_i = parse_pr(m)
      sat_pos_freq[i,:] = sat_pos_freq_i
      z[i,:] = z_i
      R[i,:,:] = R_i
    return self.filter.predict_and_update_batch(t, kind, z, R, sat_pos_freq)

  def predict_and_update_pseudorange_rate(self, meas, t, kind):
    """Stack parsed pseudorange-rate measurements and run one batch update."""
    R = np.zeros((len(meas), 1, 1))
    z = np.zeros((len(meas), 1))
    sat_pos_vel = np.zeros((len(meas), 6))
    for i, m in enumerate(meas):
      z_i, R_i, sat_pos_vel_i = parse_prr(m)
      sat_pos_vel[i] = sat_pos_vel_i
      R[i,:,:] = R_i
      z[i, :] = z_i
    return self.filter.predict_and_update_batch(t, kind, z, R, sat_pos_vel)
if __name__ == "__main__":
GNSSKalman()
| 33.909091 | 124 | 0.635632 | 3,274 | 0.797953 | 0 | 0 | 109 | 0.026566 | 0 | 0 | 437 | 0.106507 |
d823f16bddcc63dc00f694bd5e520729549077ef | 3,728 | py | Python | lib/googlecloudsdk/api_lib/dataproc/poller/batch_poller.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/googlecloudsdk/api_lib/dataproc/poller/batch_poller.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/googlecloudsdk/api_lib/dataproc/poller/batch_poller.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Waiter utility for api_lib.util.waiter.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from apitools.base.py import exceptions as apitools_exceptions
from googlecloudsdk.api_lib.dataproc import exceptions
from googlecloudsdk.api_lib.dataproc import util
from googlecloudsdk.api_lib.dataproc.poller import (
abstract_operation_streamer_poller as dataproc_poller_base)
from googlecloudsdk.core import log
class BatchPoller(dataproc_poller_base.AbstractOperationStreamerPoller):
  """Poller for batch workload.

  Polls a Dataproc batch resource until it reaches a terminal state and
  surfaces the outcome through log messages / exceptions.
  """

  def IsDone(self, batch):
    """See base class. A batch is done once it reaches a terminal state."""
    if batch and batch.state in (
        self.dataproc.messages.Batch.StateValueValuesEnum.SUCCEEDED,
        self.dataproc.messages.Batch.StateValueValuesEnum.CANCELLED,
        self.dataproc.messages.Batch.StateValueValuesEnum.FAILED):
      return True
    return False

  def Poll(self, batch_ref):
    """See base class. Fetches the batch; returns None on transient errors."""
    request = (
        self.dataproc.messages.DataprocProjectsLocationsBatchesGetRequest(
            name=batch_ref))
    try:
      return self.dataproc.client.projects_locations_batches.Get(request)
    except apitools_exceptions.HttpError as error:
      # Server-side (5xx) errors are logged and polling continues (implicit
      # None return); client-side errors will not resolve by retrying.
      log.warning('Get Batch failed:\n{}'.format(error))
      if util.IsClientHttpException(error):
        # Stop polling if encounter client Http error (4xx).
        raise

  def _GetResult(self, batch):
    """Handles errors.

    Error handling for batch jobs. This happens after the batch reaches one
    of the complete states.

    Overrides.

    Args:
      batch: The batch resource.

    Returns:
      None. The result is directly output to log.err.

    Raises:
      JobTimeoutError: When waiter timed out.
      JobError: When remote batch job is failed.
    """
    if not batch:
      # Batch resource is None but polling is considered done.
      # This only happens when the waiter timed out.
      raise exceptions.JobTimeoutError(
          'Timed out while waiting for batch job.')

    if (batch.state ==
        self.dataproc.messages.Batch.StateValueValuesEnum.SUCCEEDED):
      # Success: only sanity-check that the driver output stream finished.
      if not self.driver_log_streamer:
        log.warning('Expected batch job output not found.')
      elif self.driver_log_streamer.open:
        # Remote output didn't end correctly.
        log.warning('Batch job terminated, but output did not finish '
                    'streaming.')
    elif (batch.state ==
          self.dataproc.messages.Batch.StateValueValuesEnum.CANCELLED):
      log.warning('Batch job is CANCELLED.')
    else:
      # FAILED: build an error message, appending the server-side detail
      # and normalising the trailing punctuation.
      err_message = 'Batch job is FAILED.'
      if batch.stateMessage:
        err_message = '{} Detail: {}'.format(err_message, batch.stateMessage)
        if err_message[-1] != '.':
          err_message += '.'
      raise exceptions.JobError(err_message)

    # Nothing to return, since the result is directly output to users.
    return None

  def _GetOutputUri(self, batch):
    """See base class. Returns the driver output URI if already known."""
    if batch and batch.runtimeInfo and batch.runtimeInfo.outputUri:
      return batch.runtimeInfo.outputUri
    return None
| 35.169811 | 77 | 0.7103 | 2,621 | 0.703058 | 0 | 0 | 0 | 0 | 0 | 0 | 1,604 | 0.430258 |
d82729ab5767decffbc8b4f001723f7af4528444 | 3,107 | py | Python | 3.1 Prim Minimum Spanning Tree using Brute Force.py | INOS-soft/MOmmentum-SECList | 779db12933a5c351c3a5f3a3bc70d5f122033aba | [
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | null | null | null | 3.1 Prim Minimum Spanning Tree using Brute Force.py | INOS-soft/MOmmentum-SECList | 779db12933a5c351c3a5f3a3bc70d5f122033aba | [
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | null | null | null | 3.1 Prim Minimum Spanning Tree using Brute Force.py | INOS-soft/MOmmentum-SECList | 779db12933a5c351c3a5f3a3bc70d5f122033aba | [
"BSD-3-Clause",
"Apache-2.0",
"MIT"
] | 1 | 2021-04-20T18:57:55.000Z | 2021-04-20T18:57:55.000Z | """
3.Question 3
In this programming problem you'll code up Prim's minimum spanning tree algorithm.
This file (edges.txt) describes an undirected graph with integer edge costs. It has the format
[number_of_nodes] [number_of_edges]
[one_node_of_edge_1] [other_node_of_edge_1] [edge_1_cost]
[one_node_of_edge_2] [other_node_of_edge_2] [edge_2_cost]
...
For example, the third line of the file is "2 3 -8874", indicating that there is an edge connecting vertex #2 and vertex #3 that has cost -8874.
You should NOT assume that edge costs are positive, nor should you assume that they are distinct.
Your task is to run Prim's minimum spanning tree algorithm on this graph. You should report the overall cost of a minimum spanning tree --- an integer, which may or may not be negative --- in the box below.
IMPLEMENTATION NOTES: This graph is small enough that the straightforward O(mn) time implementation of Prim's algorithm should work fine. OPTIONAL: For those of you seeking an additional challenge, try implementing a heap-based version. The simpler approach, which should already give you a healthy speed-up, is to maintain relevant edges in a heap (with keys = edge costs). The superior approach stores the unprocessed vertices in the heap, as described in lecture. Note this requires a heap that supports deletions, and you'll probably need to maintain some kind of mapping between vertices and their positions in the heap.
"""
class Node(object):
    """A graph vertex: integer id plus a (neighbour_index, cost) adjacency list."""
    def __init__(self, index):
        # `index` is the vertex id used to address the `nodes` list;
        # `connections` is filled by dataReader with undirected edges.
        self.index = index
        self.connections = []
def dataReader(filePath):
    """Parse an edge-list file into an adjacency-list graph.

    The first line holds "[number_of_nodes] [number_of_edges]"; each
    following line holds "[node_u] [node_v] [cost]". Vertices are
    1-based, so the returned node list has numNodes + 1 entries with
    index 0 unused. Each edge is recorded in both directions.

    Returns:
        (numNodes, numEdges, nodes)
    """
    with open(filePath) as fh:
        lines = fh.readlines()

    numNodes, numEdges = list(map(int, lines[0].split()))
    nodes = [Node(i) for i in range(numNodes + 1)]

    for line in lines[1:]:
        u, v, cost = list(map(int, line.split()))
        nodes[u].connections.append((v, cost))
        nodes[v].connections.append((u, cost))
    return numNodes, numEdges, nodes
def PRIM_minimumSpanningTree(nodes):
    """Prim's MST via brute-force O(m*n) edge scanning.

    `nodes` is the list produced by dataReader: entry 0 is an unused
    placeholder and every real vertex carries a (neighbour, cost)
    adjacency list. Prints whether the graph turned out to be connected
    and returns the accumulated cost of the edges added to the tree.
    """
    total_cost = 0
    in_tree = [False] * len(nodes)
    tree_nodes = []

    # Arbitrary start: grow the tree from vertex 1.
    in_tree[1] = True
    tree_nodes.append(nodes[1])

    while len(tree_nodes) != len(nodes) - 1:
        best_cost = None
        best_node = None
        # Brute-force scan of every edge leaving the current tree.
        for tree_node in tree_nodes:
            for neighbour_idx, edge_cost in tree_node.connections:
                if not in_tree[neighbour_idx] and (best_cost is None or edge_cost < best_cost):
                    best_cost = edge_cost
                    best_node = nodes[neighbour_idx]
        if best_node is None:
            # No crossing edge left: the remaining vertices are unreachable.
            break
        in_tree[best_node.index] = True
        tree_nodes.append(best_node)
        total_cost += best_cost

    if len(tree_nodes) == len(nodes) - 1:
        print("The graph is connected.")
    else:
        print("The graph is not connected.")
    return total_cost
def main():
    # Entry point: load the edge list shipped with the exercise and report
    # the total weight of the minimum spanning tree found by Prim's algorithm.
    filePath = "data/edges.txt"
    numNodes, numEdges, nodes = dataReader(filePath)
    totalCost = PRIM_minimumSpanningTree(nodes)
    print("Total cost of MST: ", totalCost)
if __name__ == "__main__":
main()
| 34.142857 | 626 | 0.718378 | 95 | 0.030576 | 0 | 0 | 0 | 0 | 0 | 0 | 1,660 | 0.534277 |
d82a5e6a7cb6b7c55c3841490f1226e6b98ca874 | 2,108 | py | Python | base/vulcan_management/vulcan_agent.py | PeterStuck/teacher-app | e71c5b69019450a9ac8694fb461d343ce33e1b35 | [
"CC0-1.0"
] | null | null | null | base/vulcan_management/vulcan_agent.py | PeterStuck/teacher-app | e71c5b69019450a9ac8694fb461d343ce33e1b35 | [
"CC0-1.0"
] | null | null | null | base/vulcan_management/vulcan_agent.py | PeterStuck/teacher-app | e71c5b69019450a9ac8694fb461d343ce33e1b35 | [
"CC0-1.0"
] | null | null | null | from time import sleep
from selenium.common.exceptions import NoSuchElementException
from .vulcan_webdriver import VulcanWebdriver
class VulcanAgent:
    """ Class to perform actions on Vulcan Uonet page.

    Drives a Selenium webdriver through login and navigation on the
    Vulcan Uonet site. Element-lookup failures are reported to the user
    via injected JavaScript alerts instead of raising.
    """
    def __init__(self, credentials: dict, vulcan_data = None):
        # `credentials` must contain 'email' and 'password' keys;
        # `vulcan_data` is expected to expose a `department` attribute
        # (used by __select_department) — TODO confirm its full contract.
        self.driver = VulcanWebdriver()
        self.driver.open_vulcan_page()
        self.credentials = (credentials['email'], credentials['password'])
        self.vd = vulcan_data
    def go_to_lessons_menu(self):
        # Fixed sleeps give the page time to load between navigation steps.
        self.login_into_service()
        sleep(1)
        self.__select_department()
        sleep(1.5)
    def login_into_service(self):
        """ Login into Vulcan Uonet with passed credentials """
        try:
            self.driver.find_element_by_css_selector(".loginButton").click()
            self.__send_credentials()
        except NoSuchElementException as e:
            # Surface the failure in the browser; the (Polish) alert text is
            # user-facing and intentionally left untranslated.
            print(e)
            self.driver.execute_script("alert('#Error# Nie udało się znaleźć przycisku logowania.');")
    def __send_credentials(self):
        """ Pastes login data into fields on page and submit them """
        try:
            email_input = self.driver.find_element_by_css_selector("#LoginName")
            email_input.send_keys(self.credentials[0])
            pass_input = self.driver.find_element_by_css_selector("#Password")
            pass_input.send_keys(self.credentials[1])
            # The submit button is located by its (Polish) value attribute.
            login_submit_btn = self.driver.find_element_by_xpath('//input[@value="Zaloguj się >"]')
            login_submit_btn.click()
        except NoSuchElementException as e:
            print(e)
            self.driver.execute_script(
                "alert('#Error# Problem ze znalezieniem elementów lub wprowadzeniem danych do zalogowania.');")
    def __select_department(self):
        """ Selects department on main page """
        try:
            # Click the parent of the span whose text equals the department name.
            self.driver.find_element_by_xpath(f'//span[text()="{self.vd.department}"]/..').click()
        except NoSuchElementException as e:
            print(e)
            self.driver.execute_script("alert('#Error# Problem ze znalezieniem podanego departamentu.');")
| 36.982456 | 111 | 0.6537 | 1,977 | 0.935194 | 0 | 0 | 0 | 0 | 0 | 0 | 566 | 0.267739 |
d82a7fa6110039063eb64f8069818904b8242eea | 695 | py | Python | shmeppytools/imageconverter/progress_bar.py | essarrjay/ShmeppyImagetoJSON | 45f12b817f2e9f0c162176493110a81a73cb327d | [
"MIT"
] | 4 | 2020-08-04T09:46:36.000Z | 2020-09-03T23:54:55.000Z | shmeppytools/imageconverter/progress_bar.py | essarrjay/ShmeppyImagetoJSON | 45f12b817f2e9f0c162176493110a81a73cb327d | [
"MIT"
] | 10 | 2020-08-04T03:03:46.000Z | 2020-08-29T03:44:46.000Z | shmeppytools/imageconverter/progress_bar.py | essarrjay/ShmeppyImagetoJSON | 45f12b817f2e9f0c162176493110a81a73cb327d | [
"MIT"
] | null | null | null | import sys
def progress_bar(it, prefix="", suffix="", width=60, file=sys.stdout):
    """An iterable-like obj for command line progress_bar.

    Wraps a sized iterable and redraws a textual progress bar on *file*
    (using carriage-return overwrite) every time an item is consumed.

    Usage:
    for i in progress_bar(range(15), "Processing: ", "Part ", 40):
        <some long running calculation>

    Processing: [####################################] Part 16/16

    Args:
        it: a sized iterable (must support len()).
        prefix/suffix: text drawn before/after the bar.
        width: bar width in characters.
        file: writable text stream the bar is drawn on.
    """
    count = len(it)

    def show(j):
        # j of count items done -> proportionally filled bar prefix.
        x = int(width*(j)/count)
        bar = "#"*x
        remaining = "."*(width-x)
        file.write(f"{prefix}[{bar}{remaining}]{suffix}{j}/{count}\r")
        file.flush()

    if count == 0:
        # Empty iterable: nothing to draw (and show() would divide by zero);
        # still terminate the line so subsequent output starts cleanly.
        file.write("\n")
        file.flush()
        return

    show(0)
    for i, item in enumerate(it):
        yield item
        show(i+1)
    file.write("\n")
    file.flush()
d82bde8f0fa19324bbaff372f3a7658cbcb864cb | 136 | py | Python | squarespiral.py | lstoomet/peeterscript | ef60163f6af98f8316c20a5fa55f3507f7ed895b | [
"Unlicense"
] | null | null | null | squarespiral.py | lstoomet/peeterscript | ef60163f6af98f8316c20a5fa55f3507f7ed895b | [
"Unlicense"
] | null | null | null | squarespiral.py | lstoomet/peeterscript | ef60163f6af98f8316c20a5fa55f3507f7ed895b | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
import turtle
t = turtle.Pen()
for x in range(400):
t.forward(x)
t.left(90)
input("press enter to exit")
| 17 | 28 | 0.654412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.316176 |
d82c7d46da9c61535e8bf7de2dc34123809c1bd6 | 1,655 | py | Python | gitlab/tests/common.py | tanner-bruce/integrations-core | 36337b84fefb73e94d4f1ee28aaeb669dc12fb59 | [
"BSD-3-Clause"
] | null | null | null | gitlab/tests/common.py | tanner-bruce/integrations-core | 36337b84fefb73e94d4f1ee28aaeb669dc12fb59 | [
"BSD-3-Clause"
] | null | null | null | gitlab/tests/common.py | tanner-bruce/integrations-core | 36337b84fefb73e94d4f1ee28aaeb669dc12fb59 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import os
from datadog_checks.utils.common import get_docker_hostname
# Absolute path of the directory containing this test module.
HERE = os.path.dirname(os.path.abspath(__file__))

# Networking
HOST = get_docker_hostname()
GITLAB_TEST_PASSWORD = "testroot"
GITLAB_LOCAL_PORT = 8086
GITLAB_LOCAL_PROMETHEUS_PORT = 8088

# Endpoints and tags for the dockerised GitLab instance under test.
PROMETHEUS_ENDPOINT = "http://{}:{}/metrics".format(HOST, GITLAB_LOCAL_PROMETHEUS_PORT)
GITLAB_URL = "http://{}:{}".format(HOST, GITLAB_LOCAL_PORT)
GITLAB_TAGS = ['gitlab_host:{}'.format(HOST), 'gitlab_port:{}'.format(GITLAB_LOCAL_PORT)]
CUSTOM_TAGS = ['optional:tag1']

# Note that this is a subset of the ones defined in GitlabCheck
# When we stand up a clean test infrastructure some of those metrics might not
# be available yet, hence we validate a stable subset
ALLOWED_METRICS = [
    'process_max_fds',
    'process_open_fds',
    'process_resident_memory_bytes',
    'process_start_time_seconds',
    'process_virtual_memory_bytes',
]

# Valid check configuration pointing at the running GitLab container.
CONFIG = {
    'init_config': {'allowed_metrics': ALLOWED_METRICS},
    'instances': [
        {
            'prometheus_endpoint': PROMETHEUS_ENDPOINT,
            'gitlab_url': GITLAB_URL,
            'disable_ssl_validation': True,
            'tags': list(CUSTOM_TAGS),
        }
    ],
}

# Deliberately broken configuration (unreachable port 1234) used to
# exercise the check's error handling.
BAD_CONFIG = {
    'init_config': {'allowed_metrics': ALLOWED_METRICS},
    'instances': [
        {
            'prometheus_endpoint': 'http://{}:1234/metrics'.format(HOST),
            'gitlab_url': 'http://{}:1234/ci'.format(HOST),
            'disable_ssl_validation': True,
            'tags': list(CUSTOM_TAGS),
        }
    ],
}
| 28.534483 | 89 | 0.676133 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 786 | 0.474924 |
d82d9570e54e2426ab28a57e0502bef49ce33260 | 8,248 | py | Python | src/hyde/algorithm/analysis/satellite/hsaf/lib_ascat_analysis.py | c-hydro/hyde | 3a3ff92d442077ce353b071d5afe726fc5465201 | [
"MIT"
] | null | null | null | src/hyde/algorithm/analysis/satellite/hsaf/lib_ascat_analysis.py | c-hydro/hyde | 3a3ff92d442077ce353b071d5afe726fc5465201 | [
"MIT"
] | 18 | 2020-04-07T16:34:59.000Z | 2021-07-02T07:32:39.000Z | src/hyde/algorithm/analysis/satellite/hsaf/lib_ascat_analysis.py | c-hydro/fp-hyde | b0728397522aceebec3e7ff115aff160a10efede | [
"MIT"
] | null | null | null | # -------------------------------------------------------------------------------------
# Library
import tempfile
import rasterio
import numpy as np
from os import remove
from os.path import join, exists
from scipy.interpolate import griddata
from src.hyde.algorithm.utils.satellite.hsaf.lib_ascat_generic import random_string, exec_process
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to clip data 2D/3D using a min/max threshold(s) and assign a missing value
def clip_map(map, valid_range=[None, None], missing_value=None):
    """Clip a 2D/3D data map to a [min, max] valid range.

    Values outside the range are replaced either by the violated bound
    (clamping, when missing_value is None) or by missing_value. Either
    end of valid_range may be None to skip that side; a valid_range of
    None returns the map untouched. The map must expose a
    pandas/xarray-style .where(cond, other) method.
    """
    if valid_range is None:
        return map

    # Resolve the numeric bounds (None disables the corresponding side).
    lower = float(valid_range[0]) if valid_range[0] is not None else None
    upper = float(valid_range[1]) if valid_range[1] is not None else None

    # Replacement values: the bound itself (clamp) or the explicit missing value.
    fill_lower = lower if missing_value is None else missing_value
    fill_upper = upper if missing_value is None else missing_value

    if lower is not None:
        map = map.where(map >= lower, fill_lower)
    if upper is not None:
        map = map.where(map <= upper, fill_upper)
    return map
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to create csv ancillary file
def create_file_csv(file_name_csv, var_data, var_name='values', lons_name='x', lats_name='y',
                    file_format='%10.4f', file_delimiter=','):
    """Dump scattered point data to a CSV file with a one-line header.

    var_data is an array whose columns are (lon, lat, value); the header
    row names those columns so the companion VRT file can reference them.
    """
    with open(file_name_csv, 'w') as csv_handle:
        csv_handle.write('{},{},{}\n'.format(lons_name, lats_name, var_name))
        np.savetxt(csv_handle, var_data, fmt=file_format, delimiter=file_delimiter, newline='\n')
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to create vrt ancillary file
def create_file_vrt(file_name_vrt, file_name_csv, layer_name, var_name='values', lons_name='x', lats_name='y'):
    # Write the GDAL OGR-VRT wrapper that exposes the points CSV as a
    # vector layer: the GeometryField element maps the CSV columns
    # (lons_name, lats_name, var_name) onto point x/y/z in WGS84.
    file_handle = open(file_name_vrt, 'w')
    file_handle.write('<OGRVRTDataSource>\n')
    file_handle.write('    <OGRVRTLayer name="' + layer_name + '">\n')
    file_handle.write('        <SrcDataSource>' + file_name_csv + '</SrcDataSource>\n')
    file_handle.write('        <GeometryType>wkbPoint</GeometryType>\n')
    file_handle.write('        <LayerSRS>WGS84</LayerSRS>\n')
    file_handle.write(
        '        <GeometryField encoding="PointFromColumns" x="' +
        lons_name + '" y="' + lats_name + '" z="' + var_name + '"/>\n')
    file_handle.write('    </OGRVRTLayer>\n')
    file_handle.write('</OGRVRTDataSource>\n')
    file_handle.close()
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to interpolate data using scattered data
def interpolate_point2map(lons_in, lats_in, values_in, lons_out, lats_out,
                          interp_nodata=-9999,
                          interp_method='nearest', interp_option=None,
                          interp_radius_lon=None, interp_radius_lat=None,
                          values_tmp='values', lons_tmp='x', lats_tmp='y',
                          epsg_code=4326, folder_tmp=None):
    """Grid scattered points onto the lons_out/lats_out grid via gdal_grid.

    The scattered data is written to a temporary CSV + VRT pair, gridded
    by the external `gdal_grid` tool ('nearest' or 'idw'), read back with
    rasterio and returned flipped to south-north row order. Both
    interp_radius_lon and interp_radius_lat must be given (TypeError
    otherwise). Requires GDAL binaries on the PATH.
    """
    # Define temporary folder
    if folder_tmp is None:
        folder_tmp = tempfile.mkdtemp()
    # Define layer name (using a random string)
    layer_tmp = random_string()
    # Check interpolation radius x and y
    if (interp_radius_lon is None) or (interp_radius_lat is None):
        raise TypeError
    # Define temporary file(s)
    file_tmp_csv = join(folder_tmp, layer_tmp + '.csv')
    file_tmp_vrt = join(folder_tmp, layer_tmp + '.vrt')
    file_tmp_tiff = join(folder_tmp, layer_tmp + '.tif')
    # Define geographical information (bounding box of the output grid)
    lon_out_min = min(lons_out.ravel())
    lon_out_max = max(lons_out.ravel())
    lat_out_min = min(lats_out.ravel())
    lat_out_max = max(lats_out.ravel())
    # NOTE(review): cols taken from lons_out.shape[0] and rows from
    # lats_out.shape[1] — this only matches if the grids are transposed
    # meshgrids; confirm the intended axis order.
    lon_out_cols = lons_out.shape[0]
    lat_out_rows = lats_out.shape[1]
    # Define dataset for interpolating function: (lon, lat, value) columns
    data_in = np.zeros(shape=[values_in.shape[0], 3])
    data_in[:, 0] = lons_in
    data_in[:, 1] = lats_in
    data_in[:, 2] = values_in
    # Create csv file
    create_file_csv(file_tmp_csv, data_in, values_tmp, lons_tmp, lats_tmp)
    # Create vrt file
    create_file_vrt(file_tmp_vrt, file_tmp_csv, layer_tmp, values_tmp, lons_tmp, lats_tmp)
    # Grid option(s): build the gdal_grid "-a" algorithm string unless the
    # caller supplied one explicitly
    if interp_method == 'nearest':
        if interp_option is None:
            interp_option = ('-a nearest:radius1=' + str(interp_radius_lon) + ':radius2=' +
                             str(interp_radius_lat) + ':angle=0.0:nodata=' + str(interp_nodata))
    elif interp_method == 'idw':
        if interp_option is None:
            interp_option = ('-a invdist:power=2.0:smoothing=0.0:radius1=' + str(interp_radius_lon) + ':radius2=' +
                             str(interp_radius_lat) + ':angle=0.0:nodata=' + str(interp_nodata))
    else:
        raise NotImplementedError
    # Execute line command definition (using gdal_grid)
    cmp_line = ('gdal_grid -zfield "' + values_tmp + '" -txe ' +
                str(lon_out_min) + ' ' + str(lon_out_max) + ' -tye ' +
                str(lat_out_min) + ' ' + str(lat_out_max) + ' -a_srs EPSG:' + str(epsg_code) + ' ' +
                interp_option + ' -outsize ' + str(lat_out_rows) + ' ' + str(lon_out_cols) +
                ' -of GTiff -ot Float32 -l ' + layer_tmp + ' ' +
                file_tmp_vrt + ' ' + file_tmp_tiff + ' --config GDAL_NUM_THREADS ALL_CPUS')
    # Execute algorithm
    [std_out, std_error, std_exit] = exec_process(command_line=cmp_line, command_path=folder_tmp)
    # Read data in tiff format and get values
    data_tmp = rasterio.open(file_tmp_tiff)
    values_tmp = data_tmp.read()
    # Image postprocessing to obtain 2d, south-north, east-west data
    values_out = values_tmp[0, :, :]
    values_out = np.flipud(values_out)
    # Delete tmp file(s)
    if exists(file_tmp_csv):
        remove(file_tmp_csv)
    if exists(file_tmp_vrt):
        remove(file_tmp_vrt)
    if exists(file_tmp_tiff):
        remove(file_tmp_tiff)
    return values_out
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to interpolate points to grid using gridded data
def interpolate_grid2map(lons_in, lats_in, values_in, lons_out, lats_out, nodata=-9999, interp_method='nearest'):
    """Regrid values from one lon/lat grid to another via scipy griddata.

    Input grids are flattened into scattered points; `nodata` fills
    output cells outside the input hull (only relevant for the 'linear'
    and 'cubic' methods).
    """
    source_points = (lons_in.ravel(), lats_in.ravel())
    return griddata(source_points, values_in.ravel(),
                    (lons_out, lats_out), method=interp_method,
                    fill_value=nodata)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to scale data using a mean-std scaling method
def mean_std(src_nrt, src_dr, ref_dr):
    """Mean/std matching: rescale src_nrt from src_dr's distribution to ref_dr's.

    Standardises src_nrt using the mean and standard deviation of src_dr,
    then re-expresses it with the mean and standard deviation of ref_dr.
    """
    standardized = (src_nrt - np.mean(src_dr)) / np.std(src_dr)
    return standardized * np.std(ref_dr) + np.mean(ref_dr)
# -------------------------------------------------------------------------------------
# -------------------------------------------------------------------------------------
# Method to scale data using mix-max normalized scaling method
def norm_min_max(src, ref):
    """Min-max normalise `src` against the range of `ref`.

    Both inputs are percent-scaled values (divided by 100 internally);
    the result is mapped back to the 0-100 range, with values outside
    ref's range extrapolating below 0 / above 100.
    """
    ref_lo = np.min(ref) / 100
    ref_hi = np.max(ref) / 100
    scaled_src = src / 100
    return (scaled_src - ref_lo) / (ref_hi - ref_lo) * 100
# -------------------------------------------------------------------------------------
| 40.431373 | 115 | 0.534433 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,012 | 0.365179 |
d82fafa5745811141dbc5de3b45d1ba61dddabe7 | 378 | py | Python | Dataset/Leetcode/valid/6/243.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/valid/6/243.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/valid/6/243.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution(object):
def XXX(self, s, numRows):
if numRows==1:
return s
res = ['' for _ in range(numRows)]
# 周期
T = numRows + numRows -2
for i in range(len(s)):
t_num = i%T
temp = t_num if t_num<numRows else numRows-(t_num)%numRows-2
res[temp] += s[i]
return ''.join(res)
| 27 | 72 | 0.486772 | 379 | 0.992147 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.031414 |
d83123081b0dcff8ed4a55f6cb8965f47011def8 | 515 | py | Python | projects/golem_e2e/tests/test_builder/add_action_to_teardown.py | kangchenwei/keyautotest2 | f980d46cabfc128b2099af3d33968f236923063f | [
"MIT"
] | null | null | null | projects/golem_e2e/tests/test_builder/add_action_to_teardown.py | kangchenwei/keyautotest2 | f980d46cabfc128b2099af3d33968f236923063f | [
"MIT"
] | null | null | null | projects/golem_e2e/tests/test_builder/add_action_to_teardown.py | kangchenwei/keyautotest2 | f980d46cabfc128b2099af3d33968f236923063f | [
"MIT"
] | null | null | null |
# Golem test metadata: human-readable description and the page-object
# modules this test pulls into scope.
description = 'Verify the user can add an action to the teardown'

pages = ['common',
         'index',
         'tests',
         'test_builder']
def setup(data):
    # Log into Golem, create/open the 'test' project, then create and open
    # a randomly-named test so the builder starts from a clean slate.
    common.access_golem(data.env.url, data.env.admin)
    index.create_access_project('test')
    common.navigate_menu('Tests')
    tests.create_access_random_test()
def test(data):
    # Add a 'click' action to the teardown section, save, then reload the
    # page to prove the action survived a round-trip to the backend.
    test_builder.add_action('click', where='teardown')
    test_builder.save_test()
    refresh_page()
    test_builder.verify_last_action('click', where='teardown')
| 25.75 | 65 | 0.683495 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 134 | 0.260194 |
d831754b8008aa90041fdf1bbc4b0e2c5b12649d | 9,829 | py | Python | src/previous/scaterplot.py | miaoli-psy/Psychophysics_exps | f27ab7027bb4890624fe003bb9459fd74d0bdb9c | [
"BSD-2-Clause"
] | null | null | null | src/previous/scaterplot.py | miaoli-psy/Psychophysics_exps | f27ab7027bb4890624fe003bb9459fd74d0bdb9c | [
"BSD-2-Clause"
] | null | null | null | src/previous/scaterplot.py | miaoli-psy/Psychophysics_exps | f27ab7027bb4890624fe003bb9459fd74d0bdb9c | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 23 19:43:56 2019
@author: MiaoLi
"""
#%%
import sys, os
import pandas as pd
# import seaborn as sns
# from shapely.geometry import Polygon, Point
sys.path.append('C:\\Users\\MiaoLi\\Desktop\\SCALab\\Programming\\crowdingnumerositygit\\GenerationAlgorithm\\VirtualEllipseFunc')
# import m_defineEllipses
import seaborn as sns
import matplotlib.pyplot as plt
# import scipy.stats
# import numpy as np
from sklearn import linear_model
from scipy import stats
#%%
# =============================================================================
# read different sheets of the same excel file
# =============================================================================
# Each workbook holds one sheet per ellipse-size scaling level (actual size,
# then 110%-400% in 10% steps); every sheet is loaded into its own DataFrame
# whose name encodes condition (C = crowding, NC = no-crowding), participant
# range (N49_53 / N54_58), and scaling level.  The names are referenced
# individually by the plotting code below, which is why the loads are
# written out one by one rather than in a loop.
# winsize0.6 crowding
totalC_N49_53 = pd.ExcelFile('../totalC_N49_53_scaterdata.xlsx')
totalC_N49_53_actualSize = pd.read_excel(totalC_N49_53, 'actualSize0.25_0.1')
totalC_N49_53_110 = pd.read_excel(totalC_N49_53, '110%0.275_0.11')
totalC_N49_53_120 = pd.read_excel(totalC_N49_53, '120%0.3_0.12')
totalC_N49_53_130 = pd.read_excel(totalC_N49_53, '130%0.325_0.13')
totalC_N49_53_140 = pd.read_excel(totalC_N49_53, '140%')
totalC_N49_53_150 = pd.read_excel(totalC_N49_53, '150%')
totalC_N49_53_160 = pd.read_excel(totalC_N49_53, '160%')
totalC_N49_53_170 = pd.read_excel(totalC_N49_53, '170%')
totalC_N49_53_180 = pd.read_excel(totalC_N49_53, '180%')
totalC_N49_53_190 = pd.read_excel(totalC_N49_53, '190%')
totalC_N49_53_200 = pd.read_excel(totalC_N49_53, '200%')
totalC_N49_53_210 = pd.read_excel(totalC_N49_53, '210%')
totalC_N49_53_220 = pd.read_excel(totalC_N49_53, '220%')
totalC_N49_53_230 = pd.read_excel(totalC_N49_53, '230%')
totalC_N49_53_240 = pd.read_excel(totalC_N49_53, '240%')
totalC_N49_53_250 = pd.read_excel(totalC_N49_53, '250%')
totalC_N49_53_260 = pd.read_excel(totalC_N49_53, '260%')
totalC_N49_53_270 = pd.read_excel(totalC_N49_53, '270%')
totalC_N49_53_280 = pd.read_excel(totalC_N49_53, '280%')
totalC_N49_53_290 = pd.read_excel(totalC_N49_53, '290%')
totalC_N49_53_300 = pd.read_excel(totalC_N49_53, '300%')
totalC_N49_53_310 = pd.read_excel(totalC_N49_53, '310%')
totalC_N49_53_320 = pd.read_excel(totalC_N49_53, '320%')
totalC_N49_53_330 = pd.read_excel(totalC_N49_53, '330%')
totalC_N49_53_340 = pd.read_excel(totalC_N49_53, '340%')
totalC_N49_53_350 = pd.read_excel(totalC_N49_53, '350%')
totalC_N49_53_360 = pd.read_excel(totalC_N49_53, '360%')
totalC_N49_53_370 = pd.read_excel(totalC_N49_53, '370%')
totalC_N49_53_380 = pd.read_excel(totalC_N49_53, '380%')
totalC_N49_53_390 = pd.read_excel(totalC_N49_53, '390%')
totalC_N49_53_400 = pd.read_excel(totalC_N49_53, '400%')
# winsize0.6 no-crowding
totalNC_N49_53 = pd.ExcelFile('../totalNC_N49_53_scaterdata.xlsx')
totalNC_N49_53_actualSize = pd.read_excel(totalNC_N49_53, 'actualSize')
totalNC_N49_53_110 = pd.read_excel(totalNC_N49_53, '110%')
totalNC_N49_53_120 = pd.read_excel(totalNC_N49_53, '120%')
totalNC_N49_53_130 = pd.read_excel(totalNC_N49_53, '130%')
totalNC_N49_53_140 = pd.read_excel(totalNC_N49_53, '140%')
totalNC_N49_53_150 = pd.read_excel(totalNC_N49_53, '150%')
totalNC_N49_53_160 = pd.read_excel(totalNC_N49_53, '160%')
totalNC_N49_53_170 = pd.read_excel(totalNC_N49_53, '170%')
totalNC_N49_53_180 = pd.read_excel(totalNC_N49_53, '180%')
totalNC_N49_53_190 = pd.read_excel(totalNC_N49_53, '190%')
totalNC_N49_53_200 = pd.read_excel(totalNC_N49_53, '200%')
totalNC_N49_53_210 = pd.read_excel(totalNC_N49_53, '210%')
totalNC_N49_53_220 = pd.read_excel(totalNC_N49_53, '220%')
totalNC_N49_53_230 = pd.read_excel(totalNC_N49_53, '230%')
totalNC_N49_53_240 = pd.read_excel(totalNC_N49_53, '240%')
totalNC_N49_53_250 = pd.read_excel(totalNC_N49_53, '250%')
totalNC_N49_53_260 = pd.read_excel(totalNC_N49_53, '260%')
totalNC_N49_53_270 = pd.read_excel(totalNC_N49_53, '270%')
totalNC_N49_53_280 = pd.read_excel(totalNC_N49_53, '280%')
totalNC_N49_53_290 = pd.read_excel(totalNC_N49_53, '290%')
totalNC_N49_53_300 = pd.read_excel(totalNC_N49_53, '300%')
totalNC_N49_53_310 = pd.read_excel(totalNC_N49_53, '310%')
totalNC_N49_53_320 = pd.read_excel(totalNC_N49_53, '320%')
totalNC_N49_53_330 = pd.read_excel(totalNC_N49_53, '330%')
totalNC_N49_53_340 = pd.read_excel(totalNC_N49_53, '340%')
totalNC_N49_53_350 = pd.read_excel(totalNC_N49_53, '350%')
totalNC_N49_53_360 = pd.read_excel(totalNC_N49_53, '360%')
totalNC_N49_53_370 = pd.read_excel(totalNC_N49_53, '370%')
totalNC_N49_53_380 = pd.read_excel(totalNC_N49_53, '380%')
totalNC_N49_53_390 = pd.read_excel(totalNC_N49_53, '390%')
totalNC_N49_53_400 = pd.read_excel(totalNC_N49_53, '400%')
# winsize0.7 crowding — this condition only goes up to a 200% scaling level.
# winsize0.7
totalC_N54_58 = pd.ExcelFile ('../totalC_N54_58_scaterdata.xlsx')
totalC_N54_58_actualSize = pd.read_excel(totalC_N54_58, 'actualSize')
totalC_N54_58_110 = pd.read_excel(totalC_N54_58, '110%')
totalC_N54_58_120 = pd.read_excel(totalC_N54_58, '120%')
totalC_N54_58_130 = pd.read_excel(totalC_N54_58, '130%')
totalC_N54_58_140 = pd.read_excel(totalC_N54_58, '140%')
totalC_N54_58_150 = pd.read_excel(totalC_N54_58, '150%')
totalC_N54_58_160 = pd.read_excel(totalC_N54_58, '160%')
totalC_N54_58_170 = pd.read_excel(totalC_N54_58, '170%')
totalC_N54_58_180 = pd.read_excel(totalC_N54_58, '180%')
totalC_N54_58_190 = pd.read_excel(totalC_N54_58, '190%')
totalC_N54_58_200 = pd.read_excel(totalC_N54_58, '200%')
# winsize0.7 no-crowding
totalNC_N54_58 = pd.ExcelFile('../totalNC_N54_58_scaterdata.xlsx')
totalNC_N54_58_actualSize = pd.read_excel(totalNC_N54_58, 'actualSize')
totalNC_N54_58_110 = pd.read_excel(totalNC_N54_58, '110%')
totalNC_N54_58_120 = pd.read_excel(totalNC_N54_58, '120%')
totalNC_N54_58_130 = pd.read_excel(totalNC_N54_58, '130%')
totalNC_N54_58_140 = pd.read_excel(totalNC_N54_58, '140%')
totalNC_N54_58_150 = pd.read_excel(totalNC_N54_58, '150%')
totalNC_N54_58_160 = pd.read_excel(totalNC_N54_58, '160%')
totalNC_N54_58_170 = pd.read_excel(totalNC_N54_58, '170%')
totalNC_N54_58_180 = pd.read_excel(totalNC_N54_58, '180%')
totalNC_N54_58_190 = pd.read_excel(totalNC_N54_58, '190%')
totalNC_N54_58_200 = pd.read_excel(totalNC_N54_58, '200%')
# =============================================================================
# Scatter plots
# =============================================================================
# Which scaled-ellipse-size condition is plotted; used only for the axis
# label and the output file name.
ellipseSize = '200'
# Jittered strip plot of per-trial deviation scores against the number of
# discs in other items' crowding zones.
# Assumes the sheet has columns 'count_number10' and 'deviation_score'
# — TODO confirm against the workbook.
ax = sns.stripplot(x='count_number10',y = 'deviation_score', data = totalC_N49_53_200, size = 8, jitter = 0.3,
                   alpha = 0.3, color = 'k', edgecolor = 'gray')
# ax.set(xscale = 'log', yscale = 'log')
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
# add corresponding no-crowding average line
# ax.axhline(3.26, ls='--', color = 'lime',linewidth=5)
ax.set_xlabel('No. of discs in others crowding zones_ellipseSize%s' %(ellipseSize))
# ax.set_ylabel('')
# ax.set(xlim = (0, 16))
ax.set(ylim = (-20, 25))
# NOTE(review): sns.set() changes global style after this plot is drawn, so
# the figsize here affects only subsequently created figures — confirm intent.
sns.set(rc={'figure.figsize':(6,3)})
plt.savefig('../scaterplot06_c_%s.png' %(ellipseSize), dpi=200,bbox_inches = 'tight',pad_inches = 0)
#
# # myx = scaterP_07_data['Count_number'].to_numpy()
# # slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(x=scaterP_07_data['Count_number'][mask],y = scaterP_07_data['Deviation'][mask])
# reg = linear_model.LinearRegression()
# x = scaterP_07_data['Count_number'].values.reshape(-1,1)
# y = scaterP_07_data['Deviation'].values.reshape(-1,1)
# reg.fit(x,y)
# r = reg.coef_
# intercept = reg.intercept_
#%% =============================================================================
# regplot
# =============================================================================
# Same data as the strip plot above, but drawn with a fitted regression line.
# Assumes columns 'count_number10' and 'deviation_score' — TODO confirm.
ax_r = sns.regplot(x="count_number10", y="deviation_score", data=totalC_N49_53_200, x_jitter=0.5)
ax_r.spines['top'].set_visible(False)
ax_r.spines['right'].set_visible(False)
ax_r.set_xlabel('No. of discs in others crowding zones_ellipseSize%s' %(ellipseSize))
ax_r.set(ylim = (-20, 25))
# ax_r.set(xlim = (31, 55))
sns.set(rc={'figure.figsize':(6,3)})
# plt.savefig('../scaterplot06_c_%s.png' %(ellipseSize), dpi=200,bbox_inches = 'tight',pad_inches = 0)
#%%
# scaterP_06_data = pd.read_excel('scaterplot_raw06.xlsx')
# bx = sns.stripplot(x='Count_number',y = 'Deviation', data = scaterP_06_data, color = 'k', size = 8, jitter = 0.3,
# alpha = 0.3,edgecolor = 'gray')
# # bx = sns.regplot(x="Count_number", y="Deviation", data=scaterP_06_data, x_jitter = 0.3, color = 'k', y_jitter = 0.05)
# bx.spines['top'].set_visible(False)
# bx.spines['right'].set_visible(False)
# bx.axhline(3.902947846, ls ='--', color = 'lime', linewidth=5)
# bx.set_xlabel('')
# bx.set_ylabel('')
# # bx.set(xlim = (0, 16), xticks = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 14])
# bx.set(ylim = (-25, 25))
# plt.savefig('scaterplot06_1.png', dpi=200,bbox_inches = 'tight',pad_inches = 0)
#%%
# scaterP_both_data = pd.read_excel('scaterboth_raw.xlsx')
# ax = sns.stripplot(x='Count_number',y = 'Deviation', data = scaterP_both_data, size = 8, jitter = 0.3,
# alpha = 0.2, color = 'k', edgecolor = 'gray')
# ax.spines['top'].set_visible(False)
# ax.spines['right'].set_visible(False)
# ax.set_xlabel('')
# ax.set_ylabel('')
# # ax.set(xlim = (0, 16), xticks = [1, 3, 6, 7, 8, 9, 11, 15])
# ax.set(ylim = (-25,25))
# plt.savefig('scaterplotboth.png', dpi=200,bbox_inches = 'tight',pad_inches = 0) | 50.405128 | 152 | 0.665378 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,888 | 0.395564 |
d8319e6bc7fa01aaa7362ab0f4c1bc854e4660fc | 1,366 | py | Python | gumtree_spider.py | JemLukeBingham/used_car_scraper | 6c1502ee0639d769de98a6dee6f455149c26bd6a | [
"MIT"
] | 1 | 2021-05-11T14:23:21.000Z | 2021-05-11T14:23:21.000Z | gumtree_spider.py | JemLukeBingham/used_car_scraper | 6c1502ee0639d769de98a6dee6f455149c26bd6a | [
"MIT"
] | 7 | 2020-01-24T08:32:00.000Z | 2020-02-17T12:57:08.000Z | gumtree_spider.py | JemLukeBingham/used_car_scraper | 6c1502ee0639d769de98a6dee6f455149c26bd6a | [
"MIT"
] | null | null | null | import scrapy
from urllib.parse import urljoin
from text_formatting import format_mileage, format_year, format_price
class GumtreeSpider(scrapy.Spider):
    """Crawl Gumtree South Africa's cars & bakkies listings.

    Yields one dict per ad containing its formatted price and its raw
    description text, following the pagination link until no next page
    is found.
    """
    name = 'gumtree'
    base_url = 'https://www.gumtree.co.za/'
    start_urls = [
        urljoin(base_url, 's-cars-bakkies/v1c9077p1'),
    ]
    def parse(self, response):
        """Extract price/description for each ad card on a results page,
        then follow the pagination link, re-entering this callback.
        """
        # NOTE: the backslash continuations keep this a single XPath string
        # literal; the embedded run of spaces becomes part of the string,
        # which XPath tolerates as whitespace between path steps.
        for result in response.xpath("//div[@class='view']/\
                                             div[@id='srpAds']/\
                                             div[@class='related-items']/\
                                             div[@class='related-content']/\
                                             div/div[@class='related-ad-content']"):
            car = {}
            # Price text is formatted by the project helper before yielding.
            price = result.xpath("div[@class='price']/span/span[@class='ad-price']/text()").get()
            car['price'] = format_price(price)
            car['description'] = result.xpath("div[@class='description-content']/\
                                              span[@class='related-ad-description']/\
                                              span[@class='description-text']/text()").get()
            yield car
        # Relative href of the "next page" arrow, or None on the last page.
        next_page = response.xpath("//div[@class='pagination-content']/span/a[@class=' icon-pagination-right']/@href").get()
        if next_page is not None:
            yield response.follow(urljoin(self.base_url, next_page), self.parse)
| 50.592593 | 125 | 0.522694 | 1,247 | 0.912884 | 1,062 | 0.777452 | 0 | 0 | 0 | 0 | 735 | 0.538067 |