hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
15e31514a9fcc45da82364eec547a0384aec823d | 225 | py | Python | Problem_20/main.py | jdalzatec/EulerProject | 2f2f4d9c009be7fd63bb229bb437ea75db77d891 | [
"MIT"
] | 1 | 2022-03-28T05:32:58.000Z | 2022-03-28T05:32:58.000Z | Problem_20/main.py | jdalzatec/EulerProject | 2f2f4d9c009be7fd63bb229bb437ea75db77d891 | [
"MIT"
] | null | null | null | Problem_20/main.py | jdalzatec/EulerProject | 2f2f4d9c009be7fd63bb229bb437ea75db77d891 | [
"MIT"
] | null | null | null | from functools import reduce
from math import factorial
if __name__ == '__main__':
main() | 18.75 | 59 | 0.635556 | from functools import reduce
from math import factorial
def main():
num = 100
value = factorial(num)
suma = reduce(lambda x, y: int(x) + int(y), str(value))
print(suma)
if __name__ == '__main__':
main() | 107 | 0 | 23 |
e962e1a7780ef210d97a0010b131b297ddef4442 | 782 | py | Python | hotel_test.py | joeryan/100days | 87ec10843018a43a8a93816df5f45e85521c1ac9 | [
"MIT"
] | null | null | null | hotel_test.py | joeryan/100days | 87ec10843018a43a8a93816df5f45e85521c1ac9 | [
"MIT"
] | null | null | null | hotel_test.py | joeryan/100days | 87ec10843018a43a8a93816df5f45e85521c1ac9 | [
"MIT"
] | null | null | null | import pytest
from hotel import Hotel
| 26.066667 | 53 | 0.716113 | import pytest
from hotel import Hotel
def test_check_in_a_guest():
hotel = Hotel()
hotel.check_in('Bob Barker', 302)
assert(('Bob Barker' in hotel.guests()) == True)
def test_does_not_accept_guest_in_occupied_room():
hotel = Hotel()
hotel.check_in('Bob Barker', 303)
assert(hotel.check_in('Roy Orbison', 303) == False)
def test_accepts_guest_into_unoccupied_room():
hotel = Hotel()
hotel.check_in('Bob Barker', 303)
assert(hotel.check_in('Roy Orbison', 305) == True)
def test_check_out_a_guest():
hotel = Hotel()
hotel.check_in('Bob Dylan', 306)
hotel.check_out('Bob Dylan')
assert(('Bob Dylan' in hotel.guests()) == False)
def test_check_out_a_guest_releases_room():
hotel = Hotel()
hotel.check_in('Jim Maui', 301)
hotel.check_out('Jim Maui')
| 628 | 0 | 115 |
17b6282c7573cb41d82adbd3f2e2fcbdd8890ba2 | 6,462 | py | Python | source/services/lex-bot/other_intents/test_help_intent.py | s3799570/P000075CSITCP | dcf9f388a22baffc99e01b445e5d95089a896113 | [
"Apache-2.0"
] | 3 | 2021-10-30T12:53:47.000Z | 2022-02-09T06:33:08.000Z | source/services/lex-bot/other_intents/test_help_intent.py | s3799570/P000075CSITCP | dcf9f388a22baffc99e01b445e5d95089a896113 | [
"Apache-2.0"
] | 10 | 2021-12-20T17:41:54.000Z | 2022-02-27T10:33:23.000Z | source/services/lex-bot/other_intents/test_help_intent.py | s3799570/P000075CSITCP | dcf9f388a22baffc99e01b445e5d95089a896113 | [
"Apache-2.0"
] | 3 | 2021-10-30T12:53:42.000Z | 2022-03-21T08:18:47.000Z | ######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import os
from unittest import TestCase, mock
from mock import patch
import botocore
mock_env_variables = {
"botLanguage": "English",
"AWS_SDK_USER_AGENT": '{ "user_agent_extra": "AwsSolution/1234/1.6.0" }',
}
@patch.dict(os.environ, mock_env_variables)
| 52.536585 | 191 | 0.50325 | ######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import os
from unittest import TestCase, mock
from mock import patch
import botocore
mock_env_variables = {
"botLanguage": "English",
"AWS_SDK_USER_AGENT": '{ "user_agent_extra": "AwsSolution/1234/1.6.0" }',
}
@patch.dict(os.environ, mock_env_variables)
class BookAppointmentIntentTest(TestCase):
def test_closing_response(self):
from other_intents.help_intent import closing_response
closing_response_value = {
"English": {"value": "Try 'What is your name', 'Weather Forecast', 'Leave Feedback', 'Order Pizza', or 'Book Appointment'"},
"French": {"value": "Essayez 'Quel est votre nom', 'Prévisions météo', 'Laisser les commentaires', 'Commander une pizza', ou 'Prendre rendez-vous'."},
"Italian": {"value": "Provare 'Qual è il tuo nome', 'Previsione del moto', 'lasciare un feedback', 'Ordina la pizza', o 'Fissa un appuntamento'."},
"Spanish": {"value": "Intentar 'Cual es tu nombre', 'Como esta el clima', 'Dejar un comentario', 'Quiero pizza', o 'Reservar una cita'."},
"German": {"value": "Versuchen 'Wie heißen Sie', 'Wettervorhersage', 'Hinterlasse ein Feedback', 'Pizza bestellen' oder, 'Einen Termin buchen'."},
}
response = closing_response("English")
self.assertEqual(response, closing_response_value["English"])
response = closing_response("French")
self.assertEqual(response, closing_response_value["French"])
response = closing_response("Spanish")
self.assertEqual(response, closing_response_value["Spanish"])
response = closing_response("Italian")
self.assertEqual(response, closing_response_value["Italian"])
response = closing_response("German")
self.assertEqual(response, closing_response_value["German"])
self.assertRaises(KeyError, closing_response, "invalidLanguage")
def test_utterances(self):
from other_intents.help_intent import utterances
utterance_values = {
"English": [
{"utterance": "help"},
{"utterance": "help me"},
{"utterance": "what do you know"},
{"utterance": "answer me something"},
],
"French": [
{"utterance": "aider"},
{"utterance": "aidez-moi"},
{"utterance": "ce que vous savez"},
{"utterance": "répondez-moi quelque chose"},
],
"Italian": [
{"utterance": "aiuto"},
{"utterance": "aiutami"},
{"utterance": "cosa sai"},
{"utterance": "rispondami qualcosa"},
],
"Spanish": [
{"utterance": "ayuda"},
{"utterance": "me ayuda"},
{"utterance": "lo que usted sabe"},
{"utterance": "me responda algo"},
],
"German": [
{"utterance": "hilfe"},
{"utterance": "hilf mir"},
{"utterance": "was weißt du"},
{"utterance": "antworte mir etwas"},
],
}
response = utterances("English")
self.assertEqual(response, utterance_values["English"])
response = utterances("French")
self.assertEqual(response, utterance_values["French"])
response = utterances("Spanish")
self.assertEqual(response, utterance_values["Spanish"])
response = utterances("Italian")
self.assertEqual(response, utterance_values["Italian"])
response = utterances("German")
self.assertEqual(response, utterance_values["German"])
self.assertRaises(KeyError, utterances, "invalidLanguage")
@patch("botocore.client.BaseClient._make_api_call")
def test_create_help_intent(self, mock_client):
from other_intents.help_intent import create_help_intent
create_help_intent(locale_id="en_US", bot_id="testid1234")
mock_client.assert_called_with(
"CreateIntent",
{
"intentName": "Help",
"description": "Help intent created by serverless bot.",
"sampleUtterances": [
{"utterance": "help"},
{"utterance": "help me"},
{"utterance": "what do you know"},
{"utterance": "answer me something"},
],
"dialogCodeHook": {"enabled": False},
"fulfillmentCodeHook": {"enabled": False},
"intentClosingSetting": {
"closingResponse": {
"messageGroups": [{"message": {"plainTextMessage": {"value": "Try 'What is your name', 'Weather Forecast', 'Leave Feedback', 'Order Pizza', or 'Book Appointment'"}}}],
"allowInterrupt": True,
}
},
"botId": "testid1234",
"botVersion": "DRAFT",
"localeId": "en_US",
},
)
| 4,597 | 157 | 22 |
42ceed1a1a21505baf6adca410e9fdef4602f5b8 | 131,835 | py | Python | pareto/operational_water_management/operational_produced_water_optimization_model.py | ksbeattie/project-pareto | aafa060d938ff253691447c080d6727e3f719b36 | [
"BSD-3-Clause-LBNL"
] | 4 | 2022-01-28T17:39:33.000Z | 2022-02-25T20:15:53.000Z | pareto/operational_water_management/operational_produced_water_optimization_model.py | ksbeattie/project-pareto | aafa060d938ff253691447c080d6727e3f719b36 | [
"BSD-3-Clause-LBNL"
] | 24 | 2021-11-12T14:31:52.000Z | 2022-03-29T19:02:34.000Z | pareto/operational_water_management/operational_produced_water_optimization_model.py | ksbeattie/project-pareto | aafa060d938ff253691447c080d6727e3f719b36 | [
"BSD-3-Clause-LBNL"
] | 5 | 2021-11-10T15:27:16.000Z | 2022-02-25T16:53:38.000Z | #####################################################################################################
# PARETO was produced under the DOE Produced Water Application for Beneficial Reuse Environmental
# Impact and Treatment Optimization (PARETO), and is copyright (c) 2021 by the software owners: The
# Regents of the University of California, through Lawrence Berkeley National Laboratory, et al. All
# rights reserved.
#
# NOTICE. This Software was developed under funding from the U.S. Department of Energy and the
# U.S. Government consequently retains certain rights. As such, the U.S. Government has been granted
# for itself and others acting on its behalf a paid-up, nonexclusive, irrevocable, worldwide license
# in the Software to reproduce, distribute copies to the public, prepare derivative works, and perform
# publicly and display publicly, and to permit other to do so.
#####################################################################################################
# Title: OPERATIONAL Produced Water Optimization Model
# Notes:
# - Introduced new completions-to-completions trucking arc (CCT) to account for possible flowback reuse
# - Implemented a generic OPERATIONAL case study example (updated model sets, additional input data)
# - Implemented an initial formulation for production tank modeling (see updated documentation)
# - Implemented a corrected version of the disposal capacity constraint considering more trucking-to-disposal arcs (PKT, SKT, SKT, RKT) [June 28]
# - Implemented an improved slack variable display loop [June 29]
# - Implemented fresh sourcing via trucking [July 2]
# - Implemented completions pad storage [July 6]
# - Implemeted an equalized production tank formulation [July 7]
# - Implemented changes to flowback processing [July 13]
# - Implemented production tank config option [August 4]
# Import
from pyomo.environ import (
Var,
Param,
Set,
ConcreteModel,
Constraint,
Objective,
minimize,
NonNegativeReals,
Reals,
Binary,
)
from pareto.utilities.get_data import get_data
from importlib import resources
import pyomo.environ
from pyomo.core.base.constraint import simple_constraint_rule
# import gurobipy
from pyomo.common.config import ConfigBlock, ConfigValue, In
from enum import Enum
from pareto.utilities.solvers import get_solver
# create config dictionary
CONFIG = ConfigBlock()
CONFIG.declare(
"has_pipeline_constraints",
ConfigValue(
default=True,
domain=In([True, False]),
description="build pipeline constraints",
doc="""Indicates whether holdup terms should be constructed or not.
**default** - True.
**Valid values:** {
**True** - construct pipeline constraints,
**False** - do not construct pipeline constraints}""",
),
)
CONFIG.declare(
"production_tanks",
ConfigValue(
default=ProdTank.individual,
domain=In(ProdTank),
description="production tank type selection",
doc="Type of production tank arrangement (i.e., Individual, Equalized)",
),
)
# Creation of a Concrete Model
| 34.385759 | 152 | 0.520605 | #####################################################################################################
# PARETO was produced under the DOE Produced Water Application for Beneficial Reuse Environmental
# Impact and Treatment Optimization (PARETO), and is copyright (c) 2021 by the software owners: The
# Regents of the University of California, through Lawrence Berkeley National Laboratory, et al. All
# rights reserved.
#
# NOTICE. This Software was developed under funding from the U.S. Department of Energy and the
# U.S. Government consequently retains certain rights. As such, the U.S. Government has been granted
# for itself and others acting on its behalf a paid-up, nonexclusive, irrevocable, worldwide license
# in the Software to reproduce, distribute copies to the public, prepare derivative works, and perform
# publicly and display publicly, and to permit other to do so.
#####################################################################################################
# Title: OPERATIONAL Produced Water Optimization Model
# Notes:
# - Introduced new completions-to-completions trucking arc (CCT) to account for possible flowback reuse
# - Implemented a generic OPERATIONAL case study example (updated model sets, additional input data)
# - Implemented an initial formulation for production tank modeling (see updated documentation)
# - Implemented a corrected version of the disposal capacity constraint considering more trucking-to-disposal arcs (PKT, SKT, SKT, RKT) [June 28]
# - Implemented an improved slack variable display loop [June 29]
# - Implemented fresh sourcing via trucking [July 2]
# - Implemented completions pad storage [July 6]
# - Implemeted an equalized production tank formulation [July 7]
# - Implemented changes to flowback processing [July 13]
# - Implemented production tank config option [August 4]
# Import
from pyomo.environ import (
Var,
Param,
Set,
ConcreteModel,
Constraint,
Objective,
minimize,
NonNegativeReals,
Reals,
Binary,
)
from pareto.utilities.get_data import get_data
from importlib import resources
import pyomo.environ
from pyomo.core.base.constraint import simple_constraint_rule
# import gurobipy
from pyomo.common.config import ConfigBlock, ConfigValue, In
from enum import Enum
from pareto.utilities.solvers import get_solver
class ProdTank(Enum):
individual = 0
equalized = 1
# create config dictionary
CONFIG = ConfigBlock()
CONFIG.declare(
"has_pipeline_constraints",
ConfigValue(
default=True,
domain=In([True, False]),
description="build pipeline constraints",
doc="""Indicates whether holdup terms should be constructed or not.
**default** - True.
**Valid values:** {
**True** - construct pipeline constraints,
**False** - do not construct pipeline constraints}""",
),
)
CONFIG.declare(
"production_tanks",
ConfigValue(
default=ProdTank.individual,
domain=In(ProdTank),
description="production tank type selection",
doc="Type of production tank arrangement (i.e., Individual, Equalized)",
),
)
# Creation of a Concrete Model
def create_model(df_sets, df_parameters, default={}):
model = ConcreteModel()
# import config dictionary
model.config = CONFIG(default)
model.type = "operational"
model.proprietary_data = df_parameters["proprietary_data"][0]
## Define sets ##
model.s_T = Set(initialize=df_sets["TimePeriods"], doc="Time Periods", ordered=True)
model.s_PP = Set(initialize=df_sets["ProductionPads"], doc="Production Pads")
model.s_CP = Set(initialize=df_sets["CompletionsPads"], doc="Completions Pads")
model.s_A = Set(initialize=df_sets["ProductionTanks"], doc="Production Tanks")
model.s_P = Set(initialize=(model.s_PP | model.s_CP), doc="Pads")
model.s_F = Set(initialize=df_sets["FreshwaterSources"], doc="Freshwater Sources")
model.s_K = Set(initialize=df_sets["SWDSites"], doc="Disposal Sites")
model.s_S = Set(initialize=df_sets["StorageSites"], doc="Storage Sites")
model.s_R = Set(initialize=df_sets["TreatmentSites"], doc="Treatment Sites")
model.s_O = Set(initialize=df_sets["ReuseOptions"], doc="Reuse Options")
model.s_N = Set(initialize=df_sets["NetworkNodes"], doc=["Network Nodes"])
model.s_W = Set(
initialize=df_sets["WaterQualityComponents"], doc="Water Quality Components"
)
model.s_L = Set(
initialize=(
model.s_P
| model.s_F
| model.s_K
| model.s_S
| model.s_R
| model.s_O
| model.s_N
),
doc="Locations",
)
# COMMENT: Remove pipeline diameter, storage capacity and injection capacity sets
model.s_D = Set(initialize=["D0"], doc="Pipeline diameters")
model.s_C = Set(initialize=["C0"], doc="Storage capacities")
model.s_I = Set(initialize=["I0"], doc="Injection (i.e. disposal) capacities")
# model.s_P.pprint()
# model.s_L.pprint()
## Define continuous variables ##
model.v_Z = Var(within=Reals, doc="Objective function variable [$]")
model.v_F_Piped = Var(
model.s_L,
model.s_L,
model.s_T,
within=NonNegativeReals,
initialize=0,
doc="Produced water quantity piped from location l to location l [bbl/day]",
)
model.v_F_Trucked = Var(
model.s_L,
model.s_L,
model.s_T,
within=NonNegativeReals,
initialize=0,
doc="Produced water quantity trucked from location l to location l [bbl/day]",
)
model.v_F_Sourced = Var(
model.s_F,
model.s_CP,
model.s_T,
within=NonNegativeReals,
initialize=0,
doc="Fresh water sourced from source f to completions pad p [bbl/day]",
)
model.v_F_PadStorageIn = Var(
model.s_CP,
model.s_T,
within=NonNegativeReals,
initialize=0,
doc="Water put into completions" " pad storage [bbl/day]",
)
model.v_F_PadStorageOut = Var(
model.s_CP,
model.s_T,
within=NonNegativeReals,
initialize=0,
doc="Water from completions pad storage" " used for fracturing [bbl/day]",
)
model.v_F_UnusedTreatedWater = Var(
model.s_R,
model.s_T,
within=NonNegativeReals,
initialize=0,
doc="Water leftover from the treatment process [bbl/day]",
)
if model.config.production_tanks == ProdTank.individual:
model.v_F_Drain = Var(
model.s_P,
model.s_A,
model.s_T,
within=NonNegativeReals,
doc="Produced water drained from" " production tank [bbl/day]",
)
model.v_L_ProdTank = Var(
model.s_P,
model.s_A,
model.s_T,
within=NonNegativeReals,
doc="Water level in production tank [bbl]",
)
elif model.config.production_tanks == ProdTank.equalized:
model.v_F_Drain = Var(
model.s_P,
model.s_T,
within=NonNegativeReals,
doc="Produced water drained from" " production tank [bbl/day]",
)
model.v_L_ProdTank = Var(
model.s_P,
model.s_T,
within=NonNegativeReals,
doc="Water level in production tank [bbl]",
)
else:
raise Exception("storage type not supported")
model.v_L_PadStorage = Var(
model.s_CP,
model.s_T,
within=NonNegativeReals,
initialize=0,
doc="Water level in completions pad storage [bbl]",
)
model.v_B_Production = Var(
model.s_P,
model.s_T,
within=NonNegativeReals,
doc="Produced water for transport from pad [bbl/day]",
)
model.v_L_Storage = Var(
model.s_S,
model.s_T,
within=NonNegativeReals,
doc="Water level at storage site [bbl]",
)
model.v_C_Piped = Var(
model.s_L,
model.s_L,
model.s_T,
within=NonNegativeReals,
doc="Cost of piping produced water from location l to location l [$/day]",
)
model.v_C_Trucked = Var(
model.s_L,
model.s_L,
model.s_T,
within=NonNegativeReals,
doc="Cost of trucking produced water from location l to location l [$/day]",
)
model.v_C_Sourced = Var(
model.s_F,
model.s_CP,
model.s_T,
within=NonNegativeReals,
doc="Cost of sourcing fresh water from source f to completion pad p [$/day]",
)
model.v_C_Disposal = Var(
model.s_K,
model.s_T,
within=NonNegativeReals,
doc="Cost of injecting produced water at disposal site [$/day]",
)
model.v_C_Treatment = Var(
model.s_R,
model.s_T,
within=NonNegativeReals,
doc="Cost of treating produced water at treatment site [$/day]",
)
model.v_C_Reuse = Var(
model.s_CP,
model.s_T,
within=NonNegativeReals,
doc="Cost of reusing produced water at completions site [$/day]",
)
model.v_C_Storage = Var(
model.s_S,
model.s_T,
within=NonNegativeReals,
doc="Cost of storing produced water at storage site [$/day]",
)
model.v_R_Storage = Var(
model.s_S,
model.s_T,
within=NonNegativeReals,
doc="Credit for retrieving stored produced water from storage site [$/bbl]",
)
model.v_F_TotalSourced = Var(
within=NonNegativeReals, doc="Total volume freshwater sourced [bbl]"
)
model.v_C_TotalSourced = Var(
within=NonNegativeReals, doc="Total cost of sourcing freshwater [$]"
)
model.v_C_TotalDisposal = Var(
within=NonNegativeReals, doc="Total cost of injecting produced water [$]"
)
model.v_C_TotalTreatment = Var(
within=NonNegativeReals, doc="Total cost of treating produced water [$]"
)
model.v_C_TotalReuse = Var(
within=NonNegativeReals, doc="Total cost of reusing produced water [$]"
)
model.v_C_TotalPiping = Var(
within=NonNegativeReals, doc="Total cost of piping produced water [$]"
)
model.v_C_TotalStorage = Var(
within=NonNegativeReals, doc="Total cost of storing produced water [$]"
)
model.v_C_TotalPadStorage = Var(
within=NonNegativeReals,
doc="Total cost of storing produced water at completions site [$]",
)
model.v_C_TotalTrucking = Var(
within=NonNegativeReals, doc="Total cost of trucking produced water [$]"
)
model.v_C_Slack = Var(
within=NonNegativeReals, doc="Total cost of slack variables [$"
)
model.v_R_TotalStorage = Var(
within=NonNegativeReals, doc="Total credit for withdrawing produced water [$]"
)
model.v_F_ReuseDestination = Var(
model.s_CP,
model.s_T,
within=NonNegativeReals,
doc="Total deliveries to completions pad [bbl/week]",
)
model.v_F_DisposalDestination = Var(
model.s_K,
model.s_T,
within=NonNegativeReals,
doc="Total deliveries to disposal site [bbl/week]",
)
model.v_F_TreatmentDestination = Var(
model.s_R,
model.s_T,
within=NonNegativeReals,
doc="Total deliveries to treatment site [bbl/week]",
)
model.v_F_BeneficialReuseDestination = Var(
model.s_O,
model.s_T,
within=NonNegativeReals,
doc="Total deliveries to Beneficial Reuse Site [bbl/week]",
)
# COMMENT: Remove the disposal/storage/flow capacity variables
model.v_D_Capacity = Var(
model.s_K,
within=NonNegativeReals,
doc="Disposal capacity at a disposal site [bbl/day]",
)
model.v_X_Capacity = Var(
model.s_S,
within=NonNegativeReals,
doc="Storage capacity at a storage site [bbl/day]",
)
model.v_F_Capacity = Var(
model.s_L,
model.s_L,
within=NonNegativeReals,
doc="Flow capacity along pipeline arc [bbl/day]",
)
# COMMENT: Remove the disposal/pipine/storage capital capacity variables
model.v_C_DisposalCapEx = Var(
within=NonNegativeReals,
doc="Capital cost of constructing or expanding disposal capacity [$]",
)
model.v_C_PipelineCapEx = Var(
within=NonNegativeReals,
doc="Capital cost of constructing or expanding piping capacity [$]",
)
model.v_C_StorageCapEx = Var(
within=NonNegativeReals,
doc="Capital cost of constructing or expanding storage capacity [$]",
)
model.v_S_FracDemand = Var(
model.s_CP,
model.s_T,
within=NonNegativeReals,
doc="Slack variable to meet the completions demand [bbl/day]",
)
model.v_S_Production = Var(
model.s_PP,
model.s_T,
within=NonNegativeReals,
doc="Slack variable to process the produced water production [bbl/day]",
)
model.v_S_Flowback = Var(
model.s_CP,
model.s_T,
within=NonNegativeReals,
doc="Slack variable to proces flowback water production [bbl/day]",
)
model.v_S_PipelineCapacity = Var(
model.s_L,
model.s_L,
within=NonNegativeReals,
doc="Slack variable to provide necessary pipeline capacity [bbl]",
)
model.v_S_StorageCapacity = Var(
model.s_S,
within=NonNegativeReals,
doc="Slack variable to provide necessary storage capacity [bbl]",
)
model.v_S_DisposalCapacity = Var(
model.s_K,
within=NonNegativeReals,
doc="Slack variable to provide necessary disposal capacity [bbl/day]",
)
model.v_S_TreatmentCapacity = Var(
model.s_R,
within=NonNegativeReals,
doc="Slack variable to provide necessary treatment capacity [bbl/weel]",
)
model.v_S_ReuseCapacity = Var(
model.s_O,
within=NonNegativeReals,
doc="Slack variable to provide necessary reuse capacity [bbl/day]",
)
## Define binary variables ##
# COMMENT: Remove the binary pipeline/storage/disposal variables
model.vb_y_Pipeline = Var(
model.s_L,
model.s_L,
model.s_D,
within=Binary,
doc="New pipeline installed between one location and another location with specific diameter",
)
model.vb_y_Storage = Var(
model.s_S,
model.s_C,
within=Binary,
doc="New or additional storage facility installed at storage site with specific storage capacity",
)
model.vb_y_Disposal = Var(
model.s_K,
model.s_I,
within=Binary,
doc="New or additional disposal facility installed at disposal site with specific injection capacity",
)
model.vb_y_Flow = Var(
model.s_L,
model.s_L,
model.s_T,
within=Binary,
doc="Directional flow between two locations",
)
model.vb_z_PadStorage = Var(
model.s_CP, model.s_T, within=Binary, doc="Completions pad storage use"
)
model.vb_y_Truck = Var(
model.s_L,
model.s_L,
model.s_T,
within=Binary,
doc="Trucking between two locations",
)
# model.vb_z_Pipeline = Var(model.s_L,model.s_L,model.s_D,model.s_T,within=Binary, doc='Timing of pipeline installation between two locations')
# model.vb_z_Storage = Var(model.s_S,model.s_C,model.s_T,within=Binary, doc='Timing of storage facility installation at storage site')
# model.vb_z_Disposal = Var(model.s_K,model.s_I,model.s_T,within=Binary, doc='Timing of disposal facility installation at disposal site')
## Define set parameters ##
PCA_Table = {}
PNA_Table = {}
PPA_Table = {}
CNA_Table = {}
CCA_Table = {}
NNA_Table = {}
NCA_Table = {}
NKA_Table = {}
NSA_Table = {}
NRA_Table = {}
NOA_Table = {}
RCA_Table = {}
FCA_Table = {}
RNA_Table = {}
RKA_Table = {}
SNA_Table = {}
SCA_Table = {}
SKA_Table = {}
SRA_Table = {}
SOA_Table = {}
PCT_Table = {}
PKT_Table = {}
PST_Table = {}
PRT_Table = {}
POT_Table = {}
CKT_Table = {}
CST_Table = {}
CRT_Table = {}
CCT_Table = {}
SCT_Table = {}
CRT_Table = {}
SCT_Table = {}
SKT_Table = {}
RKT_Table = {}
PAL_Table = {}
model.p_PCA = Param(
model.s_PP,
model.s_CP,
default=0,
initialize=PCA_Table,
doc="Valid production-to-completions pipeline arcs [-]",
)
model.p_PNA = Param(
model.s_PP,
model.s_N,
default=0,
initialize=PNA_Table,
doc="Valid production-to-node pipeline arcs [-]",
)
model.p_PPA = Param(
model.s_PP,
model.s_PP,
default=0,
initialize=PPA_Table,
doc="Valid production-to-production pipeline arcs [-]",
)
model.p_CNA = Param(
model.s_CP,
model.s_N,
default=0,
initialize=CNA_Table,
doc="Valid completion-to-node pipeline arcs [-]",
)
model.p_CCA = Param(
model.s_CP,
model.s_CP,
default=0,
initialize=CCA_Table,
doc="Valid completion-to-completion pipeline arcs [-]",
)
model.p_NNA = Param(
model.s_N,
model.s_N,
default=0,
initialize=NNA_Table,
doc="Valid node-to-node pipeline arcs [-]",
)
model.p_NCA = Param(
model.s_N,
model.s_CP,
default=0,
initialize=NCA_Table,
doc="Valid node-to-completions pipeline arcs [-]",
)
model.p_NKA = Param(
model.s_N,
model.s_K,
default=0,
initialize=NKA_Table,
doc="Valid node-to-disposal pipeline arcs [-]",
)
model.p_NSA = Param(
model.s_N,
model.s_S,
default=0,
initialize=NSA_Table,
doc="Valid node-to-storage pipeline arcs [-]",
)
model.p_NRA = Param(
model.s_N,
model.s_R,
default=0,
initialize=NRA_Table,
doc="Valid node-to-treatment pipeline arcs [-]",
)
model.p_NOA = Param(
model.s_N,
model.s_O,
default=0,
initialize=NOA_Table,
doc="Valid node-to-reuse pipeline arcs [-]",
)
model.p_RCA = Param(
model.s_R,
model.s_CP,
default=0,
initialize=df_parameters["RCA"],
doc="Valid treatment-to-completions pipeline arcs [-]",
)
model.p_FCA = Param(
model.s_F,
model.s_CP,
default=0,
initialize=df_parameters["FCA"],
doc="Valid freshwater-to-completions pipeline arcs [-]",
)
model.p_RNA = Param(
model.s_R,
model.s_N,
default=0,
initialize=RNA_Table,
doc="Valid treatment-to-node pipeline arcs [-]",
)
model.p_RKA = Param(
model.s_R,
model.s_K,
default=0,
initialize=RKA_Table,
doc="Valid treatment-to-disposal pipeline arcs [-]",
)
model.p_SNA = Param(
model.s_S,
model.s_N,
default=0,
initialize=SNA_Table,
doc="Valid storage-to-node pipeline arcs [-]",
)
model.p_SCA = Param(
model.s_S,
model.s_CP,
default=0,
initialize=SCA_Table,
doc="Valid storage-to-completions pipeline arcs [-]",
)
model.p_SKA = Param(
model.s_S,
model.s_K,
default=0,
initialize=SKA_Table,
doc="Valid storage-to-disposal pipeline arcs [-]",
)
model.p_SRA = Param(
model.s_S,
model.s_R,
default=0,
initialize=SRA_Table,
doc="Valid storage-to-treatment pipeline arcs [-]",
)
    # --- Network arc parameters -------------------------------------------
    # Indicator parameters (default 0, i.e. arc absent). Per their doc
    # strings, the "A"-suffixed names are pipeline arcs and the "T"-suffixed
    # names are trucking arcs between the corresponding location sets.
    model.p_SOA = Param(
        model.s_S,
        model.s_O,
        default=0,
        initialize=SOA_Table,
        doc="Valid storage-to-reuse pipeline arcs [-]",
    )
    model.p_PCT = Param(
        model.s_PP,
        model.s_CP,
        default=0,
        initialize=df_parameters["PCT"],
        doc="Valid production-to-completions trucking arcs [-]",
    )
    model.p_FCT = Param(
        model.s_F,
        model.s_CP,
        default=0,
        initialize=df_parameters["FCT"],
        doc="Valid freshwater-to-completions trucking arcs [-]",
    )
    model.p_PKT = Param(
        model.s_PP,
        model.s_K,
        default=0,
        initialize=df_parameters["PKT"],
        doc="Valid production-to-disposal trucking arcs [-]",
    )
    model.p_PST = Param(
        model.s_PP,
        model.s_S,
        default=0,
        initialize=PST_Table,
        doc="Valid production-to-storage trucking arcs [-]",
    )
    model.p_PRT = Param(
        model.s_PP,
        model.s_R,
        default=0,
        initialize=df_parameters["PRT"],
        doc="Valid production-to-treatment trucking arcs [-]",
    )
    model.p_POT = Param(
        model.s_PP,
        model.s_O,
        default=0,
        initialize=POT_Table,
        doc="Valid production-to-reuse trucking arcs [-]",
    )
    model.p_CKT = Param(
        model.s_CP,
        model.s_K,
        default=0,
        initialize=df_parameters["CKT"],
        doc="Valid completions-to-disposal trucking arcs [-]",
    )
    model.p_CST = Param(
        model.s_CP,
        model.s_S,
        default=0,
        initialize=CST_Table,
        doc="Valid completions-to-storage trucking arcs [-]",
    )
    model.p_CRT = Param(
        model.s_CP,
        model.s_R,
        default=0,
        initialize=df_parameters["CRT"],
        doc="Valid completions-to-treatment trucking arcs [-]",
    )
    model.p_CCT = Param(
        model.s_CP,
        model.s_CP,
        default=0,
        initialize=df_parameters["CCT"],
        doc="Valid completions-to-completions trucking arcs [-]",
    )
    model.p_SCT = Param(
        model.s_S,
        model.s_CP,
        default=0,
        initialize=SCT_Table,
        doc="Valid storage-to-completions trucking arcs [-]",
    )
    model.p_SKT = Param(
        model.s_S,
        model.s_K,
        default=0,
        initialize=SKT_Table,
        doc="Valid storage-to-disposal trucking arcs [-]",
    )
    model.p_RKT = Param(
        model.s_R,
        model.s_K,
        default=0,
        initialize=RKT_Table,
        doc="Valid treatment-to-disposal trucking arcs [-]",
    )
    # LLT is the union of the individual trucking-arc tables: any valid
    # location-to-location trucking arc, used for the generic truck
    # min/max-flow constraints below.
    df_parameters["LLT"] = {
        **df_parameters["PCT"],
        **df_parameters["CCT"],
        **df_parameters["CRT"],
        **df_parameters["CKT"],
        **df_parameters["FCT"],
        **df_parameters["PKT"],
        **df_parameters["PRT"],
    }
    model.p_LLT = Param(
        model.s_L,
        model.s_L,
        default=0,
        initialize=df_parameters["LLT"],
        doc="Valid location-to-location trucking arcs [-]",
    )
    # Pad-to-tank links are only read from input data for the individual-tank
    # configuration; the equalized configuration gets an all-default (0) table.
    if model.config.production_tanks == ProdTank.individual:
        model.p_PAL = Param(
            model.s_P,
            model.s_A,
            default=0,
            initialize=df_parameters["PAL"],
            doc="Valid pad-to-tank links [-]",
        )
    elif model.config.production_tanks == ProdTank.equalized:
        model.p_PAL = Param(
            model.s_P, model.s_A, default=0, doc="Valid pad-to-tank links [-]"
        )
    else:
        # Any other ProdTank value is unsupported.
        raise Exception("storage type not supported")
    # model.p_FCA.pprint()
    # model.p_PKT.pprint()
    # model.p_PKT.pprint()
    # model.p_PCA.pprint()
    # model.p_PNA.pprint()
    # model.p_CNA.pprint()
    # model.p_NNA.pprint()
    # model.p_PAL.pprint()
    # model.p_CCT.pprint()
    ## Define set parameters ##
    # Placeholder tables: Params initialized from these (mostly empty) dicts
    # fall back to their declared defaults unless populated upstream.
    CompletionsDemandTable = {}
    ProductionTable = {}
    FlowbackTable = {}
    InitialPipelineCapacityTable = {}
    # COMMENT: For EXISTING/INITIAL pipeline capacity (l,l_tilde)=(l_tilde=l); needs to be implemented!
    InitialDisposalCapacityTable = {}
    InitialStorageCapacityTable = {}
    InitialTreatmentCapacityTable = {}
    InitialReuseCapacityTable = {}
    FreshwaterSourcingAvailabilityTable = {}
    PadOffloadingCapacityTable = {}
    StorageOffloadingCapacityTable = {}
    ProcessingCapacityPadTable = {}
    ProcessingCapacityStorageTable = {}
    # NOTE: ("D0") is just the string "D0" — the parentheses are a no-op,
    # these are single-key dicts, not tuple-keyed ones.
    PipelineCapacityIncrementsTable = {("D0"): 0}
    DisposalCapacityIncrementsTable = {("I0"): 0}
    StorageDisposalCapacityIncrementsTable = {("C0"): 0}
    TruckingTimeTable = {}
    DisposalCapExTable = {("K02", "I0"): 0}
    StorageCapExTable = {}
    PipelineCapExTable = {}
    DisposalOperationalCostTable = {}
    TreatmentOperationalCostTable = {}
    ReuseOperationalCostTable = {}
    StorageOperationalCostTable = {}
    StorageOperationalCreditTable = {}
    PipelineOperationalCostTable = {}
    TruckingHourlyCostTable = {}
    FreshSourcingCostTable = {}
    InitialTankLevelTable = {}
model.p_gamma_Completions = Param(
model.s_P,
model.s_T,
default=0,
initialize=df_parameters["CompletionsDemand"],
doc="Completions water demand [bbl/day]",
)
if model.config.production_tanks == ProdTank.individual:
model.p_beta_Production = Param(
model.s_P,
model.s_A,
model.s_T,
default=0,
initialize=df_parameters["ProductionRates"],
doc="Produced water supply forecast [bbl/day]",
)
model.p_sigma_ProdTank = Param(
model.s_P, model.s_A, default=500, doc="Production tank capacity [bbl]"
)
model.p_lambda_ProdTank = Param(
model.s_P,
model.s_A,
default=0,
initialize=InitialTankLevelTable,
doc="Initial water level in " "production tank [bbl]",
)
elif model.config.production_tanks == ProdTank.equalized:
model.p_beta_Production = Param(
model.s_P,
model.s_T,
default=0,
initialize=df_parameters["PadRates"],
doc="Produced water supply " "forecast [bbl/day]",
)
model.p_sigma_ProdTank = Param(
model.s_P,
default=500,
initialize=df_parameters["ProductionTankCapacity"],
doc="Combined capacity equalized " "production tanks [bbl]",
)
model.p_lambda_ProdTank = Param(
model.s_P,
default=0,
initialize=InitialTankLevelTable,
doc="Initial water level in " "equalized production tanks [bbl]",
)
else:
raise Exception("storage type not supported")
model.p_beta_Flowback = Param(
model.s_P,
model.s_T,
default=0,
initialize=df_parameters["FlowbackRates"],
doc="Flowback supply forecast for a completions bad [bbl/day]",
)
model.p_sigma_Pipeline = Param(
model.s_L,
model.s_L,
default=0,
initialize=InitialPipelineCapacityTable,
doc="Initial daily pipeline capacity between two locations [bbl/day]",
)
model.p_sigma_Disposal = Param(
model.s_K,
default=0,
initialize=df_parameters["InitialDisposalCapacity"],
doc="Initial daily disposal capacity at disposal sites [bbl/day]",
)
model.p_sigma_Storage = Param(
model.s_S,
default=0,
initialize=InitialStorageCapacityTable,
doc="Initial storage capacity at storage site [bbl]",
)
model.p_sigma_PadStorage = Param(
model.s_CP,
model.s_T,
default=0,
initialize=df_parameters["CompletionsPadStorage"],
doc="Storage capacity at completions site [bbl]",
)
model.p_sigma_Treatment = Param(
model.s_R,
default=0,
initialize=df_parameters["TreatmentCapacity"],
doc="Initial daily treatment capacity at treatment site [bbl/day]",
)
model.p_sigma_Reuse = Param(
model.s_O,
default=0,
initialize=InitialReuseCapacityTable,
doc="Initial daily reuse capacity at reuse site [bbl/day]",
)
model.p_sigma_Freshwater = Param(
model.s_F,
model.s_T,
default=0,
initialize=df_parameters["FreshwaterSourcingAvailability"],
doc="daily freshwater sourcing capacity at freshwater source [bbl/day]",
)
# model.p_sigma_Disposal.pprint()
# model.p_sigma_Freshwater.pprint()
model.p_sigma_OffloadingPad = Param(
model.s_P,
default=9999999,
initialize=df_parameters["PadOffloadingCapacity"],
doc="Weekly truck offloading sourcing capacity per pad [bbl/day]",
)
model.p_sigma_OffloadingStorage = Param(
model.s_S,
default=9999999,
initialize=StorageOffloadingCapacityTable,
doc="Weekly truck offloading capacity per pad [bbl/day]",
)
model.p_sigma_MinTruckFlow = Param(
default=0,
initialize=df_parameters["MinTruckFlow"],
doc="Minimum truck capacity [bbl]",
)
model.p_sigma_MaxTruckFlow = Param(
default=0,
initialize=df_parameters["MaxTruckFlow"],
doc="Minimum truck capacity [bbl]",
)
model.p_sigma_ProcessingPad = Param(
model.s_P,
default=9999999,
initialize=ProcessingCapacityPadTable,
doc="Weekly processing (e.g. clarification) capacity per pad [bbl/day]",
)
model.p_sigma_ProcessingStorage = Param(
model.s_S,
default=9999999,
initialize=ProcessingCapacityStorageTable,
doc="Weekly processing (e.g. clarification) capacity per storage site [bbl/day]",
)
model.p_epsilon_Treatment = Param(
model.s_R,
model.s_W,
default=1.0,
initialize=df_parameters["TreatmentEfficiency"],
doc="Treatment efficiency [%]",
)
# COMMENT: Remove pipeline/disposal/storage capacity expansion increment parameters
model.p_delta_Pipeline = Param(
model.s_D,
default=10,
initialize=PipelineCapacityIncrementsTable,
doc="Pipeline capacity installation/expansion increments [bbl/day]",
)
model.p_delta_Disposal = Param(
model.s_I,
default=10,
initialize=DisposalCapacityIncrementsTable,
doc="Disposal capacity installation/expansion increments [bbl/day]",
)
model.p_delta_Storage = Param(
model.s_C,
default=10,
initialize=StorageDisposalCapacityIncrementsTable,
doc="Storage capacity installation/expansion increments [bbl]",
)
model.p_delta_Truck = Param(default=110, doc="Truck capacity [bbl]")
# COMMENT: Remove disposal/storage/pipeline lead time parameters
model.p_tau_Disposal = Param(
model.s_K, default=12, doc="Disposal construction/expansion lead time [days]"
)
model.p_tau_Storage = Param(
model.s_S, default=12, doc="Storage constructin/expansion lead time [days]"
)
model.p_tau_Pipeline = Param(
model.s_L,
model.s_L,
default=12,
doc="Pipeline construction/expansion lead time [days",
)
model.p_tau_Trucking = Param(
model.s_L,
model.s_L,
default=9999999,
initialize=df_parameters["DriveTimes"],
doc="Drive time between locations [hr]",
)
# model.p_tau_Trucking.pprint()
# COMMENT: Many more parameters missing. See documentation for details.
model.p_lambda_Storage = Param(
model.s_S, default=0, doc="Initial storage level at storage site [bbl]"
)
model.p_lambda_PadStorage = Param(
model.s_CP, default=0, doc="Initial storage level at completions site [bbl]"
)
model.p_theta_PadStorage = Param(
model.s_CP, default=0, doc="Terminal storage level at completions site [bbl]"
)
model.p_lambda_Pipeline = Param(
model.s_L, model.s_L, default=9999999, doc="Pipeline segment length [miles]"
)
# COMMENT: Remove disosal/storage/pipeline capital cost parameters
model.p_kappa_Disposal = Param(
model.s_K,
model.s_I,
default=9999999,
initialize=df_parameters["DisposalPipeCapEx"],
doc="Disposal construction/expansion capital cost for selected increment [$/bbl]",
)
model.p_kappa_Storage = Param(
model.s_S,
model.s_C,
default=9999999,
initialize=StorageCapExTable,
doc="Storage construction/expansion capital cost for selected increment [$/bbl]",
)
model.p_kappa_Pipeline = Param(
model.s_L,
model.s_L,
model.s_D,
default=9999999,
initialize=PipelineCapExTable,
doc="Pipeline construction/expansion capital cost for selected increment [$/bbl]",
)
model.p_pi_Disposal = Param(
model.s_K,
default=9999999,
initialize=df_parameters["DisposalOperationalCost"],
doc="Disposal operational cost [$/bbl]",
)
model.p_pi_Treatment = Param(
model.s_R,
default=9999999,
initialize=df_parameters["TreatmentOperationalCost"],
doc="Treatment operational cost [$/bbl",
)
model.p_pi_Reuse = Param(
model.s_CP,
default=9999999,
initialize=df_parameters["ReuseOperationalCost"],
doc="Reuse operational cost [$/bbl]",
)
model.p_pi_Storage = Param(
model.s_S,
default=9999999,
initialize=StorageOperationalCostTable,
doc="Storage deposit operational cost [$/bbl]",
)
model.p_pi_PadStorage = Param(
model.s_CP,
model.s_T,
default=0,
initialize=df_parameters["PadStorageCost"],
doc="Completions pad storage operational cost [$]",
)
model.p_rho_Storage = Param(
model.s_S,
default=0,
initialize=StorageOperationalCreditTable,
doc="Storage withdrawal operational credit [$/bbl]",
)
model.p_pi_Pipeline = Param(
model.s_L,
model.s_L,
default=0,
initialize=df_parameters["PipingOperationalCost"],
doc="Pipeline operational cost [$/bbl]",
)
model.p_pi_Trucking = Param(
model.s_L,
default=9999999,
initialize=df_parameters["TruckingHourlyCost"],
doc="Trucking hourly cost (by source) [$/hour]",
)
model.p_pi_Sourcing = Param(
model.s_F,
default=9999999,
initialize=df_parameters["FreshSourcingCost"],
doc="Fresh sourcing cost [$/bbl]",
)
# model.p_pi_Disposal.pprint()
# model.p_pi_Reuse.pprint()
# model.p_pi_Pipeline.pprint()
model.p_M_Flow = Param(default=9999999, doc="Big-M flow parameter [bbl/day]")
model.p_psi_FracDemand = Param(default=9999999, doc="Slack cost parameter [$]")
model.p_psi_Production = Param(default=9999999, doc="Slack cost parameter [$]")
model.p_psi_Flowback = Param(default=9999999, doc="Slack cost parameter [$]")
model.p_psi_PipelineCapacity = Param(
default=9999999, doc="Slack cost parameter [$]"
)
model.p_psi_StorageCapacity = Param(default=9999999, doc="Slack cost parameter [$]")
model.p_psi_DisposalCapacity = Param(
default=9999999, doc="Slack cost parameter [$]"
)
model.p_psi_TreatmentCapacity = Param(
default=9999999, doc="Slack cost parameter [$]"
)
model.p_psi_ReuseCapacity = Param(default=9999999, doc="Slack cost parameter [$]")
# model.p_sigma_Freshwater.pprint()
    ## Define objective function ##
    def ObjectiveFunctionRule(model):
        """Define the objective value v_Z as total cost minus storage credit.

        v_Z accumulates sourcing, disposal, treatment, reuse, piping,
        storage, pad-storage, trucking, CapEx (disposal/storage/pipeline)
        and slack costs, less the total storage withdrawal credit.
        """
        return model.v_Z == (
            model.v_C_TotalSourced
            + model.v_C_TotalDisposal
            + model.v_C_TotalTreatment
            + model.v_C_TotalReuse
            + model.v_C_TotalPiping
            + model.v_C_TotalStorage
            + model.v_C_TotalPadStorage
            + model.v_C_TotalTrucking
            + model.v_C_DisposalCapEx
            + model.v_C_StorageCapEx
            + model.v_C_PipelineCapEx
            + model.v_C_Slack
            - model.v_R_TotalStorage
        )
    # NOTE: the objective is modeled here as a defining Constraint on v_Z;
    # presumably an Objective component over v_Z is declared elsewhere.
    model.ObjectiveFunction = Constraint(
        rule=ObjectiveFunctionRule, doc="Objective function"
    )
    # model.ObjectiveFunction.pprint()
    ## Define constraints ##
    def CompletionsPadDemandBalanceRule(model, p, t):
        """Meet completions demand at pad p in period t.

        Demand equals piped water (from nodes, production pads, storage,
        other completions pads, treatment), sourced freshwater, trucked
        water (from production pads, storage, completions pads, freshwater),
        plus net pad-storage withdrawal and a demand slack.
        """
        return model.p_gamma_Completions[p, t] == (
            sum(model.v_F_Piped[n, p, t] for n in model.s_N if model.p_NCA[n, p])
            + sum(
                model.v_F_Piped[p_tilde, p, t]
                for p_tilde in model.s_PP
                if model.p_PCA[p_tilde, p]
            )
            + sum(model.v_F_Piped[s, p, t] for s in model.s_S if model.p_SCA[s, p])
            + sum(
                model.v_F_Piped[p_tilde, p, t]
                for p_tilde in model.s_CP
                if model.p_CCA[p_tilde, p]
            )
            + sum(model.v_F_Piped[r, p, t] for r in model.s_R if model.p_RCA[r, p])
            + sum(model.v_F_Sourced[f, p, t] for f in model.s_F if model.p_FCA[f, p])
            + sum(
                model.v_F_Trucked[p_tilde, p, t]
                for p_tilde in model.s_PP
                if model.p_PCT[p_tilde, p]
            )
            + sum(model.v_F_Trucked[s, p, t] for s in model.s_S if model.p_SCT[s, p])
            + sum(
                model.v_F_Trucked[p_tilde, p, t]
                for p_tilde in model.s_CP
                if model.p_CCT[p_tilde, p]
            )
            + sum(model.v_F_Trucked[f, p, t] for f in model.s_F if model.p_FCT[f, p])
            + model.v_F_PadStorageOut[p, t]
            - model.v_F_PadStorageIn[p, t]
            + model.v_S_FracDemand[p, t]
        )
    model.CompletionsPadDemandBalance = Constraint(
        model.s_CP,
        model.s_T,
        rule=CompletionsPadDemandBalanceRule,
        doc="Completions pad demand balance",
    )
    # model.CompletionsPadDemandBalance.pprint()
def CompletionsPadStorageBalanceRule(model, p, t):
if t == model.s_T.first():
return (
model.v_L_PadStorage[p, t]
== model.p_lambda_PadStorage[p]
+ model.v_F_PadStorageIn[p, t]
- model.v_F_PadStorageOut[p, t]
)
else:
return (
model.v_L_PadStorage[p, t]
== model.v_L_PadStorage[p, model.s_T.prev(t)]
+ model.v_F_PadStorageIn[p, t]
- model.v_F_PadStorageOut[p, t]
)
model.CompletionsPadStorageBalance = Constraint(
model.s_CP,
model.s_T,
rule=CompletionsPadStorageBalanceRule,
doc="Completions pad storage balance",
)
# model.CompletionsPadStorageBalance.pprint()
def CompletionsPadStorageCapacityRule(model, p, t):
return (
model.v_L_PadStorage[p, t]
<= model.vb_z_PadStorage[p, t] * model.p_sigma_PadStorage[p, t]
)
model.CompletionsPadStorageCapacity = Constraint(
model.s_CP,
model.s_T,
rule=CompletionsPadStorageCapacityRule,
doc="Completions pad storage capacity",
)
# model.CompletionsPadStorageCapacity.pprint()
def TerminalCompletionsPadStorageLevelRule(model, p, t):
if t == model.s_T.last():
return model.v_L_PadStorage[p, t] <= model.p_theta_PadStorage[p]
else:
return Constraint.Skip
model.TerminalCompletionsPadStorageLevel = Constraint(
model.s_CP,
model.s_T,
rule=TerminalCompletionsPadStorageLevelRule,
doc="Terminal completions pad storage level",
)
# model.TerminalCompletionsPadStorageLevel.pprint()
    def FreshwaterSourcingCapacityRule(model, f, t):
        """Cap total piped + trucked freshwater from source f in period t.

        Skipped when source f has no outgoing pipeline (FCA) or trucking
        (FCT) arc to any completions pad.
        """
        if not (
            any(model.p_FCA[f, p] for p in model.s_CP)
            or any(model.p_FCT[f, p] for p in model.s_CP)
        ):
            return Constraint.Skip
        return (
            sum(model.v_F_Sourced[f, p, t] for p in model.s_CP if model.p_FCA[f, p])
            + sum(model.v_F_Trucked[f, p, t] for p in model.s_CP if model.p_FCT[f, p])
        ) <= model.p_sigma_Freshwater[f, t]
    model.FreshwaterSourcingCapacity = Constraint(
        model.s_F,
        model.s_T,
        rule=FreshwaterSourcingCapacityRule,
        doc="Freshwater sourcing capacity",
    )
    # model.FreshwaterSourcingCapacity.pprint()
    def CompletionsPadTruckOffloadingCapacityRule(model, p, t):
        """Cap total trucked deliveries to completions pad p in period t.

        Sums trucked water from production pads, storage sites, other
        completions pads, and freshwater sources against the pad's
        offloading capacity p_sigma_OffloadingPad.
        """
        return (
            sum(
                model.v_F_Trucked[p_tilde, p, t]
                for p_tilde in model.s_PP
                if model.p_PCT[p_tilde, p]
            )
            + sum(model.v_F_Trucked[s, p, t] for s in model.s_S if model.p_SCT[s, p])
            + sum(
                model.v_F_Trucked[p_tilde, p, t]
                for p_tilde in model.s_CP
                if model.p_CCT[p_tilde, p]
            )
            + sum(model.v_F_Trucked[f, p, t] for f in model.s_F if model.p_FCT[f, p])
        ) <= model.p_sigma_OffloadingPad[p]
    model.CompletionsPadTruckOffloadingCapacity = Constraint(
        model.s_CP,
        model.s_T,
        rule=CompletionsPadTruckOffloadingCapacityRule,
        doc="Completions pad truck offloading capacity",
    )
def TrucksMaxCapacityRule(model, l, l_tilde, t):
if model.p_LLT[l, l_tilde]:
return (
model.v_F_Trucked[l, l_tilde, t]
<= model.p_sigma_MaxTruckFlow * model.vb_y_Truck[l, l_tilde, t]
)
else:
return Constraint.Skip
model.TrucksMaxCapacity = Constraint(
model.s_L,
model.s_L,
model.s_T,
rule=TrucksMaxCapacityRule,
doc="Maximum amount of water that can be transported by trucks",
)
def TrucksMinCapacityRule(model, l, l_tilde, t):
if model.p_LLT[l, l_tilde]:
return (
model.v_F_Trucked[l, l_tilde, t]
>= model.p_sigma_MinTruckFlow * model.vb_y_Truck[l, l_tilde, t]
)
else:
return Constraint.Skip
model.TrucksMinCapacity = Constraint(
model.s_L,
model.s_L,
model.s_T,
rule=TrucksMinCapacityRule,
doc="Minimum amount of water that can be transported by trucks",
)
# model.CompletionsPadTruckOffloadingCapacity.pprint()
    def StorageSiteTruckOffloadingCapacityRule(model, s, t):
        """Cap trucked deliveries (from production and completions pads) to
        storage site s at its truck offloading capacity."""
        return (
            sum(model.v_F_Trucked[p, s, t] for p in model.s_PP if model.p_PST[p, s])
            + sum(model.v_F_Trucked[p, s, t] for p in model.s_CP if model.p_CST[p, s])
            <= model.p_sigma_OffloadingStorage[s]
        )
    model.StorageSiteTruckOffloadingCapacity = Constraint(
        model.s_S,
        model.s_T,
        rule=StorageSiteTruckOffloadingCapacityRule,
        doc="Storage site truck offloading capacity",
    )
    # model.StorageSiteTruckOffloadingCapacity.pprint()
    def StorageSiteProcessingCapacityRule(model, s, t):
        """Cap total inflow to storage site s (piped from nodes plus trucked
        from pads) at its processing capacity."""
        return (
            sum(model.v_F_Piped[n, s, t] for n in model.s_N if model.p_NSA[n, s])
            + sum(model.v_F_Trucked[p, s, t] for p in model.s_PP if model.p_PST[p, s])
            + sum(model.v_F_Trucked[p, s, t] for p in model.s_CP if model.p_CST[p, s])
            <= model.p_sigma_ProcessingStorage[s]
        )
    model.StorageSiteProcessingCapacity = Constraint(
        model.s_S,
        model.s_T,
        rule=StorageSiteProcessingCapacityRule,
        doc="Storage site processing capacity",
    )
    # model.StorageSiteProcessingCapacity.pprint()
    # --- Production tank constraints --------------------------------------
    # Each constraint group below has two variants selected by the
    # production_tanks configuration: individual tanks indexed by
    # (pad, tank, time), or equalized tanks indexed by (pad, time).
    if model.config.production_tanks == ProdTank.individual:
        def ProductionTankBalanceRule(model, p, a, t):
            """Tank water balance: level = previous level (initial level in
            the first period) + production - drained volume.

            Only defined for valid pad-to-tank links (p_PAL); all other
            index combinations are skipped.

            NOTE(review): unlike the equalized variant below, this balance
            does not include p_beta_Flowback — confirm that is intentional.
            """
            if t == model.s_T.first():
                if p in model.s_P and a in model.s_A:
                    if model.p_PAL[p, a]:
                        return (
                            model.v_L_ProdTank[p, a, t]
                            == model.p_lambda_ProdTank[p, a]
                            + model.p_beta_Production[p, a, t]
                            - model.v_F_Drain[p, a, t]
                        )
                    else:
                        return Constraint.Skip
                else:
                    return Constraint.Skip
            else:
                if p in model.s_P and a in model.s_A:
                    if model.p_PAL[p, a]:
                        return (
                            model.v_L_ProdTank[p, a, t]
                            == model.v_L_ProdTank[p, a, model.s_T.prev(t)]
                            + model.p_beta_Production[p, a, t]
                            - model.v_F_Drain[p, a, t]
                        )
                    else:
                        return Constraint.Skip
                else:
                    return Constraint.Skip
        model.ProductionTankBalance = Constraint(
            model.s_P,
            model.s_A,
            model.s_T,
            rule=ProductionTankBalanceRule,
            doc="Production tank balance",
        )
    elif model.config.production_tanks == ProdTank.equalized:
        def ProductionTankBalanceRule(model, p, t):
            """Equalized-tank water balance: level = previous level (initial
            level in the first period) + production + flowback - drain."""
            if t == model.s_T.first():
                if p in model.s_P:
                    return (
                        model.v_L_ProdTank[p, t]
                        == model.p_lambda_ProdTank[p]
                        + model.p_beta_Production[p, t]
                        + model.p_beta_Flowback[p, t]
                        - model.v_F_Drain[p, t]
                    )
                else:
                    return Constraint.Skip
            else:
                if p in model.s_P:
                    return (
                        model.v_L_ProdTank[p, t]
                        == model.v_L_ProdTank[p, model.s_T.prev(t)]
                        + model.p_beta_Production[p, t]
                        + model.p_beta_Flowback[p, t]
                        - model.v_F_Drain[p, t]
                    )
                else:
                    return Constraint.Skip
        model.ProductionTankBalance = Constraint(
            model.s_P,
            model.s_T,
            rule=ProductionTankBalanceRule,
            doc="Production tank balance",
        )
    else:
        raise Exception("storage type not supported")
    # model.ProductionTankBalance.pprint()
    if model.config.production_tanks == ProdTank.individual:
        def ProductionTankCapacityRule(model, p, a, t):
            """Cap each linked tank's level at its capacity p_sigma_ProdTank."""
            if p in model.s_P and a in model.s_A:
                if model.p_PAL[p, a]:
                    return model.v_L_ProdTank[p, a, t] <= model.p_sigma_ProdTank[p, a]
                else:
                    return Constraint.Skip
            else:
                return Constraint.Skip
        model.ProductionTankCapacity = Constraint(
            model.s_P,
            model.s_A,
            model.s_T,
            rule=ProductionTankCapacityRule,
            doc="Production tank capacity",
        )
    elif model.config.production_tanks == ProdTank.equalized:
        def ProductionTankCapacityRule(model, p, t):
            """Cap each pad's equalized tank level at its combined capacity."""
            if p in model.s_P:
                return model.v_L_ProdTank[p, t] <= model.p_sigma_ProdTank[p]
            else:
                return Constraint.Skip
        model.ProductionTankCapacity = Constraint(
            model.s_P,
            model.s_T,
            rule=ProductionTankCapacityRule,
            doc="Production tank capacity",
        )
    else:
        raise Exception("storage type not supported")
    # model.ProductionTankCapacity.pprint()
    if model.config.production_tanks == ProdTank.individual:
        def TankToPadProductionBalanceRule(model, p, t):
            """Pad production equals the total drained from its linked tanks."""
            return (
                sum(model.v_F_Drain[p, a, t] for a in model.s_A if model.p_PAL[p, a])
                == model.v_B_Production[p, t]
            )
        model.TankToPadProductionBalance = Constraint(
            model.s_P,
            model.s_T,
            rule=TankToPadProductionBalanceRule,
            doc="Tank-to-pad production balance",
        )
    elif model.config.production_tanks == ProdTank.equalized:
        def TankToPadProductionBalanceRule(model, p, t):
            """Pad production equals the drain from its equalized tank."""
            return model.v_F_Drain[p, t] == model.v_B_Production[p, t]
        model.TankToPadProductionBalance = Constraint(
            model.s_P,
            model.s_T,
            rule=TankToPadProductionBalanceRule,
            doc="Tank-to-pad production balance",
        )
    else:
        raise Exception("storage type not supported")
    # model.TankToPadProductionBalance.pprint()
    if model.config.production_tanks == ProdTank.individual:
        def TerminalProductionTankLevelBalanceRule(model, p, a, t):
            """Force each linked tank's last-period level back to its initial
            level; all earlier periods are skipped."""
            if t == model.s_T.last():
                if p in model.s_P and a in model.s_A:
                    if model.p_PAL[p, a]:
                        return (
                            model.v_L_ProdTank[p, a, t] == model.p_lambda_ProdTank[p, a]
                        )
                    else:
                        return Constraint.Skip
                else:
                    return Constraint.Skip
            else:
                return Constraint.Skip
        model.TerminalProductionTankLevelBalance = Constraint(
            model.s_P,
            model.s_A,
            model.s_T,
            rule=TerminalProductionTankLevelBalanceRule,
            doc="Terminal production tank level balance",
        )
    elif model.config.production_tanks == ProdTank.equalized:
        def TerminalProductionTankLevelBalanceRule(model, p, t):
            """Force each pad's last-period equalized tank level back to its
            initial level; all earlier periods are skipped."""
            if t == model.s_T.last():
                if p in model.s_P:
                    return model.v_L_ProdTank[p, t] == model.p_lambda_ProdTank[p]
                else:
                    return Constraint.Skip
            else:
                return Constraint.Skip
        model.TerminalProductionTankLevelBalance = Constraint(
            model.s_P,
            model.s_T,
            rule=TerminalProductionTankLevelBalanceRule,
            doc="Terminal production tank level balance",
        )
    else:
        raise Exception("storage type not supported")
    # model.TerminalProductionTankLevelBalance.pprint()
    def ProductionPadSupplyBalanceRule(model, p, t):
        """Route all produced water at pad p: production equals piped flows
        (to nodes, completions pads, production pads) plus trucked flows
        (to completions pads, disposal, storage, treatment, reuse) plus a
        production slack."""
        return (
            model.v_B_Production[p, t]
            == sum(model.v_F_Piped[p, n, t] for n in model.s_N if model.p_PNA[p, n])
            + sum(
                model.v_F_Piped[p, p_tilde, t]
                for p_tilde in model.s_CP
                if model.p_PCA[p, p_tilde]
            )
            + sum(
                model.v_F_Piped[p, p_tilde, t]
                for p_tilde in model.s_PP
                if model.p_PPA[p, p_tilde]
            )
            + sum(
                model.v_F_Trucked[p, p_tilde, t]
                for p_tilde in model.s_CP
                if model.p_PCT[p, p_tilde]
            )
            + sum(model.v_F_Trucked[p, k, t] for k in model.s_K if model.p_PKT[p, k])
            + sum(model.v_F_Trucked[p, s, t] for s in model.s_S if model.p_PST[p, s])
            + sum(model.v_F_Trucked[p, r, t] for r in model.s_R if model.p_PRT[p, r])
            + sum(model.v_F_Trucked[p, o, t] for o in model.s_O if model.p_POT[p, o])
            + model.v_S_Production[p, t]
        )
    model.ProductionPadSupplyBalance = Constraint(
        model.s_PP,
        model.s_T,
        rule=ProductionPadSupplyBalanceRule,
        doc="Production pad supply balance",
    )
    # model.ProductionPadSupplyBalance.pprint()
    def CompletionsPadSupplyBalanceRule(model, p, t):
        """Route all flowback water at completions pad p: production equals
        piped flows (to nodes, other completions pads) plus trucked flows
        (to disposal, storage, treatment, completions pads) plus a flowback
        slack."""
        return (
            model.v_B_Production[p, t]
            == sum(model.v_F_Piped[p, n, t] for n in model.s_N if model.p_CNA[p, n])
            + sum(
                model.v_F_Piped[p, p_tilde, t]
                for p_tilde in model.s_CP
                if model.p_CCA[p, p_tilde]
            )
            + sum(model.v_F_Trucked[p, k, t] for k in model.s_K if model.p_CKT[p, k])
            + sum(model.v_F_Trucked[p, s, t] for s in model.s_S if model.p_CST[p, s])
            + sum(model.v_F_Trucked[p, r, t] for r in model.s_R if model.p_CRT[p, r])
            + sum(
                model.v_F_Trucked[p, p_tilde, t]
                for p_tilde in model.s_CP
                if model.p_CCT[p, p_tilde]
            )
            + model.v_S_Flowback[p, t]
        )
    model.CompletionsPadSupplyBalance = Constraint(
        model.s_CP,
        model.s_T,
        rule=CompletionsPadSupplyBalanceRule,
        doc="Completions pad supply balance (i.e. flowback balance)",
    )
    # model.CompletionsPadSupplyBalance.pprint()
    def NetworkNodeBalanceRule(model, n, t):
        """Flow conservation at network node n: total piped inflow (from
        production pads, completions pads, storage, other nodes) equals
        total piped outflow (to nodes, completions pads, disposal,
        treatment, storage, reuse)."""
        return sum(
            model.v_F_Piped[p, n, t] for p in model.s_PP if model.p_PNA[p, n]
        ) + sum(
            model.v_F_Piped[p, n, t] for p in model.s_CP if model.p_CNA[p, n]
        ) + sum(
            model.v_F_Piped[s, n, t] for s in model.s_S if model.p_SNA[s, n]
        ) + sum(
            model.v_F_Piped[n_tilde, n, t]
            for n_tilde in model.s_N
            if model.p_NNA[n_tilde, n]
        ) == sum(
            model.v_F_Piped[n, n_tilde, t]
            for n_tilde in model.s_N
            if model.p_NNA[n, n_tilde]
        ) + sum(
            model.v_F_Piped[n, p, t] for p in model.s_CP if model.p_NCA[n, p]
        ) + sum(
            model.v_F_Piped[n, k, t] for k in model.s_K if model.p_NKA[n, k]
        ) + sum(
            model.v_F_Piped[n, r, t] for r in model.s_R if model.p_NRA[n, r]
        ) + sum(
            model.v_F_Piped[n, s, t] for s in model.s_S if model.p_NSA[n, s]
        ) + sum(
            model.v_F_Piped[n, o, t] for o in model.s_O if model.p_NOA[n, o]
        )
    model.NetworkBalance = Constraint(
        model.s_N, model.s_T, rule=NetworkNodeBalanceRule, doc="Network node balance"
    )
    # model.NetworkBalance.pprint()
def BidirectionalFlowRule1(model, l, l_tilde, t):
if l in model.s_PP and l_tilde in model.s_CP:
if model.p_PCA[l, l_tilde]:
return (
model.vb_y_Flow[l, l_tilde, t] + model.vb_y_Flow[l_tilde, l, t] == 1
)
else:
return Constraint.Skip
elif l in model.s_PP and l_tilde in model.s_N:
if model.p_PNA[l, l_tilde]:
return (
model.vb_y_Flow[l, l_tilde, t] + model.vb_y_Flow[l_tilde, l, t] == 1
)
else:
return Constraint.Skip
elif l in model.s_PP and l_tilde in model.s_PP:
if model.p_PPA[l, l_tilde]:
return (
model.vb_y_Flow[l, l_tilde, t] + model.vb_y_Flow[l_tilde, l, t] == 1
)
else:
return Constraint.Skip
elif l in model.s_CP and l_tilde in model.s_N:
if model.p_CNA[l, l_tilde]:
return (
model.vb_y_Flow[l, l_tilde, t] + model.vb_y_Flow[l_tilde, l, t] == 1
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_N:
if model.p_NNA[l, l_tilde]:
return (
model.vb_y_Flow[l, l_tilde, t] + model.vb_y_Flow[l_tilde, l, t] == 1
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_CP:
if model.p_NCA[l, l_tilde]:
return (
model.vb_y_Flow[l, l_tilde, t] + model.vb_y_Flow[l_tilde, l, t] == 1
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_K:
if model.p_NKA[l, l_tilde]:
return (
model.vb_y_Flow[l, l_tilde, t] + model.vb_y_Flow[l_tilde, l, t] == 1
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_S:
if model.p_NSA[l, l_tilde]:
return (
model.vb_y_Flow[l, l_tilde, t] + model.vb_y_Flow[l_tilde, l, t] == 1
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_R:
if model.p_NRA[l, l_tilde]:
return (
model.vb_y_Flow[l, l_tilde, t] + model.vb_y_Flow[l_tilde, l, t] == 1
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_O:
if model.p_NOA[l, l_tilde]:
return (
model.vb_y_Flow[l, l_tilde, t] + model.vb_y_Flow[l_tilde, l, t] == 1
)
else:
return Constraint.Skip
elif l in model.s_R and l_tilde in model.s_N:
if model.p_RNA[l, l_tilde]:
return (
model.vb_y_Flow[l, l_tilde, t] + model.vb_y_Flow[l_tilde, l, t] == 1
)
else:
return Constraint.Skip
elif l in model.s_R and l_tilde in model.s_K:
if model.p_RKA[l, l_tilde]:
return (
model.vb_y_Flow[l, l_tilde, t] + model.vb_y_Flow[l_tilde, l, t] == 1
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_CP:
if model.p_SCA[l, l_tilde]:
return (
model.vb_y_Flow[l, l_tilde, t] + model.vb_y_Flow[l_tilde, l, t] == 1
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_K:
if model.p_SKA[l, l_tilde]:
return (
model.vb_y_Flow[l, l_tilde, t] + model.vb_y_Flow[l_tilde, l, t] == 1
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_R:
if model.p_SRA[l, l_tilde]:
return (
model.vb_y_Flow[l, l_tilde, t] + model.vb_y_Flow[l_tilde, l, t] == 1
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_O:
if model.p_SOA[l, l_tilde]:
return (
model.vb_y_Flow[l, l_tilde, t] + model.vb_y_Flow[l_tilde, l, t] == 1
)
else:
return Constraint.Skip
else:
return Constraint.Skip
model.BidirectionalFlow1 = Constraint(
model.s_L,
model.s_L,
model.s_T,
rule=BidirectionalFlowRule1,
doc="Bi-directional flow",
)
# model.BidirectionalFlow1.pprint()
def BidirectionalFlowRule2(model, l, l_tilde, t):
if l in model.s_PP and l_tilde in model.s_CP:
if model.p_PCA[l, l_tilde]:
return (
model.v_F_Piped[l, l_tilde, t]
<= model.vb_y_Flow[l, l_tilde, t] * model.p_M_Flow
)
else:
return Constraint.Skip
elif l in model.s_PP and l_tilde in model.s_N:
if model.p_PNA[l, l_tilde]:
return (
model.v_F_Piped[l, l_tilde, t]
<= model.vb_y_Flow[l, l_tilde, t] * model.p_M_Flow
)
else:
return Constraint.Skip
elif l in model.s_PP and l_tilde in model.s_PP:
if model.p_PPA[l, l_tilde]:
return (
model.v_F_Piped[l, l_tilde, t]
<= model.vb_y_Flow[l, l_tilde, t] * model.p_M_Flow
)
else:
return Constraint.Skip
elif l in model.s_CP and l_tilde in model.s_N:
if model.p_CNA[l, l_tilde]:
return (
model.v_F_Piped[l, l_tilde, t]
<= model.vb_y_Flow[l, l_tilde, t] * model.p_M_Flow
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_N:
if model.p_NNA[l, l_tilde]:
return (
model.v_F_Piped[l, l_tilde, t]
<= model.vb_y_Flow[l, l_tilde, t] * model.p_M_Flow
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_CP:
if model.p_NCA[l, l_tilde]:
return (
model.v_F_Piped[l, l_tilde, t]
<= model.vb_y_Flow[l, l_tilde, t] * model.p_M_Flow
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_K:
if model.p_NKA[l, l_tilde]:
return (
model.v_F_Piped[l, l_tilde, t]
<= model.vb_y_Flow[l, l_tilde, t] * model.p_M_Flow
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_S:
if model.p_NSA[l, l_tilde]:
return (
model.v_F_Piped[l, l_tilde, t]
<= model.vb_y_Flow[l, l_tilde, t] * model.p_M_Flow
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_R:
if model.p_NRA[l, l_tilde]:
return (
model.v_F_Piped[l, l_tilde, t]
<= model.vb_y_Flow[l, l_tilde, t] * model.p_M_Flow
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_O:
if model.p_NOA[l, l_tilde]:
return (
model.v_F_Piped[l, l_tilde, t]
<= model.vb_y_Flow[l, l_tilde, t] * model.p_M_Flow
)
else:
return Constraint.Skip
elif l in model.s_R and l_tilde in model.s_N:
if model.p_RNA[l, l_tilde]:
return (
model.v_F_Piped[l, l_tilde, t]
<= model.vb_y_Flow[l, l_tilde, t] * model.p_M_Flow
)
else:
return Constraint.Skip
elif l in model.s_R and l_tilde in model.s_K:
if model.p_RKA[l, l_tilde]:
return (
model.v_F_Piped[l, l_tilde, t]
<= model.vb_y_Flow[l, l_tilde, t] * model.p_M_Flow
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_N:
if model.p_SNA[l, l_tilde]:
return (
model.v_F_Piped[l, l_tilde, t]
<= model.vb_y_Flow[l, l_tilde, t] * model.p_M_Flow
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_CP:
if model.p_SCA[l, l_tilde]:
return (
model.v_F_Piped[l, l_tilde, t]
<= model.vb_y_Flow[l, l_tilde, t] * model.p_M_Flow
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_K:
if model.p_SKA[l, l_tilde]:
return (
model.v_F_Piped[l, l_tilde, t]
<= model.vb_y_Flow[l, l_tilde, t] * model.p_M_Flow
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_R:
if model.p_SRA[l, l_tilde]:
return (
model.v_F_Piped[l, l_tilde, t]
<= model.vb_y_Flow[l, l_tilde, t] * model.p_M_Flow
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_O:
if model.p_SOA[l, l_tilde]:
return (
model.v_F_Piped[l, l_tilde, t]
<= model.vb_y_Flow[l, l_tilde, t] * model.p_M_Flow
)
else:
return Constraint.Skip
else:
return Constraint.Skip
model.BidirectionalFlow2 = Constraint(
model.s_L,
model.s_L,
model.s_T,
rule=BidirectionalFlowRule2,
doc="Bi-directional flow",
)
# model.BidirectionalFlow2.pprint()
def StorageSiteBalanceRule(model, s, t):
if t == model.s_T.first():
return model.v_L_Storage[s, t] == model.p_lambda_Storage[s] + sum(
model.v_F_Piped[n, s, t] for n in model.s_N if model.p_NSA[n, s]
) + sum(
model.v_F_Trucked[p, s, t] for p in model.s_PP if model.p_PST[p, s]
) + sum(
model.v_F_Trucked[p, s, t] for p in model.s_CP if model.p_CST[p, s]
) - sum(
model.v_F_Piped[s, n, t] for n in model.s_N if model.p_SNA[s, n]
) - sum(
model.v_F_Piped[s, p, t] for p in model.s_CP if model.p_SCA[s, p]
) - sum(
model.v_F_Piped[s, k, t] for k in model.s_K if model.p_SKA[s, k]
) - sum(
model.v_F_Piped[s, r, t] for r in model.s_R if model.p_SRA[s, r]
) - sum(
model.v_F_Piped[s, o, t] for o in model.s_O if model.p_SOA[s, o]
) - sum(
model.v_F_Trucked[s, p, t] for p in model.s_CP if model.p_SCT[s, p]
) - sum(
model.v_F_Trucked[s, k, t] for k in model.s_K if model.p_SKT[s, k]
)
else:
return model.v_L_Storage[s, t] == model.v_L_Storage[
s, model.s_T.prev(t)
] + sum(
model.v_F_Piped[n, s, t] for n in model.s_N if model.p_NSA[n, s]
) + sum(
model.v_F_Trucked[p, s, t] for p in model.s_PP if model.p_PST[p, s]
) + sum(
model.v_F_Trucked[p, s, t] for p in model.s_CP if model.p_CST[p, s]
) - sum(
model.v_F_Piped[s, n, t] for n in model.s_N if model.p_SNA[s, n]
) - sum(
model.v_F_Piped[s, p, t] for p in model.s_CP if model.p_SCA[s, p]
) - sum(
model.v_F_Piped[s, k, t] for k in model.s_K if model.p_SKA[s, k]
) - sum(
model.v_F_Piped[s, r, t] for r in model.s_R if model.p_SRA[s, r]
) - sum(
model.v_F_Piped[s, o, t] for o in model.s_O if model.p_SOA[s, o]
) - sum(
model.v_F_Trucked[s, p, t] for p in model.s_CP if model.p_SCT[s, p]
) - sum(
model.v_F_Trucked[s, k, t] for k in model.s_K if model.p_SKT[s, k]
)
model.StorageSiteBalance = Constraint(
model.s_S,
model.s_T,
rule=StorageSiteBalanceRule,
doc="Storage site balance rule",
)
# model.StorageSiteBalance.pprint()
def PipelineCapacityExpansionRule(model, l, l_tilde):
if l in model.s_PP and l_tilde in model.s_CP:
if model.p_PCA[l, l_tilde]:
return (
model.v_F_Capacity[l, l_tilde]
== model.p_sigma_Pipeline[l, l_tilde]
+ model.v_S_PipelineCapacity[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_PP and l_tilde in model.s_N:
if model.p_PNA[l, l_tilde]:
return (
model.v_F_Capacity[l, l_tilde]
== model.p_sigma_Pipeline[l, l_tilde]
+ model.v_S_PipelineCapacity[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_PP and l_tilde in model.s_PP:
if model.p_PPA[l, l_tilde]:
return (
model.v_F_Capacity[l, l_tilde]
== model.p_sigma_Pipeline[l, l_tilde]
+ model.v_S_PipelineCapacity[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_CP and l_tilde in model.s_N:
if model.p_CNA[l, l_tilde]:
return (
model.v_F_Capacity[l, l_tilde]
== model.p_sigma_Pipeline[l, l_tilde]
+ model.v_S_PipelineCapacity[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_N:
if model.p_NNA[l, l_tilde]:
return (
model.v_F_Capacity[l, l_tilde]
== model.p_sigma_Pipeline[l, l_tilde]
+ model.v_S_PipelineCapacity[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_CP:
if model.p_NCA[l, l_tilde]:
return (
model.v_F_Capacity[l, l_tilde]
== model.p_sigma_Pipeline[l, l_tilde]
+ model.v_S_PipelineCapacity[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_K:
if model.p_NKA[l, l_tilde]:
return (
model.v_F_Capacity[l, l_tilde]
== model.p_sigma_Pipeline[l, l_tilde]
+ model.v_S_PipelineCapacity[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_S:
if model.p_NSA[l, l_tilde]:
return (
model.v_F_Capacity[l, l_tilde]
== model.p_sigma_Pipeline[l, l_tilde]
+ model.v_S_PipelineCapacity[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_R:
if model.p_NRA[l, l_tilde]:
return (
model.v_F_Capacity[l, l_tilde]
== model.p_sigma_Pipeline[l, l_tilde]
+ model.v_S_PipelineCapacity[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_O:
if model.p_NOA[l, l_tilde]:
return (
model.v_F_Capacity[l, l_tilde]
== model.p_sigma_Pipeline[l, l_tilde]
+ model.v_S_PipelineCapacity[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_F and l_tilde in model.s_CP:
if model.p_FCA[l, l_tilde]:
return (
model.v_F_Capacity[l, l_tilde]
== model.p_sigma_Pipeline[l, l_tilde]
+ model.v_S_PipelineCapacity[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_R and l_tilde in model.s_N:
if model.p_RNA[l, l_tilde]:
return (
model.v_F_Capacity[l, l_tilde]
== model.p_sigma_Pipeline[l, l_tilde]
+ model.v_S_PipelineCapacity[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_R and l_tilde in model.s_K:
if model.p_RKA[l, l_tilde]:
return (
model.v_F_Capacity[l, l_tilde]
== model.p_sigma_Pipeline[l, l_tilde]
+ model.v_S_PipelineCapacity[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_N:
if model.p_SNA[l, l_tilde]:
return (
model.v_F_Capacity[l, l_tilde]
== model.p_sigma_Pipeline[l, l_tilde]
+ model.v_S_PipelineCapacity[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_CP:
if model.p_SCA[l, l_tilde]:
return (
model.v_F_Capacity[l, l_tilde]
== model.p_sigma_Pipeline[l, l_tilde]
+ model.v_S_PipelineCapacity[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_K:
if model.p_SKA[l, l_tilde]:
return (
model.v_F_Capacity[l, l_tilde]
== model.p_sigma_Pipeline[l, l_tilde]
+ model.v_S_PipelineCapacity[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_R:
if model.p_SRA[l, l_tilde]:
return (
model.v_F_Capacity[l, l_tilde]
== model.p_sigma_Pipeline[l, l_tilde]
+ model.v_S_PipelineCapacity[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_O:
if model.p_SOA[l, l_tilde]:
return (
model.v_F_Capacity[l, l_tilde]
== model.p_sigma_Pipeline[l, l_tilde]
+ model.v_S_PipelineCapacity[l, l_tilde]
)
else:
return Constraint.Skip
else:
return Constraint.Skip
model.PipelineCapacityExpansion = Constraint(
model.s_L,
model.s_L,
rule=PipelineCapacityExpansionRule,
doc="Pipeline capacity construction/expansion",
)
# model.PipelineCapacityExpansion.pprint()
def PipelineCapacityRule(model, l, l_tilde, t):
if l in model.s_PP and l_tilde in model.s_CP:
if model.p_PCA[l, l_tilde]:
return model.v_F_Piped[l, l_tilde, t] <= model.v_F_Capacity[l, l_tilde]
else:
return Constraint.Skip
elif l in model.s_PP and l_tilde in model.s_N:
if model.p_PNA[l, l_tilde]:
return model.v_F_Piped[l, l_tilde, t] <= model.v_F_Capacity[l, l_tilde]
else:
return Constraint.Skip
elif l in model.s_PP and l_tilde in model.s_PP:
if model.p_PPA[l, l_tilde]:
return model.v_F_Piped[l, l_tilde, t] <= model.v_F_Capacity[l, l_tilde]
else:
return Constraint.Skip
elif l in model.s_CP and l_tilde in model.s_N:
if model.p_CNA[l, l_tilde]:
return model.v_F_Piped[l, l_tilde, t] <= model.v_F_Capacity[l, l_tilde]
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_N:
if model.p_NNA[l, l_tilde]:
return model.v_F_Piped[l, l_tilde, t] <= model.v_F_Capacity[l, l_tilde]
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_CP:
if model.p_NCA[l, l_tilde]:
return model.v_F_Piped[l, l_tilde, t] <= model.v_F_Capacity[l, l_tilde]
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_K:
if model.p_NKA[l, l_tilde]:
return model.v_F_Piped[l, l_tilde, t] <= model.v_F_Capacity[l, l_tilde]
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_S:
if model.p_NSA[l, l_tilde]:
return model.v_F_Piped[l, l_tilde, t] <= model.v_F_Capacity[l, l_tilde]
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_R:
if model.p_NRA[l, l_tilde]:
return model.v_F_Piped[l, l_tilde, t] <= model.v_F_Capacity[l, l_tilde]
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_O:
if model.p_NOA[l, l_tilde]:
return model.v_F_Piped[l, l_tilde, t] <= model.v_F_Capacity[l, l_tilde]
else:
return Constraint.Skip
elif l in model.s_F and l_tilde in model.s_CP:
if model.p_FCA[l, l_tilde]:
return model.v_F_Piped[l, l_tilde, t] <= model.v_F_Capacity[l, l_tilde]
else:
return Constraint.Skip
elif l in model.s_R and l_tilde in model.s_N:
if model.p_RNA[l, l_tilde]:
return model.v_F_Piped[l, l_tilde, t] <= model.v_F_Capacity[l, l_tilde]
else:
return Constraint.Skip
elif l in model.s_R and l_tilde in model.s_K:
if model.p_RKA[l, l_tilde]:
return model.v_F_Piped[l, l_tilde, t] <= model.v_F_Capacity[l, l_tilde]
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_N:
if model.p_SNA[l, l_tilde]:
return model.v_F_Piped[l, l_tilde, t] <= model.v_F_Capacity[l, l_tilde]
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_CP:
if model.p_SCA[l, l_tilde]:
return model.v_F_Piped[l, l_tilde, t] <= model.v_F_Capacity[l, l_tilde]
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_K:
if model.p_SKA[l, l_tilde]:
return model.v_F_Piped[l, l_tilde, t] <= model.v_F_Capacity[l, l_tilde]
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_R:
if model.p_SRA[l, l_tilde]:
return model.v_F_Piped[l, l_tilde, t] <= model.v_F_Capacity[l, l_tilde]
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_O:
if model.p_SOA[l, l_tilde]:
return model.v_F_Piped[l, l_tilde, t] <= model.v_F_Capacity[l, l_tilde]
else:
return Constraint.Skip
else:
return Constraint.Skip
model.PipelineCapacity = Constraint(
model.s_L,
model.s_L,
model.s_T,
rule=PipelineCapacityRule,
doc="Pipeline capacity",
)
# model.PipelineCapacity.pprint()
def StorageCapacityExpansionRule(model, s):
return (
model.v_X_Capacity[s]
== model.p_sigma_Storage[s] + model.v_S_StorageCapacity[s]
)
model.StorageCapacityExpansion = Constraint(
model.s_S,
rule=StorageCapacityExpansionRule,
doc="Storage capacity construction/expansion",
)
# model.StorageCapacityExpansion.pprint()
def StorageCapacityRule(model, s, t):
return model.v_L_Storage[s, t] <= model.v_X_Capacity[s]
model.StorageCapacity = Constraint(
model.s_S, model.s_T, rule=StorageCapacityRule, doc="Storage capacity"
)
# model.StorageCapacity.pprint()
def DisposalCapacityExpansionRule(model, k):
return (
model.v_D_Capacity[k]
== model.p_sigma_Disposal[k] + model.v_S_DisposalCapacity[k]
)
model.DisposalCapacityExpansion = Constraint(
model.s_K,
rule=DisposalCapacityExpansionRule,
doc="Disposal capacity construction/expansion",
)
# model.DisposalCapacityExpansion1.pprint()
def DisposalCapacityRule(model, k, t):
return (
sum(model.v_F_Piped[n, k, t] for n in model.s_N if model.p_NKA[n, k])
+ sum(model.v_F_Piped[s, k, t] for s in model.s_S if model.p_SKA[s, k])
+ sum(model.v_F_Trucked[s, k, t] for s in model.s_S if model.p_SKT[s, k])
+ sum(model.v_F_Trucked[p, k, t] for p in model.s_PP if model.p_PKT[p, k])
+ sum(model.v_F_Trucked[p, k, t] for p in model.s_CP if model.p_CKT[p, k])
+ sum(model.v_F_Trucked[r, k, t] for r in model.s_R if model.p_RKT[r, k])
<= model.v_D_Capacity[k]
)
model.DisposalCapacity = Constraint(
model.s_K, model.s_T, rule=DisposalCapacityRule, doc="Disposal capacity"
)
# model.DisposalCapacity.pprint()
def TreatmentCapacityRule(model, r, t):
return (
sum(model.v_F_Piped[n, r, t] for n in model.s_N if model.p_NRA[n, r])
+ sum(model.v_F_Piped[s, r, t] for s in model.s_S if model.p_SRA[s, r])
+ sum(model.v_F_Trucked[p, r, t] for p in model.s_PP if model.p_PRT[p, r])
+ sum(model.v_F_Trucked[p, r, t] for p in model.s_CP if model.p_CRT[p, r])
<= model.p_sigma_Treatment[r] + model.v_S_TreatmentCapacity[r]
)
model.TreatmentCapacity = Constraint(
model.s_R, model.s_T, rule=TreatmentCapacityRule, doc="Treatment capacity"
)
# model.TreatmentCapacity.pprint()
def TreatmentBalanceRule(model, r, t):
return (
model.p_epsilon_Treatment[r, "TDS"]
* (
sum(model.v_F_Piped[n, r, t] for n in model.s_N if model.p_NRA[n, r])
+ sum(model.v_F_Piped[s, r, t] for s in model.s_S if model.p_SRA[s, r])
+ sum(
model.v_F_Trucked[p, r, t] for p in model.s_PP if model.p_PRT[p, r]
)
+ sum(
model.v_F_Trucked[p, r, t] for p in model.s_CP if model.p_CRT[p, r]
)
)
== sum(model.v_F_Piped[r, p, t] for p in model.s_CP if model.p_RCA[r, p])
+ model.v_F_UnusedTreatedWater[r, t]
)
model.TreatmentBalance = Constraint(
model.s_R,
model.s_T,
rule=simple_constraint_rule(TreatmentBalanceRule),
doc="Treatment balance",
)
def BeneficialReuseCapacityRule(model, o, t):
return (
sum(model.v_F_Piped[n, o, t] for n in model.s_N if model.p_NOA[n, o])
+ sum(model.v_F_Piped[s, o, t] for s in model.s_S if model.p_SOA[s, o])
+ sum(model.v_F_Trucked[p, o, t] for p in model.s_PP if model.p_POT[p, o])
<= model.p_sigma_Reuse[o] + model.v_S_ReuseCapacity[o]
)
model.BeneficialReuseCapacity = Constraint(
model.s_O,
model.s_T,
rule=BeneficialReuseCapacityRule,
doc="Beneficial reuse capacity",
)
# model.BeneficialReuseCapacity.pprint()
# COMMENT: Beneficial reuse capacity constraint has not been tested yet
def FreshSourcingCostRule(model, f, p, t):
return (
model.v_C_Sourced[f, p, t]
== (model.v_F_Sourced[f, p, t] + model.v_F_Trucked[f, p, t])
* model.p_pi_Sourcing[f]
)
model.FreshSourcingCost = Constraint(
model.s_F,
model.s_CP,
model.s_T,
rule=FreshSourcingCostRule,
doc="Fresh sourcing cost",
)
# model.FreshSourcingCost.pprint()
def TotalFreshSourcingCostRule(model):
return model.v_C_TotalSourced == sum(
sum(sum(model.v_C_Sourced[f, p, t] for f in model.s_F) for p in model.s_CP)
for t in model.s_T
)
model.TotalFreshSourcingCost = Constraint(
rule=TotalFreshSourcingCostRule, doc="Total fresh sourcing cost"
)
def TotalFreshSourcingVolumeRule(model):
return model.v_F_TotalSourced == sum(
sum(
sum(model.v_F_Sourced[f, p, t] for f in model.s_F if model.p_FCA[f, p])
for p in model.s_CP
)
for t in model.s_T
) + sum(
sum(
sum(model.v_F_Trucked[f, p, t] for f in model.s_F if model.p_FCT[f, p])
for p in model.s_CP
)
for t in model.s_T
)
model.TotalFreshSourcingVolume = Constraint(
rule=TotalFreshSourcingVolumeRule, doc="Total fresh sourcing volume"
)
def DisposalCostRule(model, k, t):
return (
model.v_C_Disposal[k, t]
== (
sum(model.v_F_Piped[n, k, t] for n in model.s_N if model.p_NKA[n, k])
+ sum(model.v_F_Piped[r, k, t] for r in model.s_R if model.p_RKA[r, k])
+ sum(model.v_F_Piped[s, k, t] for s in model.s_S if model.p_SKA[s, k])
+ sum(
model.v_F_Trucked[p, k, t] for p in model.s_PP if model.p_PKT[p, k]
)
+ sum(
model.v_F_Trucked[p, k, t] for p in model.s_CP if model.p_CKT[p, k]
)
+ sum(
model.v_F_Trucked[s, k, t] for s in model.s_S if model.p_SKT[s, k]
)
+ sum(
model.v_F_Trucked[r, k, t] for r in model.s_R if model.p_RKT[r, k]
)
)
* model.p_pi_Disposal[k]
)
model.DisposalCost = Constraint(
model.s_K, model.s_T, rule=DisposalCostRule, doc="Disposal cost"
)
# model.DisposalCost.pprint()
def TotalDisposalCostRule(model):
return model.v_C_TotalDisposal == sum(
sum(model.v_C_Disposal[k, t] for k in model.s_K) for t in model.s_T
)
model.TotalDisposalCost = Constraint(
rule=TotalDisposalCostRule, doc="Total disposal cost"
)
# model.TotalDisposalCost.pprint()
def TreatmentCostRule(model, r, t):
return (
model.v_C_Treatment[r, t]
== (
sum(model.v_F_Piped[n, r, t] for n in model.s_N if model.p_NRA[n, r])
+ sum(model.v_F_Piped[s, r, t] for s in model.s_S if model.p_SRA[s, r])
+ sum(
model.v_F_Trucked[p, r, t] for p in model.s_PP if model.p_PRT[p, r]
)
+ sum(
model.v_F_Trucked[p, r, t] for p in model.s_CP if model.p_CRT[p, r]
)
)
* model.p_pi_Treatment[r]
)
model.TreatmentCost = Constraint(
model.s_R, model.s_T, rule=TreatmentCostRule, doc="Treatment cost"
)
# model.TreatmentCost.pprint()
def TotalTreatmentCostRule(model):
return model.v_C_TotalTreatment == sum(
sum(model.v_C_Treatment[r, t] for r in model.s_R) for t in model.s_T
)
model.TotalTreatmentCost = Constraint(
rule=TotalTreatmentCostRule, doc="Total treatment cost"
)
# model.TotalTreatmentCost.pprint()
def CompletionsReuseCostRule(
model,
p,
t,
):
return model.v_C_Reuse[p, t] == (
(
sum(model.v_F_Piped[n, p, t] for n in model.s_N if model.p_NCA[n, p])
+ sum(
model.v_F_Piped[p_tilde, p, t]
for p_tilde in model.s_PP
if model.p_PCA[p_tilde, p]
)
+ sum(model.v_F_Piped[r, p, t] for r in model.s_R if model.p_RCA[r, p])
+ sum(model.v_F_Piped[s, p, t] for s in model.s_S if model.p_SCA[s, p])
+ sum(
model.v_F_Piped[p_tilde, p, t]
for p_tilde in model.s_CP
if model.p_CCA[p_tilde, p]
)
+ sum(
model.v_F_Trucked[p_tilde, p, t]
for p_tilde in model.s_PP
if model.p_PCT[p_tilde, p]
)
+ sum(
model.v_F_Trucked[p_tilde, p, t]
for p_tilde in model.s_CP
if model.p_CCT[p_tilde, p]
)
+ sum(
model.v_F_Trucked[s, p, t] for s in model.s_S if model.p_SCT[s, p]
)
)
* model.p_pi_Reuse[p]
)
model.CompletionsReuseCost = Constraint(
model.s_CP,
model.s_T,
rule=CompletionsReuseCostRule,
doc="Reuse completions cost",
)
# model.CompletionsReuseCost.pprint()
def TotalCompletionsReuseCostRule(model):
return model.v_C_TotalReuse == sum(
sum(model.v_C_Reuse[p, t] for p in model.s_CP) for t in model.s_T
)
model.TotalCompletionsReuseCost = Constraint(
rule=TotalCompletionsReuseCostRule, doc="Total completions reuse cost"
)
# model.TotalCompletionsReuseCost.pprint()
def PipingCostRule(model, l, l_tilde, t):
if l in model.s_PP and l_tilde in model.s_CP:
if model.p_PCA[l, l_tilde]:
return (
model.v_C_Piped[l, l_tilde, t]
== model.v_F_Piped[l, l_tilde, t] * model.p_pi_Pipeline[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_PP and l_tilde in model.s_N:
if model.p_PNA[l, l_tilde]:
return (
model.v_C_Piped[l, l_tilde, t]
== model.v_F_Piped[l, l_tilde, t] * model.p_pi_Pipeline[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_PP and l_tilde in model.s_PP:
if model.p_PPA[l, l_tilde]:
return (
model.v_C_Piped[l, l_tilde, t]
== model.v_F_Piped[l, l_tilde, t] * model.p_pi_Pipeline[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_CP and l_tilde in model.s_N:
if model.p_CNA[l, l_tilde]:
return (
model.v_C_Piped[l, l_tilde, t]
== model.v_F_Piped[l, l_tilde, t] * model.p_pi_Pipeline[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_CP and l_tilde in model.s_CP:
if model.p_CCA[l, l_tilde]:
return (
model.v_C_Piped[l, l_tilde, t]
== model.v_F_Piped[l, l_tilde, t] * model.p_pi_Pipeline[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_N:
if model.p_NNA[l, l_tilde]:
return (
model.v_C_Piped[l, l_tilde, t]
== model.v_F_Piped[l, l_tilde, t] * model.p_pi_Pipeline[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_CP:
if model.p_NCA[l, l_tilde]:
return (
model.v_C_Piped[l, l_tilde, t]
== model.v_F_Piped[l, l_tilde, t] * model.p_pi_Pipeline[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_K:
if model.p_NKA[l, l_tilde]:
return (
model.v_C_Piped[l, l_tilde, t]
== model.v_F_Piped[l, l_tilde, t] * model.p_pi_Pipeline[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_S:
if model.p_NSA[l, l_tilde]:
return (
model.v_C_Piped[l, l_tilde, t]
== model.v_F_Piped[l, l_tilde, t] * model.p_pi_Pipeline[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_R:
if model.p_NRA[l, l_tilde]:
return (
model.v_C_Piped[l, l_tilde, t]
== model.v_F_Piped[l, l_tilde, t] * model.p_pi_Pipeline[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_N and l_tilde in model.s_O:
if model.p_NOA[l, l_tilde]:
return (
model.v_C_Piped[l, l_tilde, t]
== model.v_F_Piped[l, l_tilde, t] * model.p_pi_Pipeline[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_R and l_tilde in model.s_N:
if model.p_RNA[l, l_tilde]:
return (
model.v_C_Piped[l, l_tilde, t]
== model.v_F_Piped[l, l_tilde, t] * model.p_pi_Pipeline[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_R and l_tilde in model.s_K:
if model.p_RKA[l, l_tilde]:
return (
model.v_C_Piped[l, l_tilde, t]
== model.v_F_Piped[l, l_tilde, t] * model.p_pi_Pipeline[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_N:
if model.p_SNA[l, l_tilde]:
return (
model.v_C_Piped[l, l_tilde, t]
== model.v_F_Piped[l, l_tilde, t] * model.p_pi_Pipeline[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_K:
if model.p_SKA[l, l_tilde]:
return (
model.v_C_Piped[l, l_tilde, t]
== model.v_F_Piped[l, l_tilde, t] * model.p_pi_Pipeline[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_R:
if model.p_SRA[l, l_tilde]:
return (
model.v_C_Piped[l, l_tilde, t]
== model.v_F_Piped[l, l_tilde, t] * model.p_pi_Pipeline[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_O:
if model.p_SOA[l, l_tilde]:
return (
model.v_C_Piped[l, l_tilde, t]
== model.v_F_Piped[l, l_tilde, t] * model.p_pi_Pipeline[l, l_tilde]
)
else:
return Constraint.Skip
elif l in model.s_F and l_tilde in model.s_CP:
if model.p_FCA[l, l_tilde]:
return (
model.v_C_Piped[l, l_tilde, t]
== model.v_F_Sourced[l, l_tilde, t]
* model.p_pi_Pipeline[l, l_tilde]
)
else:
return Constraint.Skip
else:
return Constraint.Skip
model.PipingCost = Constraint(
model.s_L, model.s_L, model.s_T, rule=PipingCostRule, doc="Piping cost"
)
# model.PipingCost.pprint()
def TotalPipingCostRule(model):
return model.v_C_TotalPiping == (
sum(
sum(
sum(
model.v_C_Piped[p, p_tilde, t]
for p in model.s_PP
if model.p_PCA[p, p_tilde]
)
for p_tilde in model.s_CP
)
+ sum(
sum(
model.v_C_Piped[p, n, t]
for p in model.s_PP
if model.p_PNA[p, n]
)
for n in model.s_N
)
+ sum(
sum(
model.v_C_Piped[p, p_tilde, t]
for p in model.s_PP
if model.p_PPA[p, p_tilde]
)
for p_tilde in model.s_PP
)
+ sum(
sum(
model.v_C_Piped[p, n, t]
for p in model.s_CP
if model.p_CNA[p, n]
)
for n in model.s_N
)
+ sum(
sum(
model.v_C_Piped[n, n_tilde, t]
for n in model.s_N
if model.p_NNA[n, n_tilde]
)
for n_tilde in model.s_N
)
+ sum(
sum(
model.v_C_Piped[n, p, t] for n in model.s_N if model.p_NCA[n, p]
)
for p in model.s_CP
)
+ sum(
sum(
model.v_C_Piped[n, k, t] for n in model.s_N if model.p_NKA[n, k]
)
for k in model.s_K
)
+ sum(
sum(
model.v_C_Piped[n, s, t] for n in model.s_N if model.p_NSA[n, s]
)
for s in model.s_S
)
+ sum(
sum(
model.v_C_Piped[n, r, t] for n in model.s_N if model.p_NRA[n, r]
)
for r in model.s_R
)
+ sum(
sum(
model.v_C_Piped[n, o, t] for n in model.s_N if model.p_NOA[n, o]
)
for o in model.s_O
)
+ sum(
sum(
model.v_C_Piped[f, p, t] for f in model.s_F if model.p_FCA[f, p]
)
for p in model.s_CP
)
+ sum(
sum(
model.v_C_Piped[r, n, t] for r in model.s_R if model.p_RNA[r, n]
)
for n in model.s_N
)
+ sum(
sum(
model.v_C_Piped[r, k, t] for r in model.s_R if model.p_RKA[r, k]
)
for k in model.s_K
)
+ sum(
sum(
model.v_C_Piped[s, n, t] for s in model.s_S if model.p_SNA[s, n]
)
for n in model.s_N
)
+ sum(
sum(
model.v_C_Piped[s, r, t] for s in model.s_S if model.p_SRA[s, r]
)
for r in model.s_R
)
+ sum(
sum(
model.v_C_Piped[s, o, t] for s in model.s_S if model.p_SOA[s, o]
)
for o in model.s_O
)
+ sum(
sum(
model.v_C_Piped[f, p, t] for f in model.s_F if model.p_FCA[f, p]
)
for p in model.s_CP
)
+ sum(
sum(
model.v_C_Piped[p, p_tilde, t]
for p in model.s_CP
if model.p_CCA[p, p_tilde]
)
for p_tilde in model.s_CP
)
for t in model.s_T
)
)
model.TotalPipingCost = Constraint(
rule=TotalPipingCostRule, doc="Total piping cost"
)
# model.TotalPipingCost.pprint()
def StorageDepositCostRule(model, s, t):
return model.v_C_Storage[s, t] == (
(
sum(model.v_F_Piped[n, s, t] for n in model.s_N if model.p_NSA[n, s])
+ sum(
model.v_F_Trucked[p, s, t] for p in model.s_PP if model.p_PST[p, s]
)
+ sum(
model.v_F_Trucked[p, s, t] for p in model.s_CP if model.p_CST[p, s]
)
)
* model.p_pi_Storage[s]
)
model.StorageDepositCost = Constraint(
model.s_S, model.s_T, rule=StorageDepositCostRule, doc="Storage deposit cost"
)
# model.StorageDepositCost.pprint()
def TotalStorageCostRule(model):
return model.v_C_TotalStorage == sum(
sum(model.v_C_Storage[s, t] for s in model.s_S) for t in model.s_T
)
model.TotalStorageCost = Constraint(
rule=TotalStorageCostRule, doc="Total storage deposit cost"
)
# model.TotalStorageCost.pprint()
def StorageWithdrawalCreditRule(model, s, t):
return model.v_R_Storage[s, t] == (
(
sum(model.v_F_Piped[s, n, t] for n in model.s_N if model.p_SNA[s, n])
+ sum(model.v_F_Piped[s, p, t] for p in model.s_CP if model.p_SCA[s, p])
+ sum(model.v_F_Piped[s, k, t] for k in model.s_K if model.p_SKA[s, k])
+ sum(model.v_F_Piped[s, r, t] for r in model.s_R if model.p_SRA[s, r])
+ sum(model.v_F_Piped[s, o, t] for o in model.s_O if model.p_SOA[s, o])
+ sum(
model.v_F_Trucked[s, p, t] for p in model.s_CP if model.p_SCT[s, p]
)
+ sum(
model.v_F_Trucked[s, k, t] for k in model.s_K if model.p_SKT[s, k]
)
)
* model.p_rho_Storage[s]
)
model.StorageWithdrawalCredit = Constraint(
model.s_S,
model.s_T,
rule=StorageWithdrawalCreditRule,
doc="Storage withdrawal credit",
)
# model.StorageWithdrawalCredit.pprint()
def TotalStorageWithdrawalCreditRule(model):
return model.v_R_TotalStorage == sum(
sum(model.v_R_Storage[s, t] for s in model.s_S) for t in model.s_T
)
model.TotalStorageWithdrawalCredit = Constraint(
rule=TotalStorageWithdrawalCreditRule, doc="Total storage withdrawal credit"
)
# model.TotalStorageWithdrawalCredit.pprint()
def TotalPadStorageCostRule(model):
return model.v_C_TotalPadStorage == sum(
sum(
model.vb_z_PadStorage[p, t] * model.p_pi_PadStorage[p, t]
for p in model.s_CP
)
for t in model.s_T
)
model.TotalPadStorageCost = Constraint(
rule=TotalPadStorageCostRule, doc="Total completions pad storage cost"
)
def TruckingCostRule(model, l, l_tilde, t):
if l in model.s_PP and l_tilde in model.s_CP:
if model.p_PCT[l, l_tilde]:
return (
model.v_C_Trucked[l, l_tilde, t]
== model.v_F_Trucked[l, l_tilde, t]
* 1
/ model.p_delta_Truck
* model.p_tau_Trucking[l, l_tilde]
* model.p_pi_Trucking[l]
)
else:
return Constraint.Skip
elif l in model.s_F and l_tilde in model.s_CP:
if model.p_FCT[l, l_tilde]:
return (
model.v_C_Trucked[l, l_tilde, t]
== model.v_F_Trucked[l, l_tilde, t]
* 1
/ model.p_delta_Truck
* model.p_tau_Trucking[l, l_tilde]
* model.p_pi_Trucking[l]
)
else:
return Constraint.Skip
elif l in model.s_PP and l_tilde in model.s_K:
if model.p_PKT[l, l_tilde]:
return (
model.v_C_Trucked[l, l_tilde, t]
== model.v_F_Trucked[l, l_tilde, t]
* 1
/ model.p_delta_Truck
* model.p_tau_Trucking[l, l_tilde]
* model.p_pi_Trucking[l]
)
else:
return Constraint.Skip
elif l in model.s_PP and l_tilde in model.s_S:
if model.p_PST[l, l_tilde]:
return (
model.v_C_Trucked[l, l_tilde, t]
== model.v_F_Trucked[l, l_tilde, t]
* 1
/ model.p_delta_Truck
* model.p_tau_Trucking[l, l_tilde]
* model.p_pi_Trucking[l]
)
else:
return Constraint.Skip
elif l in model.s_PP and l_tilde in model.s_R:
if model.p_PRT[l, l_tilde]:
return (
model.v_C_Trucked[l, l_tilde, t]
== model.v_F_Trucked[l, l_tilde, t]
* 1
/ model.p_delta_Truck
* model.p_tau_Trucking[l, l_tilde]
* model.p_pi_Trucking[l]
)
else:
return Constraint.Skip
elif l in model.s_PP and l_tilde in model.s_O:
if model.p_POT[l, l_tilde]:
return (
model.v_C_Trucked[l, l_tilde, t]
== model.v_F_Trucked[l, l_tilde, t]
* 1
/ model.p_delta_Truck
* model.p_tau_Trucking[l, l_tilde]
* model.p_pi_Trucking[l]
)
else:
return Constraint.Skip
elif l in model.s_CP and l_tilde in model.s_K:
if model.p_CKT[l, l_tilde]:
return (
model.v_C_Trucked[l, l_tilde, t]
== model.v_F_Trucked[l, l_tilde, t]
* 1
/ model.p_delta_Truck
* model.p_tau_Trucking[l, l_tilde]
* model.p_pi_Trucking[l]
)
else:
return Constraint.Skip
elif l in model.s_CP and l_tilde in model.s_CP:
if model.p_CCT[l, l_tilde]:
return (
model.v_C_Trucked[l, l_tilde, t]
== model.v_F_Trucked[l, l_tilde, t]
* 1
/ model.p_delta_Truck
* model.p_tau_Trucking[l, l_tilde]
* model.p_pi_Trucking[l]
)
else:
return Constraint.Skip
elif l in model.s_CP and l_tilde in model.s_S:
if model.p_CST[l, l_tilde]:
return (
model.v_C_Trucked[l, l_tilde, t]
== model.v_F_Trucked[l, l_tilde, t]
* 1
/ model.p_delta_Truck
* model.p_tau_Trucking[l, l_tilde]
* model.p_pi_Trucking[l]
)
else:
return Constraint.Skip
elif l in model.s_CP and l_tilde in model.s_R:
if model.p_CRT[l, l_tilde]:
return (
model.v_C_Trucked[l, l_tilde, t]
== model.v_F_Trucked[l, l_tilde, t]
* 1
/ model.p_delta_Truck
* model.p_tau_Trucking[l, l_tilde]
* model.p_pi_Trucking[l]
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_CP:
if model.p_SCT[l, l_tilde]:
return (
model.v_C_Trucked[l, l_tilde, t]
== model.v_F_Trucked[l, l_tilde, t]
* 1
/ model.p_delta_Truck
* model.p_tau_Trucking[l, l_tilde]
* model.p_pi_Trucking[l]
)
else:
return Constraint.Skip
elif l in model.s_S and l_tilde in model.s_K:
if model.p_SKT[l, l_tilde]:
return (
model.v_C_Trucked[l, l_tilde, t]
== model.v_F_Trucked[l, l_tilde, t]
* 1
/ model.p_delta_Truck
* model.p_tau_Trucking[l, l_tilde]
* model.p_pi_Trucking[l]
)
else:
return Constraint.Skip
elif l in model.s_R and l_tilde in model.s_K:
if model.p_RKT[l, l_tilde]:
return (
model.v_C_Trucked[l, l_tilde, t]
== model.v_F_Trucked[l, l_tilde, t]
* 1
/ model.p_delta_Truck
* model.p_tau_Trucking[l, l_tilde]
* model.p_pi_Trucking[l]
)
else:
return Constraint.Skip
else:
return Constraint.Skip
model.TruckingCost = Constraint(
model.s_L, model.s_L, model.s_T, rule=TruckingCostRule, doc="Trucking cost"
)
# model.TruckingCost.pprint()
def TotalTruckingCostRule(model):
return model.v_C_TotalTrucking == (
sum(
sum(
sum(
model.v_C_Trucked[p, p_tilde, t]
for p in model.s_PP
if model.p_PCT[p, p_tilde]
)
for p_tilde in model.s_CP
)
+ sum(
sum(
model.v_C_Trucked[p, k, t]
for p in model.s_PP
if model.p_PKT[p, k]
)
for k in model.s_K
)
+ sum(
sum(
model.v_C_Trucked[p, s, t]
for p in model.s_PP
if model.p_PST[p, s]
)
for s in model.s_S
)
+ sum(
sum(
model.v_C_Trucked[p, r, t]
for p in model.s_PP
if model.p_PRT[p, r]
)
for r in model.s_R
)
+ sum(
sum(
model.v_C_Trucked[p, o, t]
for p in model.s_PP
if model.p_POT[p, o]
)
for o in model.s_O
)
+ sum(
sum(
model.v_C_Trucked[p, k, t]
for p in model.s_CP
if model.p_CKT[p, k]
)
for k in model.s_K
)
+ sum(
sum(
model.v_C_Trucked[p, s, t]
for p in model.s_CP
if model.p_CST[p, s]
)
for s in model.s_S
)
+ sum(
sum(
model.v_C_Trucked[p, r, t]
for p in model.s_CP
if model.p_CRT[p, r]
)
for r in model.s_R
)
+ sum(
sum(
model.v_C_Trucked[s, p, t]
for s in model.s_S
if model.p_SCT[s, p]
)
for p in model.s_CP
)
+ sum(
sum(
model.v_C_Trucked[s, k, t]
for s in model.s_S
if model.p_SKT[s, k]
)
for k in model.s_K
)
+ sum(
sum(
model.v_C_Trucked[r, k, t]
for r in model.s_R
if model.p_RKT[r, k]
)
for k in model.s_K
)
+ sum(
sum(
model.v_C_Trucked[f, p, t]
for f in model.s_F
if model.p_FCT[f, p]
)
for p in model.s_CP
)
+ sum(
sum(
model.v_C_Trucked[p, p_tilde, t]
for p in model.s_CP
if model.p_CCT[p, p_tilde]
)
for p_tilde in model.s_CP
)
for t in model.s_T
)
)
model.TotalTruckingCost = Constraint(
rule=TotalTruckingCostRule, doc="Total trucking cost"
)
# model.TotalTruckingCost.pprint()
def SlackCostsRule(model):
return model.v_C_Slack == (
sum(
sum(
model.v_S_FracDemand[p, t] * model.p_psi_FracDemand
for p in model.s_CP
)
for t in model.s_T
)
+ sum(
sum(
model.v_S_Production[p, t] * model.p_psi_Production
for p in model.s_PP
)
for t in model.s_T
)
+ sum(
sum(model.v_S_Flowback[p, t] * model.p_psi_Flowback for p in model.s_CP)
for t in model.s_T
)
+ sum(
sum(
model.v_S_PipelineCapacity[p, p_tilde]
for p in model.s_PP
if model.p_PCA[p, p_tilde]
)
for p_tilde in model.s_CP
)
+ sum(
sum(
model.v_S_PipelineCapacity[p, n]
for p in model.s_PP
if model.p_PNA[p, n]
)
for n in model.s_N
)
+ sum(
sum(
model.v_S_PipelineCapacity[p, p_tilde]
for p in model.s_PP
if model.p_PPA[p, p_tilde]
)
for p_tilde in model.s_PP
)
+ sum(
sum(
model.v_S_PipelineCapacity[p, n]
for p in model.s_CP
if model.p_CNA[p, n]
)
for n in model.s_N
)
+ sum(
sum(
model.v_S_PipelineCapacity[n, n_tilde]
for n in model.s_N
if model.p_NNA[n, n_tilde]
)
for n_tilde in model.s_N
)
+ sum(
sum(
model.v_S_PipelineCapacity[n, p]
for n in model.s_N
if model.p_NCA[n, p]
)
for p in model.s_CP
)
+ sum(
sum(
model.v_S_PipelineCapacity[n, k]
for n in model.s_N
if model.p_NKA[n, k]
)
for k in model.s_K
)
+ sum(
sum(
model.v_S_PipelineCapacity[n, s]
for n in model.s_N
if model.p_NSA[n, s]
)
for s in model.s_S
)
+ sum(
sum(
model.v_S_PipelineCapacity[n, r]
for n in model.s_N
if model.p_NRA[n, r]
)
for r in model.s_R
)
+ sum(
sum(
model.v_S_PipelineCapacity[n, o]
for n in model.s_N
if model.p_NOA[n, o]
)
for o in model.s_O
)
+ sum(
sum(
model.v_S_PipelineCapacity[f, p]
for f in model.s_F
if model.p_FCA[f, p]
)
for p in model.s_CP
)
+ sum(
sum(
model.v_S_PipelineCapacity[r, n]
for r in model.s_R
if model.p_RNA[r, n]
)
for n in model.s_N
)
+ sum(
sum(
model.v_S_PipelineCapacity[r, k]
for r in model.s_R
if model.p_RKA[r, k]
)
for k in model.s_K
)
+ sum(
sum(
model.v_S_PipelineCapacity[s, n]
for s in model.s_S
if model.p_SNA[s, n]
)
for n in model.s_N
)
+ sum(
sum(
model.v_S_PipelineCapacity[s, p]
for s in model.s_S
if model.p_SCA[s, p]
)
for p in model.s_CP
)
+ sum(
sum(
model.v_S_PipelineCapacity[s, k]
for s in model.s_S
if model.p_SKA[s, k]
)
for k in model.s_K
)
+ sum(
sum(
model.v_S_PipelineCapacity[s, r]
for s in model.s_S
if model.p_SRA[s, r]
)
for r in model.s_R
)
+ sum(
sum(
model.v_S_PipelineCapacity[s, o]
for s in model.s_S
if model.p_SOA[s, o]
)
for o in model.s_O
)
+ sum(
model.v_S_StorageCapacity[s] * model.p_psi_StorageCapacity
for s in model.s_S
)
+ sum(
model.v_S_DisposalCapacity[k] * model.p_psi_DisposalCapacity
for k in model.s_K
)
+ sum(
model.v_S_TreatmentCapacity[r] * model.p_psi_TreatmentCapacity
for r in model.s_R
)
+ sum(
model.v_S_ReuseCapacity[o] * model.p_psi_ReuseCapacity
for o in model.s_O
)
)
model.SlackCosts = Constraint(rule=SlackCostsRule, doc="Slack costs")
def ReuseDestinationDeliveriesRule(model, p, t):
    """Balance: v_F_ReuseDestination[p, t] equals the total water arriving at
    completion pad p in period t over every arc allowed to terminate at p
    (piped from nodes, production pads, treatment, storage and other
    completion pads; trucked from completion pads, production pads and
    storage).  The p_*A/p_*T parameters are arc-existence indicators.
    """
    return model.v_F_ReuseDestination[p, t] == sum(
        # piped from network nodes
        model.v_F_Piped[n, p, t] for n in model.s_N if model.p_NCA[n, p]
    ) + sum(
        # piped from production pads
        model.v_F_Piped[p_tilde, p, t]
        for p_tilde in model.s_PP
        if model.p_PCA[p_tilde, p]
    ) + sum(
        # piped from treatment sites
        model.v_F_Piped[r, p, t] for r in model.s_R if model.p_RCA[r, p]
    ) + sum(
        # piped from storage sites
        model.v_F_Piped[s, p, t] for s in model.s_S if model.p_SCA[s, p]
    ) + sum(
        # piped from other completion pads
        model.v_F_Piped[p_tilde, p, t]
        for p_tilde in model.s_CP
        if model.p_CCA[p_tilde, p]
    ) + sum(
        # trucked from completion pads
        model.v_F_Trucked[p_tilde, p, t]
        for p_tilde in model.s_CP
        if model.p_CCT[p_tilde, p]
    ) + sum(
        # trucked from production pads
        model.v_F_Trucked[p_tilde, p, t]
        for p_tilde in model.s_PP
        if model.p_PCT[p_tilde, p]
    ) + sum(
        # trucked from storage sites
        model.v_F_Trucked[s, p, t] for s in model.s_S if model.p_SCT[s, p]
    )
model.ReuseDestinationDeliveries = Constraint(
model.s_CP,
model.s_T,
rule=ReuseDestinationDeliveriesRule,
doc="Reuse destinations volume",
)
# model.ReuseDestinationDeliveries.pprint()
def DisposalDestinationDeliveriesRule(model, k, t):
    """Balance: v_F_DisposalDestination[k, t] equals the total water arriving
    at disposal site k in period t, piped (from nodes, storage, treatment)
    plus trucked (from storage, production pads, completion pads, treatment).
    """
    return model.v_F_DisposalDestination[k, t] == sum(
        model.v_F_Piped[n, k, t] for n in model.s_N if model.p_NKA[n, k]
    ) + sum(model.v_F_Piped[s, k, t] for s in model.s_S if model.p_SKA[s, k]) + sum(
        model.v_F_Piped[r, k, t] for r in model.s_R if model.p_RKA[r, k]
    ) + sum(
        model.v_F_Trucked[s, k, t] for s in model.s_S if model.p_SKT[s, k]
    ) + sum(
        model.v_F_Trucked[p, k, t] for p in model.s_PP if model.p_PKT[p, k]
    ) + sum(
        model.v_F_Trucked[p, k, t] for p in model.s_CP if model.p_CKT[p, k]
    ) + sum(
        model.v_F_Trucked[r, k, t] for r in model.s_R if model.p_RKT[r, k]
    )
model.DisposalDestinationDeliveries = Constraint(
model.s_K,
model.s_T,
rule=DisposalDestinationDeliveriesRule,
doc="Disposal destinations volume",
)
# model.DisposalDestinationDeliveries.pprint()
def TreatmentDestinationDeliveriesRule(model, r, t):
    """Balance: v_F_TreatmentDestination[r, t] equals the total water arriving
    at treatment site r in period t (piped from nodes and storage; trucked
    from production and completion pads).
    """
    return model.v_F_TreatmentDestination[r, t] == sum(
        model.v_F_Piped[n, r, t] for n in model.s_N if model.p_NRA[n, r]
    ) + sum(model.v_F_Piped[s, r, t] for s in model.s_S if model.p_SRA[s, r]) + sum(
        model.v_F_Trucked[p, r, t] for p in model.s_PP if model.p_PRT[p, r]
    ) + sum(
        # NOTE: loop variable `s` here ranges over completion pads (s_CP),
        # not storage sites, despite the name.
        model.v_F_Trucked[s, r, t] for s in model.s_CP if model.p_CRT[s, r]
    )
model.TreatmentDestinationDeliveries = Constraint(
model.s_R,
model.s_T,
rule=TreatmentDestinationDeliveriesRule,
doc="Treatment destinations volume",
)
def BeneficialReuseDeliveriesRule(model, o, t):
    """Balance: v_F_BeneficialReuseDestination[o, t] equals the total water
    arriving at beneficial-reuse site o in period t (piped from nodes and
    storage; trucked from production pads).
    """
    return model.v_F_BeneficialReuseDestination[o, t] == sum(
        model.v_F_Piped[n, o, t] for n in model.s_N if model.p_NOA[n, o]
    ) + sum(model.v_F_Piped[s, o, t] for s in model.s_S if model.p_SOA[s, o]) + sum(
        model.v_F_Trucked[p, o, t] for p in model.s_PP if model.p_POT[p, o]
    )
model.BeneficialReuseDeliveries = Constraint(
model.s_O,
model.s_T,
rule=BeneficialReuseDeliveriesRule,
doc="Beneficial reuse destinations volume",
)
# model.TreatmentDestinationDeliveries.pprint()
## Fixing Decision Variables ##
# # model.v_F_Piped['PP1','SS1'].fix(3500)
# model.vb_y_Disposal['K02','I0'].fix(0)
# model.v_F_PadStorageIn['CP01','T2'].fix(2000)
## Define Objective and Solve Statement ##
model.objective = Objective(
expr=model.v_Z, sense=minimize, doc="Objective function"
)
return model
def water_quality(model, df_sets, df_parameters):
# Add parameter for water quality at each pad
model.p_nu = Param(
model.s_P,
model.s_W,
default=0,
initialize=df_parameters["PadWaterQuality"],
doc="Water Quality at pad [mg/L]",
)
# Add parameter for initial water quality at each storage location
StorageInitialWaterQuality_Table = {}
# note: initialize p_xi with df_parameters["StorageInitialWaterQuality"] when data in input file is populated
model.p_xi = Param(
model.s_S,
model.s_W,
default=0,
initialize=StorageInitialWaterQuality_Table,
doc="Initial Water Quality at storage site [mg/L]",
)
# Add variable to track water quality at each location over time
model.v_Q = Var(
model.s_L,
model.s_W,
model.s_T,
within=NonNegativeReals,
doc="Water quality at location [mg/L]",
)
# Material Balance
def DisposalWaterQualityRule(model, k, w, t):
    """Quality balance at disposal site k for component w in period t.

    The component mass arriving (sum of flow * source quality over every
    inbound piped and trucked arc) must equal the blended quality at k
    times the total disposal volume.  Bilinear terms (v_F * v_Q) make this
    non-convex; flows are fixed before this constraint is solved.
    """
    return (
        sum(
            model.v_F_Piped[n, k, t] * model.v_Q[n, w, t]
            for n in model.s_N
            if model.p_NKA[n, k]
        )
        + sum(
            model.v_F_Piped[s, k, t] * model.v_Q[s, w, t]
            for s in model.s_S
            if model.p_SKA[s, k]
        )
        + sum(
            model.v_F_Piped[r, k, t] * model.v_Q[r, w, t]
            for r in model.s_R
            if model.p_RKA[r, k]
        )
        + sum(
            model.v_F_Trucked[s, k, t] * model.v_Q[s, w, t]
            for s in model.s_S
            if model.p_SKT[s, k]
        )
        + sum(
            model.v_F_Trucked[p, k, t] * model.v_Q[p, w, t]
            for p in model.s_PP
            if model.p_PKT[p, k]
        )
        + sum(
            model.v_F_Trucked[p, k, t] * model.v_Q[p, w, t]
            for p in model.s_CP
            if model.p_CKT[p, k]
        )
        + sum(
            model.v_F_Trucked[r, k, t] * model.v_Q[r, w, t]
            for r in model.s_R
            if model.p_RKT[r, k]
        )
        == model.v_Q[k, w, t] * model.v_F_DisposalDestination[k, t]
    )
model.DisposalWaterQuality = Constraint(
model.s_K,
model.s_W,
model.s_T,
rule=DisposalWaterQualityRule,
doc="Disposal water quality rule",
)
def StorageSiteWaterQualityRule(model, s, w, t):
    """Quality balance at storage site s for component w in period t.

    Component mass carried into the period plus mass delivered this period
    equals the blended quality at s times (end-of-period inventory + all
    outflows).  In the first period the carried-in mass is the initial
    inventory (p_lambda_Storage) at initial quality (p_xi); afterwards it
    is the previous period's inventory at the previous period's quality.

    The two time branches previously duplicated the entire balance; only
    the carried-in term differs, so it is factored out here.
    """
    # Component mass carried over from before period t.
    if t == model.s_T.first():
        carried_in = model.p_lambda_Storage[s] * model.p_xi[s, w]
    else:
        carried_in = (
            model.v_L_Storage[s, model.s_T.prev(t)]
            * model.v_Q[s, w, model.s_T.prev(t)]
        )
    return carried_in + sum(
        model.v_F_Piped[n, s, t] * model.v_Q[n, w, t]
        for n in model.s_N
        if model.p_NSA[n, s]
    ) + sum(
        model.v_F_Trucked[p, s, t] * model.v_Q[p, w, t]
        for p in model.s_PP
        if model.p_PST[p, s]
    ) + sum(
        model.v_F_Trucked[p, s, t] * model.v_Q[p, w, t]
        for p in model.s_CP
        if model.p_CST[p, s]
    ) == model.v_Q[s, w, t] * (
        # end-of-period inventory plus every outbound flow from s
        model.v_L_Storage[s, t]
        + sum(model.v_F_Piped[s, n, t] for n in model.s_N if model.p_SNA[s, n])
        + sum(model.v_F_Piped[s, p, t] for p in model.s_CP if model.p_SCA[s, p])
        + sum(model.v_F_Piped[s, k, t] for k in model.s_K if model.p_SKA[s, k])
        + sum(model.v_F_Piped[s, r, t] for r in model.s_R if model.p_SRA[s, r])
        + sum(model.v_F_Piped[s, o, t] for o in model.s_O if model.p_SOA[s, o])
        + sum(
            model.v_F_Trucked[s, p, t] for p in model.s_CP if model.p_SCT[s, p]
        )
        + sum(
            model.v_F_Trucked[s, k, t] for k in model.s_K if model.p_SKT[s, k]
        )
    )
model.StorageSiteWaterQuality = Constraint(
model.s_S,
model.s_W,
model.s_T,
rule=StorageSiteWaterQualityRule,
doc="Storage site water quality rule",
)
# Treatment Facility
def TreatmentWaterQualityRule(model, r, w, t):
    """Quality balance at treatment site r for component w in period t.

    Inbound component mass, scaled by p_epsilon_Treatment (presumably the
    fraction of component w remaining after treatment — confirm against the
    parameter's definition), equals the outbound quality times the treated
    outflow (to completion pads plus unused treated water).
    """
    return model.p_epsilon_Treatment[r, w] * (
        sum(
            model.v_F_Piped[n, r, t] * model.v_Q[n, w, t]
            for n in model.s_N
            if model.p_NRA[n, r]
        )
        + sum(
            model.v_F_Piped[s, r, t] * model.v_Q[s, w, t]
            for s in model.s_S
            if model.p_SRA[s, r]
        )
        + sum(
            model.v_F_Trucked[p, r, t] * model.v_Q[p, w, t]
            for p in model.s_PP
            if model.p_PRT[p, r]
        )
        + sum(
            model.v_F_Trucked[p, r, t] * model.v_Q[p, w, t]
            for p in model.s_CP
            if model.p_CRT[p, r]
        )
    ) == model.v_Q[r, w, t] * (
        sum(model.v_F_Piped[r, p, t] for p in model.s_CP if model.p_RCA[r, p])
        + model.v_F_UnusedTreatedWater[r, t]
    )
model.TreatmentWaterQuality = Constraint(
model.s_R,
model.s_W,
model.s_T,
rule=simple_constraint_rule(TreatmentWaterQualityRule),
doc="Treatment water quality",
)
def NetworkNodeWaterQualityRule(model, n, w, t):
    """Quality balance at network node n for component w in period t.

    Component mass piped into n (from production pads, completion pads,
    storage sites and other nodes) equals the blended quality at n times
    the total flow piped out of n to all destinations.
    """
    return sum(
        model.v_F_Piped[p, n, t] * model.v_Q[p, w, t]
        for p in model.s_PP
        if model.p_PNA[p, n]
    ) + sum(
        model.v_F_Piped[p, n, t] * model.v_Q[p, w, t]
        for p in model.s_CP
        if model.p_CNA[p, n]
    ) + sum(
        model.v_F_Piped[s, n, t] * model.v_Q[s, w, t]
        for s in model.s_S
        if model.p_SNA[s, n]
    ) + sum(
        model.v_F_Piped[n_tilde, n, t] * model.v_Q[n_tilde, w, t]
        for n_tilde in model.s_N
        if model.p_NNA[n_tilde, n]
    ) == model.v_Q[
        n, w, t
    ] * (
        sum(
            model.v_F_Piped[n, n_tilde, t]
            for n_tilde in model.s_N
            if model.p_NNA[n, n_tilde]
        )
        + sum(model.v_F_Piped[n, p, t] for p in model.s_CP if model.p_NCA[n, p])
        + sum(model.v_F_Piped[n, k, t] for k in model.s_K if model.p_NKA[n, k])
        + sum(model.v_F_Piped[n, r, t] for r in model.s_R if model.p_NRA[n, r])
        + sum(model.v_F_Piped[n, s, t] for s in model.s_S if model.p_NSA[n, s])
        + sum(model.v_F_Piped[n, o, t] for o in model.s_O if model.p_NOA[n, o])
    )
model.NetworkWaterQuality = Constraint(
model.s_N,
model.s_W,
model.s_T,
rule=NetworkNodeWaterQualityRule,
doc="Network water quality",
)
def BeneficialReuseWaterQuality(model, o, w, t):
    """Quality balance at beneficial-reuse site o for component w in period t:
    inbound component mass equals the blended quality at o times the total
    beneficial-reuse volume.
    """
    return (
        sum(
            model.v_F_Piped[n, o, t] * model.v_Q[n, w, t]
            for n in model.s_N
            if model.p_NOA[n, o]
        )
        + sum(
            model.v_F_Piped[s, o, t] * model.v_Q[s, w, t]
            for s in model.s_S
            if model.p_SOA[s, o]
        )
        + sum(
            model.v_F_Trucked[p, o, t] * model.v_Q[p, w, t]
            for p in model.s_PP
            if model.p_POT[p, o]
        )
        == model.v_Q[o, w, t] * model.v_F_BeneficialReuseDestination[o, t]
    )
model.BeneficialReuseWaterQuality = Constraint(
model.s_O,
model.s_W,
model.s_T,
rule=BeneficialReuseWaterQuality,
doc="Beneficial reuse capacity",
)
# Fix variables
# Fix variables: produced water flows, binary
model.v_F_Piped.fix()
model.v_F_Trucked.fix()
model.v_F_Sourced.fix()
model.v_F_PadStorageIn.fix()
model.v_F_PadStorageOut.fix()
model.v_L_Storage.fix()
model.v_F_UnusedTreatedWater.fix()
model.v_F_DisposalDestination.fix()
model.v_F_BeneficialReuseDestination.fix()
# Use p_nu to fix v_Q for pads
for p in model.s_P:
for w in model.s_W:
for t in model.s_T:
model.v_Q[p, w, t].fix(model.p_nu[p, w])
return model
def postprocess_water_quality_calculation(model, df_sets, df_parameters, opt):
    """Attach the water-quality formulation to an already-solved model and
    re-solve it with the given solver, yielding post-hoc quality values
    (water_quality() fixes the flow variables first, so only v_Q is free).
    """
    wq_model = water_quality(model, df_sets, df_parameters)
    opt.solve(wq_model, tee=True)
    return wq_model
| 128,620 | 37 | 92 |
1becc35c1544729b291b76e5e1c8312395737ac8 | 8,641 | py | Python | get_ext_repo.py | natemellendorf/configpy | 750da5eaef33cede9f3ef532453d63e507f34a2c | [
"MIT"
] | 4 | 2020-01-20T06:46:33.000Z | 2021-07-28T21:53:29.000Z | get_ext_repo.py | natemellendorf/configpy | 750da5eaef33cede9f3ef532453d63e507f34a2c | [
"MIT"
] | 5 | 2020-03-24T17:00:44.000Z | 2021-12-13T19:59:01.000Z | get_ext_repo.py | natemellendorf/configpy | 750da5eaef33cede9f3ef532453d63e507f34a2c | [
"MIT"
] | null | null | null | import requests
from pprint import pprint
import redis
import json
from datetime import datetime
from config_snips import cluster_config
if __name__ == '__main__':
github = 'https://github.com/natemellendorf/tr_templates'
gitlab = 'http://gitlab/root/awesome'
test = get_ext_repo(gitlab)
pprint(test)
| 36.306723 | 121 | 0.490568 | import requests
from pprint import pprint
import redis
import json
from datetime import datetime
from config_snips import cluster_config
def current_time():
    """Return the current local wall-clock time as an 'HH:MM:SS' string."""
    return datetime.now().strftime('%H:%M:%S')
def static_error(error):
    """Print *error* and return it wrapped in a timestamped event dict."""
    print(error)
    return {'event_time': current_time(), 'event': str(error)}
def pushtorepo(**kwargs):
    """Commit a device configuration to a GitLab repo and record ZTP state
    in Redis.

    Expected kwargs:
        REDIS_URI: Redis hostname.
        data: dict with 'repo_uri', 'repo_auth_token', 'device_config',
            'clientID', 'cluster', 'ztp'.
        serialNumber: device serial (Redis hash key and committed filename).
        node: cluster-node selector ('node0serialNumber'/'node1serialNumber').

    Returns an {'event_time', 'event'} dict describing the GitLab response,
    or an error event dict from static_error() on any failure.
    """
    REDIS_URI = kwargs["REDIS_URI"]
    data = kwargs["data"]
    serialNumber = kwargs["serialNumber"]
    node = kwargs["node"]
    new_data = {}
    # Derive the GitLab API base URL from the repo web URL.
    words = data["repo_uri"].split("/")
    protocol = words[0]
    domain = words[2]
    gitlab_url = '{0}//{1}'.format(protocol, domain)
    findall = '{0}/api/v4/projects/'.format(gitlab_url)
    rd = redis.Redis(host=REDIS_URI, port=6379, db=0)
    headers = {
        'PRIVATE-TOKEN': "{0}".format(data['repo_auth_token']),
        'Content-Type': "application/json",
        'User-Agent': "ConfigPy",
        'Accept': "*/*",
        'Cache-Control': "no-cache",
        'Connection': "keep-alive",
        'cache-control': "no-cache"
    }
    nodeconfig = data['device_config']
    pprint(data['cluster'])
    if data['cluster']:
        print('Clustering requested...')
        # NOTE(review): cluster is appended as an empty string — presumably a
        # placeholder for generated cluster config; confirm intent.
        cluster = ''
        # Record which chassis-cluster node this serial belongs to.
        if node == 'node0serialNumber':
            rd.hmset(serialNumber, {'ztp_cluster_node': '0'})
        elif node == 'node1serialNumber':
            rd.hmset(serialNumber, {'ztp_cluster_node': '1'})
        nodeconfig = (data['device_config'] + cluster)
    payload = {"branch": "master", "content": nodeconfig, "commit_message": "new commit"}
    querystring = {"per_page": "100"}
    try:
        # Find the project whose namespace path matches the repo URI.
        r = requests.get(findall, headers=headers, params=querystring, timeout=5)
        returned = r.json()
        for x in returned:
            if x['path_with_namespace'] in data["repo_uri"]:
                # GitLab file API path: <clientID>/<serialNumber>.set (URL-encoded).
                new_file_url = f'{findall}{x["id"]}/repository/files/{data["clientID"]}%2F{serialNumber}%2Eset'
                try:
                    # Try to create the file first (POST).
                    returned = requests.post(new_file_url, data=json.dumps(payload), headers=headers, timeout=5)
                    if returned.status_code == 201:
                        if data['ztp']:
                            # Mark device as ZTP-managed and store repo links.
                            rd.hmset(serialNumber, {'ztp': str(data['clientID'])})
                            rd.hmset(serialNumber, {'hostname': f'{serialNumber} - [ZTP]'})
                            rd.hmset(serialNumber, {'config': 'awaiting device'})
                            rd.hmset(serialNumber, {'device_sn': f'{serialNumber}'})
                            url_list = ['edit', 'blob']
                            for item in url_list:
                                url = f'{data["repo_uri"]}/{item}/master/{str(data["clientID"])}/{serialNumber}.set'
                                rd.hmset(serialNumber, {f'device_repo_{item}': f'{url}'})
                        new_data['event_time'] = current_time()
                        new_data['event'] = returned.text
                        return new_data
                    elif returned.status_code == 400 and 'this name already exists' in returned.text:
                        # File already exists: update it instead (PUT).
                        try:
                            returned = requests.put(new_file_url, data=json.dumps(payload), headers=headers, timeout=5)
                            new_data['event_time'] = current_time()
                            new_data['event'] = returned.text
                            if data['ztp']:
                                rd = redis.Redis(host=REDIS_URI, port=6379, db=0)
                                rd.hmset(serialNumber, {'ztp': str(data['clientID'])})
                                rd.hmset(serialNumber, {'hostname': f'{serialNumber} - [ZTP]'})
                                rd.hmset(serialNumber, {'config': 'awaiting device'})
                                rd.hmset(serialNumber, {'device_sn': f'{serialNumber}'})
                                url_list = ['edit', 'blob']
                                for item in url_list:
                                    url = f'{data["repo_uri"]}/{item}/master/{str(data["clientID"])}/{serialNumber}.set'
                                    rd.hmset(serialNumber, {f'device_repo_{item}': f'{url}'})
                                return new_data
                            else:
                                new_data['event_time'] = current_time()
                                new_data['event'] = returned.text
                                return new_data
                        except Exception as e:
                            error = static_error(e)
                            return error
                    else:
                        # Any other status: surface the raw response text.
                        new_data['event_time'] = current_time()
                        new_data['event'] = returned.text
                        return new_data
                except Exception as e:
                    error = static_error(e)
                    return error
    except Exception as e:
        error = static_error(e)
        return error
def get_ext_repo(ext_url, *args):
    """List the Jinja2 template files (*.j2) in an external GitHub or GitLab
    repository.

    Args:
        ext_url: Web URL of the repository; must contain 'github' or 'gitlab'.
        *args: Pass 'all' to include every .j2 file; otherwise only .j2
            files with a matching .yml file in the same repo are returned.

    Returns:
        A dict with a 'files' mapping of template path -> raw download URL
        (plus repo metadata in the GitHub case), or {'error': message}.
    """
    error_results = dict()
    if 'git' not in ext_url:
        error_results['error'] = 'URL must contain github or gitlab'
        return error_results
    if 'gitlab' in ext_url:
        try:
            # Derive the GitLab API base from the repo web URL.
            words = ext_url.split("/")
            protocol = words[0]
            domain = words[2]
            gitlab_url = '{0}//{1}'.format(protocol, domain)
            findall = '{0}/api/v4/projects/?per_page=100'.format(gitlab_url)
            r = requests.get(findall)
            projects = r.json()  # fix: was misspelled local name 'retuned'
            for x in projects:
                if x['path_with_namespace'] in ext_url:
                    ext_repo_info = {}
                    r = requests.get('{0}/api/v4/projects/{1}/repository/tree'.format(gitlab_url, x['id']))
                    d = r.json()
                    ext_repo_files = {}
                    for path in d:
                        if '.j2' in path['path']:
                            if 'all' in args:
                                ext_repo_files[path['path']] = '{0}/raw/master/{1}'.format(ext_url, path['path'])
                            else:
                                # Only keep templates with a companion .yml file.
                                filename = path['path'].replace("j2", "yml")
                                for yaml_search in d:
                                    if filename in yaml_search['path']:
                                        ext_repo_files[path['path']] = '{0}/raw/master/{1}'.format(ext_url, path['path'])
                    ext_repo_info['files'] = ext_repo_files
                    return ext_repo_info
        except Exception:
            # Bug fix: this branch previously reported "GitHub" for GitLab failures.
            error_results['error'] = 'Unable to access GitLab repo...'
            return error_results
    if 'github.com' in ext_url:
        try:
            # Convert the web URL to the GitHub REST API URL.
            ext_url = ext_url.replace('https://github.com', 'https://api.github.com/repos')
            r = requests.get(ext_url)
            if 'API rate limit exceeded' in r.text:
                error_results['error'] = 'API rate limit exceeded'
                return error_results
            ext_repo_info = r.json()
            # List all files in the repository root.
            r = requests.get(ext_url + '/contents/')
            d = r.json()
            ext_repo_files = {}
            for path in d:
                if '.j2' in path['path']:
                    if 'all' in args:
                        ext_repo_files[path['path']] = path['download_url']
                    else:
                        # Only keep templates with a companion .yml file.
                        filename = path['path'].replace("j2", "yml")
                        for yaml_search in d:
                            if filename in yaml_search['path']:
                                ext_repo_files[path['path']] = path['download_url']
            ext_repo_info['files'] = ext_repo_files
            return ext_repo_info
        except Exception:
            error_results['error'] = 'Unable to access GitHub repo...'
            return error_results
if __name__ == '__main__':
github = 'https://github.com/natemellendorf/tr_templates'
gitlab = 'http://gitlab/root/awesome'
test = get_ext_repo(gitlab)
pprint(test)
| 8,228 | 0 | 92 |
5b158e28007251306ff82f9b52ca056b2713569a | 15,114 | py | Python | valueIteration/value_iteration_4D.py | kensukenk/optimized_dp | 4771787366ca04139c168c8988dad378ad404ab6 | [
"MIT"
] | 41 | 2020-06-23T01:58:03.000Z | 2022-03-28T01:45:12.000Z | valueIteration/value_iteration_4D.py | kensukenk/optimized_dp | 4771787366ca04139c168c8988dad378ad404ab6 | [
"MIT"
] | 1 | 2021-08-01T06:58:57.000Z | 2021-08-01T06:58:57.000Z | valueIteration/value_iteration_4D.py | kensukenk/optimized_dp | 4771787366ca04139c168c8988dad378ad404ab6 | [
"MIT"
] | 20 | 2020-06-05T20:52:02.000Z | 2022-03-01T03:17:39.000Z | import heterocl as hcl
import numpy as np
import time
import os
######################################### HELPER FUNCTIONS #########################################
# Update the value function at position (i,j,k,l)
# iVals: holds index values (i,j,k,l) that correspond to state values (si,sj,sk,sl)
# intermeds: holds the estimated value associated with taking each action
# interpV: holds the estimated value of a successor state (linear interpolation only)
# gamma: discount factor
# ptsEachDim: the number of grid points in each dimension of the state space
# useNN: a mode flag (0: use linear interpolation, 1: use nearest neighbour)
# Returns 0 if convergence has been reached
# Converts state values into indeces using nearest neighbour rounding
# Convert indices into state values
# Sets iVals equal to (i,j,k,l) and sVals equal to the corresponding state values
######################################### VALUE ITERATION ##########################################
# Main value iteration algorithm
# reSweep: a convergence flag (1: continue iterating, 0: convergence reached)
# epsilon: convergence criteria
# maxIters: maximum number of iterations that can occur without convergence being reached
# count: the number of iterations that have been performed
| 60.943548 | 173 | 0.494707 | import heterocl as hcl
import numpy as np
import time
import os
######################################### HELPER FUNCTIONS #########################################
# Update the value function at position (i,j,k,l)
# iVals: holds index values (i,j,k,l) that correspond to state values (si,sj,sk,sl)
# intermeds: holds the estimated value associated with taking each action
# interpV: holds the estimated value of a successor state (linear interpolation only)
# gamma: discount factor
# ptsEachDim: the number of grid points in each dimension of the state space
# useNN: a mode flag (0: use linear interpolation, 1: use nearest neighbour)
def updateVopt(obj, i, j, k, l, iVals, sVals, actions, Vopt, intermeds, trans, interpV, gamma, bounds, goal, ptsEachDim, useNN):
    """HeteroCL kernel: Bellman update of Vopt at grid index (i,j,k,l).

    For each action a, queries obj.transition/obj.reward, accumulates the
    discounted value of each weighted successor state into intermeds[a],
    then sets Vopt[i,j,k,l] to the max over actions.  Only the
    nearest-neighbour mode (useNN == 1) is implemented here.
    """
    p = hcl.scalar(0, "p")  # transition probability of the current successor
    with hcl.for_(0, actions.shape[0], name="a") as a:
        # set iVals equal to (i,j,k,l) and sVals equal to the corresponding state values (si,sj,sk,sl)
        updateStateVals(i, j, k, l, iVals, sVals, bounds, ptsEachDim)
        # call the transition function to obtain the outcome(s) of action a from state (si,sj,sk,sl)
        obj.transition(sVals, actions[a], bounds, trans, goal)
        # initialize the value of the action Q value with the immediate reward of taking that action
        intermeds[a] = obj.reward(sVals, actions[a], bounds, goal, trans)
        # add the value of each possible successor state to the Q value
        with hcl.for_(0, trans.shape[0], name="si") as si:
            # trans row layout: [probability, s0, s1, s2, s3]
            p[0] = trans[si,0]
            sVals[0] = trans[si,1]
            sVals[1] = trans[si,2]
            sVals[2] = trans[si,3]
            sVals[3] = trans[si,4]
            # Nearest neighbour
            with hcl.if_(useNN[0] == 1):
                # convert the state values of the successor state (si,sj,sk,sl) into indeces (ia,ja,ka,la)
                stateToIndex(sVals, iVals, bounds, ptsEachDim)
                # if (ia,ja,ka,la) is within the state space, add its discounted value to the Q value
                with hcl.if_(hcl.and_(iVals[0] < Vopt.shape[0], iVals[1] < Vopt.shape[1], iVals[2] < Vopt.shape[2], iVals[3] < Vopt.shape[3])):
                    with hcl.if_(hcl.and_(iVals[0] >= 0, iVals[1] >= 0, iVals[2] >= 0, iVals[3] >= 0)):
                        intermeds[a] += (gamma[0] * (p[0] * Vopt[iVals[0], iVals[1], iVals[2], iVals[3]]))
    # maximize over each Q value to obtain the optimal value
    # (-1000000 acts as negative infinity for the running max)
    Vopt[i,j,k,l] = -1000000
    with hcl.for_(0, intermeds.shape[0], name="r") as r:
        with hcl.if_(Vopt[i,j,k,l] < intermeds[r]):
            Vopt[i,j,k,l] = intermeds[r]
# Returns 0 if convergence has been reached
def evaluateConvergence(newV, oldV, epsilon, reSweep):
    """Set reSweep[0] = 1 if |newV - oldV| > epsilon, i.e. the value at this
    grid point has not yet converged and another sweep is required."""
    delta = hcl.scalar(0, "delta")
    # Calculate the difference, if it's negative, make it positive
    delta[0] = newV[0] - oldV[0]
    with hcl.if_(delta[0] < 0):
        delta[0] = delta[0] * -1
    with hcl.if_(delta[0] > epsilon[0]):
        reSweep[0] = 1
# Converts state values into indeces using nearest neighbour rounding
def stateToIndex(sVals, iVals, bounds, ptsEachDim):
    """Convert state values sVals into grid indices iVals by linearly mapping
    each dimension from [bounds[d,0], bounds[d,1]] onto [0, ptsEachDim[d]-1]
    and rounding to the nearest integer."""
    iVals[0] = ((sVals[0] - bounds[0,0]) / (bounds[0,1] - bounds[0,0])) * (ptsEachDim[0] - 1)
    iVals[1] = ((sVals[1] - bounds[1,0]) / (bounds[1,1] - bounds[1,0])) * (ptsEachDim[1] - 1)
    iVals[2] = ((sVals[2] - bounds[2,0]) / (bounds[2,1] - bounds[2,0])) * (ptsEachDim[2] - 1)
    iVals[3] = ((sVals[3] - bounds[3,0]) / (bounds[3,1] - bounds[3,0])) * (ptsEachDim[3] - 1)
    # NOTE: add 0.5 to simulate rounding (hcl cast truncates toward zero)
    iVals[0] = hcl.cast(hcl.Int(), iVals[0] + 0.5)
    iVals[1] = hcl.cast(hcl.Int(), iVals[1] + 0.5)
    iVals[2] = hcl.cast(hcl.Int(), iVals[2] + 0.5)
    iVals[3] = hcl.cast(hcl.Int(), iVals[3] + 0.5)
# Convert indices into state values
def indexToState(iVals, sVals, bounds, ptsEachDim):
    """Convert grid indices iVals into state values sVals — the inverse of
    stateToIndex (without the rounding): each index is mapped linearly back
    onto [bounds[d,0], bounds[d,1]]."""
    sVals[0] = bounds[0,0] + ( (bounds[0,1] - bounds[0,0]) * (iVals[0] / (ptsEachDim[0]-1)) )
    sVals[1] = bounds[1,0] + ( (bounds[1,1] - bounds[1,0]) * (iVals[1] / (ptsEachDim[1]-1)) )
    sVals[2] = bounds[2,0] + ( (bounds[2,1] - bounds[2,0]) * (iVals[2] / (ptsEachDim[2]-1)) )
    sVals[3] = bounds[3,0] + ( (bounds[3,1] - bounds[3,0]) * (iVals[3] / (ptsEachDim[3]-1)) )
# Sets iVals equal to (i,j,k,l) and sVals equal to the corresponding state values
def updateStateVals(i, j, k, l, iVals, sVals, bounds, ptsEachDim):
    """Store (i,j,k,l) into iVals and fill sVals with the corresponding
    continuous state values via indexToState."""
    iVals[0] = i
    iVals[1] = j
    iVals[2] = k
    iVals[3] = l
    indexToState(iVals, sVals, bounds, ptsEachDim)
######################################### VALUE ITERATION ##########################################
# Main value iteration algorithm
# reSweep: a convergence flag (1: continue iterating, 0: convergence reached)
# epsilon: convergence criteria
# maxIters: maximum number of iterations that can occur without convergence being reached
# count: the number of iterations that have been performed
def value_iteration_4D(MDP_object):
    """Build and compile a HeteroCL value-iteration kernel for a 4-D MDP.

    The inner solve_Vopt program repeatedly sweeps the 4-D grid in eight
    alternating directions (Gauss-Seidel style) until either convergence
    (no point changed by more than epsilon) or maxIters sweeps have run.
    Sweeps 2-8 only execute in nearest-neighbour mode (useNN == 1); note
    that sweep 1 runs regardless of useNN.
    Returns the compiled LLVM executable produced by hcl.build.
    """
    def solve_Vopt(Vopt, actions, intermeds, trans, interpV, gamma, epsilon, iVals, sVals, bounds, goal, ptsEachDim, count, maxIters, useNN):
        reSweep = hcl.scalar(1, "reSweep")
        oldV = hcl.scalar(0, "oldV")
        newV = hcl.scalar(0, "newV")
        with hcl.while_(hcl.and_(reSweep[0] == 1, count[0] < maxIters[0])):
            reSweep[0] = 0
            # Perform value iteration by sweeping in direction 1
            with hcl.Stage("Sweep_1"):
                with hcl.for_(0, Vopt.shape[0], name="i") as i:
                    with hcl.for_(0, Vopt.shape[1], name="j") as j:
                        with hcl.for_(0, Vopt.shape[2], name="k") as k:
                            with hcl.for_(0, Vopt.shape[3], name="l") as l:
                                oldV[0] = Vopt[i,j,k,l]
                                updateVopt(MDP_object, i, j, k, l, iVals, sVals, actions, Vopt, intermeds, trans, interpV, gamma, bounds, goal, ptsEachDim, useNN)
                                newV[0] = Vopt[i,j,k,l]
                                evaluateConvergence(newV, oldV, epsilon, reSweep)
                count[0] += 1
            # Perform value iteration by sweeping in direction 2 (i, j, k reversed)
            with hcl.Stage("Sweep_2"):
                with hcl.if_(useNN[0] == 1):
                    with hcl.for_(1, Vopt.shape[0] + 1, name="i") as i:
                        with hcl.for_(1, Vopt.shape[1] + 1, name="j") as j:
                            with hcl.for_(1, Vopt.shape[2] + 1, name="k") as k:
                                with hcl.for_(0, Vopt.shape[3], name="l") as l:
                                    i2 = Vopt.shape[0] - i
                                    j2 = Vopt.shape[1] - j
                                    k2 = Vopt.shape[2] - k
                                    oldV[0] = Vopt[i2,j2,k2,l]
                                    updateVopt(MDP_object, i2, j2, k2, l, iVals, sVals, actions, Vopt, intermeds, trans, interpV, gamma, bounds, goal, ptsEachDim, useNN)
                                    newV[0] = Vopt[i2,j2,k2,l]
                                    evaluateConvergence(newV, oldV, epsilon, reSweep)
                    count[0] += 1
            # Perform value iteration by sweeping in direction 3 (i reversed)
            with hcl.Stage("Sweep_3"):
                with hcl.if_(useNN[0] == 1):
                    with hcl.for_(1, Vopt.shape[0] + 1, name="i") as i:
                        with hcl.for_(0, Vopt.shape[1], name="j") as j:
                            with hcl.for_(0, Vopt.shape[2], name="k") as k:
                                with hcl.for_(0, Vopt.shape[3], name="l") as l:
                                    i2 = Vopt.shape[0] - i
                                    oldV[0] = Vopt[i2,j,k,l]
                                    updateVopt(MDP_object, i2, j, k, l, iVals, sVals, actions, Vopt, intermeds, trans, interpV, gamma, bounds, goal, ptsEachDim, useNN)
                                    newV[0] = Vopt[i2,j,k,l]
                                    evaluateConvergence(newV, oldV, epsilon, reSweep)
                    count[0] += 1
            # Perform value iteration by sweeping in direction 4 (j reversed)
            with hcl.Stage("Sweep_4"):
                with hcl.if_(useNN[0] == 1):
                    with hcl.for_(0, Vopt.shape[0], name="i") as i:
                        with hcl.for_(1, Vopt.shape[1] + 1, name="j") as j:
                            with hcl.for_(0, Vopt.shape[2], name="k") as k:
                                with hcl.for_(0, Vopt.shape[3], name="l") as l:
                                    j2 = Vopt.shape[1] - j
                                    oldV[0] = Vopt[i,j2,k,l]
                                    updateVopt(MDP_object, i, j2, k, l, iVals, sVals, actions, Vopt, intermeds, trans, interpV, gamma, bounds, goal, ptsEachDim, useNN)
                                    newV[0] = Vopt[i,j2,k,l]
                                    evaluateConvergence(newV, oldV, epsilon, reSweep)
                    count[0] += 1
            # Perform value iteration by sweeping in direction 5 (k reversed)
            with hcl.Stage("Sweep_5"):
                with hcl.if_(useNN[0] == 1):
                    with hcl.for_(0, Vopt.shape[0], name="i") as i:
                        with hcl.for_(0, Vopt.shape[1], name="j") as j:
                            with hcl.for_(1, Vopt.shape[2] + 1, name="k") as k:
                                with hcl.for_(0, Vopt.shape[3], name="l") as l:
                                    k2 = Vopt.shape[2] - k
                                    oldV[0] = Vopt[i,j,k2,l]
                                    updateVopt(MDP_object, i, j, k2, l, iVals, sVals, actions, Vopt, intermeds, trans, interpV, gamma, bounds, goal, ptsEachDim, useNN)
                                    newV[0] = Vopt[i,j,k2,l]
                                    evaluateConvergence(newV, oldV, epsilon, reSweep)
                    count[0] += 1
            # Perform value iteration by sweeping in direction 6 (i, j reversed)
            with hcl.Stage("Sweep_6"):
                with hcl.if_(useNN[0] == 1):
                    with hcl.for_(1, Vopt.shape[0] + 1, name="i") as i:
                        with hcl.for_(1, Vopt.shape[1] + 1, name="j") as j:
                            with hcl.for_(0, Vopt.shape[2], name="k") as k:
                                with hcl.for_(0, Vopt.shape[3], name="l") as l:
                                    i2 = Vopt.shape[0] - i
                                    j2 = Vopt.shape[1] - j
                                    oldV[0] = Vopt[i2,j2,k,l]
                                    updateVopt(MDP_object, i2, j2, k, l, iVals, sVals, actions, Vopt, intermeds, trans, interpV, gamma, bounds, goal, ptsEachDim, useNN)
                                    newV[0] = Vopt[i2,j2,k,l]
                                    evaluateConvergence(newV, oldV, epsilon, reSweep)
                    count[0] += 1
            # Perform value iteration by sweeping in direction 7 (i, k reversed)
            with hcl.Stage("Sweep_7"):
                with hcl.if_(useNN[0] == 1):
                    with hcl.for_(1, Vopt.shape[0] + 1, name="i") as i:
                        with hcl.for_(0, Vopt.shape[1], name="j") as j:
                            with hcl.for_(1, Vopt.shape[2] + 1, name="k") as k:
                                with hcl.for_(0, Vopt.shape[3], name="l") as l:
                                    i2 = Vopt.shape[0] - i
                                    k2 = Vopt.shape[2] - k
                                    oldV[0] = Vopt[i2,j,k2,l]
                                    updateVopt(MDP_object, i2, j, k2, l, iVals, sVals, actions, Vopt, intermeds, trans, interpV, gamma, bounds, goal, ptsEachDim, useNN)
                                    newV[0] = Vopt[i2,j,k2,l]
                                    evaluateConvergence(newV, oldV, epsilon, reSweep)
                    count[0] += 1
            # Perform value iteration by sweeping in direction 8 (j, k reversed)
            with hcl.Stage("Sweep_8"):
                with hcl.if_(useNN[0] == 1):
                    with hcl.for_(0, Vopt.shape[0], name="i") as i:
                        with hcl.for_(1, Vopt.shape[1] + 1, name="j") as j:
                            with hcl.for_(1, Vopt.shape[2] + 1, name="k") as k:
                                with hcl.for_(0, Vopt.shape[3], name="l") as l:
                                    j2 = Vopt.shape[1] - j
                                    k2 = Vopt.shape[2] - k
                                    oldV[0] = Vopt[i,j2,k2,l]
                                    updateVopt(MDP_object, i, j2, k2, l, iVals, sVals, actions, Vopt, intermeds, trans, interpV, gamma, bounds, goal, ptsEachDim, useNN)
                                    newV[0] = Vopt[i,j2,k2,l]
                                    evaluateConvergence(newV, oldV, epsilon, reSweep)
                    count[0] += 1
    ###################################### SETUP PLACEHOLDERS ######################################
    # Initialize the HCL environment
    hcl.init()
    hcl.config.init_dtype = hcl.Float()
    # NOTE: trans is a tensor with size = maximum number of transitions
    # NOTE: intermeds must have size [# possible actions]
    # NOTE: transition must have size [# possible outcomes, #state dimensions + 1]
    Vopt = hcl.placeholder(tuple(MDP_object._ptsEachDim), name="Vopt", dtype=hcl.Float())
    gamma = hcl.placeholder((0,), "gamma")
    count = hcl.placeholder((0,), "count")
    maxIters = hcl.placeholder((0,), "maxIters")
    epsilon = hcl.placeholder((0,), "epsilon")
    actions = hcl.placeholder(tuple(MDP_object._actions.shape), name="actions", dtype=hcl.Float())
    intermeds = hcl.placeholder(tuple([MDP_object._actions.shape[0]]), name="intermeds", dtype=hcl.Float())
    trans = hcl.placeholder(tuple(MDP_object._trans.shape), name="successors", dtype=hcl.Float())
    bounds = hcl.placeholder(tuple(MDP_object._bounds.shape), name="bounds", dtype=hcl.Float())
    goal = hcl.placeholder(tuple(MDP_object._goal.shape), name="goal", dtype=hcl.Float())
    ptsEachDim = hcl.placeholder(tuple([4]), name="ptsEachDim", dtype=hcl.Float())
    sVals = hcl.placeholder(tuple([4]), name="sVals", dtype=hcl.Float())
    iVals = hcl.placeholder(tuple([4]), name="iVals", dtype=hcl.Float())
    interpV = hcl.placeholder((0,), "interpols")
    useNN = hcl.placeholder((0,), "useNN")
    # Create a static schedule -- graph
    s = hcl.create_schedule([Vopt, actions, intermeds, trans, interpV, gamma, epsilon, iVals, sVals, bounds, goal, ptsEachDim, count, maxIters, useNN], solve_Vopt)
    # Use this graph and build an executable
    return hcl.build(s, target="llvm")
| 13,675 | 0 | 134 |
e914cdbaf9db38c6a1e4d3a7709ce06b88ad5dcc | 957 | py | Python | blog/templatetags/blog_tags.py | Volodichev/Django | cebc9629987bc02067a1aa8d6e4ff901a24d1f98 | [
"MIT"
] | null | null | null | blog/templatetags/blog_tags.py | Volodichev/Django | cebc9629987bc02067a1aa8d6e4ff901a24d1f98 | [
"MIT"
] | null | null | null | blog/templatetags/blog_tags.py | Volodichev/Django | cebc9629987bc02067a1aa8d6e4ff901a24d1f98 | [
"MIT"
] | null | null | null | from django import template
from blog.models import Category
register = template.Library()
def get_categories(context, order, count):
"""Получаю список категорий"""
# categories = Category.objects.filter(published=True, parent__isnull=True).order_by(order)
categories = Category.objects.filter(published=True).order_by(order)
if count is not None:
categories = categories[:count]
return categories
@register.inclusion_tag('base/tags/base_tag.html', takes_context=True)
def category_list(context, order='-name', count=None, template='base/blog/categories.html'):
"""template tag вывода категорий"""
categories = get_categories(context, order, count)
return {'template': template, "category_list": categories}
@register.simple_tag(takes_context=True)
def for_category_list(context, count=None, order='-name'):
"""template tag вывода категорий без шаблона"""
return get_categories(context, order, count)
| 34.178571 | 95 | 0.746082 | from django import template
from blog.models import Category
register = template.Library()
def get_categories(context, order, count):
"""Получаю список категорий"""
# categories = Category.objects.filter(published=True, parent__isnull=True).order_by(order)
categories = Category.objects.filter(published=True).order_by(order)
if count is not None:
categories = categories[:count]
return categories
@register.inclusion_tag('base/tags/base_tag.html', takes_context=True)
def category_list(context, order='-name', count=None, template='base/blog/categories.html'):
    """Template tag: render the category list with the given template."""
    return {
        'template': template,
        "category_list": get_categories(context, order, count),
    }
@register.simple_tag(takes_context=True)
def for_category_list(context, count=None, order='-name'):
    """Template tag returning the raw category queryset (no template rendering)."""
    return get_categories(context, order, count)
| 0 | 0 | 0 |
8e7e0be8d21bf4d81ac8c4f4330098fdfbc73d8a | 774 | py | Python | 2017/day4/puzzle2.py | tcmitchell/AdventOfCode | caaac1aa37c999d4804f9f4154bf7033a06e98af | [
"MIT"
] | null | null | null | 2017/day4/puzzle2.py | tcmitchell/AdventOfCode | caaac1aa37c999d4804f9f4154bf7033a06e98af | [
"MIT"
] | null | null | null | 2017/day4/puzzle2.py | tcmitchell/AdventOfCode | caaac1aa37c999d4804f9f4154bf7033a06e98af | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# http://adventofcode.com/2017/day/4
import sys
if __name__ == '__main__':
main(sys.argv)
| 23.454545 | 66 | 0.590439 | #!/usr/bin/env python3
# http://adventofcode.com/2017/day/4
import sys
def load_passphrases(datafile):
with open(datafile, 'rb') as f:
return [line.decode('utf-8').strip('\n') for line in f]
def main(argv):
datafile = argv[1]
passphrases = load_passphrases(datafile)
valid = 0
invalid = 0
for phrase in passphrases:
words = phrase.split(' ')
exploded = [''.join(sorted(list(word))) for word in words]
word_set = set(exploded)
if len(words) == len(word_set):
valid += 1
else:
invalid += 1
print('Loaded %d passphrases' % (len(passphrases)))
print('Found %d valid' % (valid))
print('Found %d invalid' % (invalid))
if __name__ == '__main__':
main(sys.argv)
| 605 | 0 | 46 |
f679f0ada9270379bfe42846ba1ec73d464a78cf | 128 | py | Python | lifelist/api/admin.py | andela-mnzomo/life-list | 28a7fa9d16e2b322e4a1bce269dbe7331e783534 | [
"Unlicense"
] | 3 | 2017-08-17T07:12:03.000Z | 2017-10-18T11:13:44.000Z | lifelist/api/admin.py | andela-mnzomo/life-list | 28a7fa9d16e2b322e4a1bce269dbe7331e783534 | [
"Unlicense"
] | 1 | 2018-05-30T14:38:52.000Z | 2018-05-30T14:38:52.000Z | lifelist/api/admin.py | andela-mnzomo/life-list | 28a7fa9d16e2b322e4a1bce269dbe7331e783534 | [
"Unlicense"
] | null | null | null | from django.contrib import admin
from models import Bucketlist, Item
# Expose the Bucketlist and Item models in the Django admin site.
admin.site.register(Bucketlist)
admin.site.register(Item)
| 21.333333 | 35 | 0.828125 | from django.contrib import admin
from models import Bucketlist, Item
admin.site.register(Bucketlist)
admin.site.register(Item)
| 0 | 0 | 0 |
59e10b8a316f3d2f6e6fe62963da3255be088523 | 1,690 | py | Python | setup.py | GrammaTech/gtirb-capstone | f46d90e9cd733c632620e5d8c921a4b9f011020a | [
"MIT"
] | 6 | 2020-04-10T15:19:30.000Z | 2021-04-13T22:54:17.000Z | setup.py | GrammaTech/gtirb-capstone | f46d90e9cd733c632620e5d8c921a4b9f011020a | [
"MIT"
] | null | null | null | setup.py | GrammaTech/gtirb-capstone | f46d90e9cd733c632620e5d8c921a4b9f011020a | [
"MIT"
] | 3 | 2020-07-10T22:52:32.000Z | 2021-02-13T19:52:22.000Z | #
# Copyright (C) 2020 GrammaTech, Inc.
#
# This code is licensed under the MIT license. See the LICENSE file in
# the project root for license terms.
#
# This project is sponsored by the Office of Naval Research, One Liberty
# Center, 875 N. Randolph Street, Arlington, VA 22203 under contract #
# N68335-17-C-0700. The content of the information does not necessarily
# reflect the position or policy of the Government and no official
# endorsement should be inferred.
#
import imp
import setuptools
# `imp` was deprecated since Python 3.4 and removed in 3.12; load the
# version module through importlib instead.  Module name and file path
# are kept exactly as before.
import importlib.util

_version_spec = importlib.util.spec_from_file_location(
    "pkginfo.version", "gtirb_capstone/version.py"
)
_version_module = importlib.util.module_from_spec(_version_spec)
_version_spec.loader.exec_module(_version_module)
__version__ = _version_module.__version__
if __name__ == "__main__":
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="gtirb-capstone",
version=__version__,
author="Grammatech",
author_email="gtirb@grammatech.com",
description="Utilities for rewriting GTIRB with capstone and keystone",
packages=setuptools.find_packages(),
install_requires=[
"capstone-gt",
"dataclasses ; python_version<'3.7.0'",
"gtirb",
"keystone-engine",
],
classifiers=["Programming Language :: Python :: 3"],
extras_require={
"test": [
"flake8",
"isort",
"pytest",
"pytest-cov",
"tox",
"tox-wheel",
"pre-commit",
"mcasm",
]
},
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/grammatech/gtirb-functions",
license="MIT",
)
| 30.178571 | 79 | 0.597041 | #
# Copyright (C) 2020 GrammaTech, Inc.
#
# This code is licensed under the MIT license. See the LICENSE file in
# the project root for license terms.
#
# This project is sponsored by the Office of Naval Research, One Liberty
# Center, 875 N. Randolph Street, Arlington, VA 22203 under contract #
# N68335-17-C-0700. The content of the information does not necessarily
# reflect the position or policy of the Government and no official
# endorsement should be inferred.
#
import imp
import setuptools
# `imp` was deprecated since Python 3.4 and removed in 3.12; load the
# version module through importlib instead.  Module name and file path
# are kept exactly as before.
import importlib.util

_version_spec = importlib.util.spec_from_file_location(
    "pkginfo.version", "gtirb_capstone/version.py"
)
_version_module = importlib.util.module_from_spec(_version_spec)
_version_spec.loader.exec_module(_version_module)
__version__ = _version_module.__version__
if __name__ == "__main__":
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="gtirb-capstone",
version=__version__,
author="Grammatech",
author_email="gtirb@grammatech.com",
description="Utilities for rewriting GTIRB with capstone and keystone",
packages=setuptools.find_packages(),
install_requires=[
"capstone-gt",
"dataclasses ; python_version<'3.7.0'",
"gtirb",
"keystone-engine",
],
classifiers=["Programming Language :: Python :: 3"],
extras_require={
"test": [
"flake8",
"isort",
"pytest",
"pytest-cov",
"tox",
"tox-wheel",
"pre-commit",
"mcasm",
]
},
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/grammatech/gtirb-functions",
license="MIT",
)
| 0 | 0 | 0 |
2f88913b6f9fe5c8a3c60c7802a9be91b9c38253 | 360 | py | Python | drf_registration/api/__init__.py | rti/drf-registration | 0d631730e1730a7778398f4c1e811ca0df57e260 | [
"MIT"
] | 1 | 2020-12-07T04:44:51.000Z | 2020-12-07T04:44:51.000Z | drf_registration/api/__init__.py | cunguyendev/drf-registration | 2a9e5ffbffa23bdc787c8363bdd0ffd170cf6bb6 | [
"MIT"
] | null | null | null | drf_registration/api/__init__.py | cunguyendev/drf-registration | 2a9e5ffbffa23bdc787c8363bdd0ffd170cf6bb6 | [
"MIT"
] | null | null | null | from .login import LoginView, SocialLoginView
from .logout import LogoutView
from .register import RegisterView, VerifyView, ActivateView
from .profile import ProfileView
from .change_password import ChangePasswordView
from .reset_password import ResetPasswordView, ResetPasswordConfirmView, ResetPasswordCompleteView
from .set_password import SetPasswordView
| 45 | 98 | 0.875 | from .login import LoginView, SocialLoginView
from .logout import LogoutView
from .register import RegisterView, VerifyView, ActivateView
from .profile import ProfileView
from .change_password import ChangePasswordView
from .reset_password import ResetPasswordView, ResetPasswordConfirmView, ResetPasswordCompleteView
from .set_password import SetPasswordView
| 0 | 0 | 0 |
4d1b9062f6dd748ffda8b31a1aceb57d2db1dec1 | 155 | py | Python | blogs/admin.py | AnkushCh/Finalproject | 880d29390043a506c8c4f570b8005b9f4660454b | [
"MIT"
] | 1 | 2020-12-01T09:59:21.000Z | 2020-12-01T09:59:21.000Z | blogs/admin.py | AnkushCh/Finalproject | 880d29390043a506c8c4f570b8005b9f4660454b | [
"MIT"
] | null | null | null | blogs/admin.py | AnkushCh/Finalproject | 880d29390043a506c8c4f570b8005b9f4660454b | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Post, Comments
# Register your models here.
admin.site.register(Post)
admin.site.register(Comments)
| 19.375 | 34 | 0.8 | from django.contrib import admin
from .models import Post, Comments
# Register your models here.
admin.site.register(Post)
admin.site.register(Comments)
| 0 | 0 | 0 |
96bd8ee23bf397dd5607427f8c9e71c375b4e70e | 7,439 | py | Python | unitx/UnitXListener.py | 0ED/UnitX | f3c8c564cb7822cebb24ebc000ca454f3222fbf2 | [
"MIT"
] | 2 | 2016-03-13T08:35:58.000Z | 2016-03-13T19:20:07.000Z | unitx/UnitXListener.py | 0ED/UnitX | f3c8c564cb7822cebb24ebc000ca454f3222fbf2 | [
"MIT"
] | 1 | 2016-11-04T23:34:21.000Z | 2016-11-04T23:34:21.000Z | unitx/UnitXListener.py | supertask/UnitX | f3c8c564cb7822cebb24ebc000ca454f3222fbf2 | [
"MIT"
] | null | null | null | # Generated from UnitX.g4 by ANTLR 4.5.1
from antlr4 import *
# This class defines a complete listener for a parse tree produced by UnitXParser.
# Enter a parse tree produced by UnitXParser#program.
# Exit a parse tree produced by UnitXParser#program.
# Enter a parse tree produced by UnitXParser#typeDeclaration.
# Exit a parse tree produced by UnitXParser#typeDeclaration.
# Enter a parse tree produced by UnitXParser#functionDeclaration.
# Exit a parse tree produced by UnitXParser#functionDeclaration.
# Enter a parse tree produced by UnitXParser#formalParameters.
# Exit a parse tree produced by UnitXParser#formalParameters.
# Enter a parse tree produced by UnitXParser#formalParameterList.
# Exit a parse tree produced by UnitXParser#formalParameterList.
# Enter a parse tree produced by UnitXParser#formalParameter.
# Exit a parse tree produced by UnitXParser#formalParameter.
# Enter a parse tree produced by UnitXParser#block.
# Exit a parse tree produced by UnitXParser#block.
# Enter a parse tree produced by UnitXParser#blockStatement.
# Exit a parse tree produced by UnitXParser#blockStatement.
# Enter a parse tree produced by UnitXParser#statement.
# Exit a parse tree produced by UnitXParser#statement.
# Enter a parse tree produced by UnitXParser#repStatement.
# Exit a parse tree produced by UnitXParser#repStatement.
# Enter a parse tree produced by UnitXParser#ifStatement.
# Exit a parse tree produced by UnitXParser#ifStatement.
# Enter a parse tree produced by UnitXParser#expressionStatement.
# Exit a parse tree produced by UnitXParser#expressionStatement.
# Enter a parse tree produced by UnitXParser#printStatement.
# Exit a parse tree produced by UnitXParser#printStatement.
# Enter a parse tree produced by UnitXParser#assertStatement.
# Exit a parse tree produced by UnitXParser#assertStatement.
# Enter a parse tree produced by UnitXParser#dumpStatement.
# Exit a parse tree produced by UnitXParser#dumpStatement.
# Enter a parse tree produced by UnitXParser#borderStatement.
# Exit a parse tree produced by UnitXParser#borderStatement.
# Enter a parse tree produced by UnitXParser#expressionList.
# Exit a parse tree produced by UnitXParser#expressionList.
# Enter a parse tree produced by UnitXParser#parExpression.
# Exit a parse tree produced by UnitXParser#parExpression.
# Enter a parse tree produced by UnitXParser#repControl.
# Exit a parse tree produced by UnitXParser#repControl.
# Enter a parse tree produced by UnitXParser#endRep.
# Exit a parse tree produced by UnitXParser#endRep.
# Enter a parse tree produced by UnitXParser#expression.
# Exit a parse tree produced by UnitXParser#expression.
# Enter a parse tree produced by UnitXParser#unit.
# Exit a parse tree produced by UnitXParser#unit.
# Enter a parse tree produced by UnitXParser#unitSingleOrPairOperator.
# Exit a parse tree produced by UnitXParser#unitSingleOrPairOperator.
# Enter a parse tree produced by UnitXParser#unitOperator.
# Exit a parse tree produced by UnitXParser#unitOperator.
# Enter a parse tree produced by UnitXParser#primary.
# Exit a parse tree produced by UnitXParser#primary.
# Enter a parse tree produced by UnitXParser#literal.
# Exit a parse tree produced by UnitXParser#literal.
# Enter a parse tree produced by UnitXParser#string.
# Exit a parse tree produced by UnitXParser#string.
# Enter a parse tree produced by UnitXParser#halfString.
# Exit a parse tree produced by UnitXParser#halfString.
# Enter a parse tree produced by UnitXParser#number.
# Exit a parse tree produced by UnitXParser#number.
# Enter a parse tree produced by UnitXParser#integer.
# Exit a parse tree produced by UnitXParser#integer.
# Enter a parse tree produced by UnitXParser#boolean.
# Exit a parse tree produced by UnitXParser#boolean.
# Enter a parse tree produced by UnitXParser#none.
# Exit a parse tree produced by UnitXParser#none.
| 25.216949 | 82 | 0.680737 | # Generated from UnitX.g4 by ANTLR 4.5.1
from antlr4 import *
# This class defines a complete listener for a parse tree produced by UnitXParser.
class UnitXListener(ParseTreeListener):
    """No-op base listener for parse trees produced by UnitXParser.

    Generated by ANTLR from UnitX.g4: every enter*/exit* callback below is
    an empty stub.  Subclasses override only the callbacks they need;
    changes should normally be made in the grammar and this file
    regenerated, not edited by hand.
    """
    # Enter a parse tree produced by UnitXParser#program.
    def enterProgram(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#program.
    def exitProgram(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#typeDeclaration.
    def enterTypeDeclaration(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#typeDeclaration.
    def exitTypeDeclaration(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#functionDeclaration.
    def enterFunctionDeclaration(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#functionDeclaration.
    def exitFunctionDeclaration(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#formalParameters.
    def enterFormalParameters(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#formalParameters.
    def exitFormalParameters(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#formalParameterList.
    def enterFormalParameterList(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#formalParameterList.
    def exitFormalParameterList(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#formalParameter.
    def enterFormalParameter(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#formalParameter.
    def exitFormalParameter(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#block.
    def enterBlock(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#block.
    def exitBlock(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#blockStatement.
    def enterBlockStatement(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#blockStatement.
    def exitBlockStatement(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#statement.
    def enterStatement(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#statement.
    def exitStatement(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#repStatement.
    def enterRepStatement(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#repStatement.
    def exitRepStatement(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#ifStatement.
    def enterIfStatement(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#ifStatement.
    def exitIfStatement(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#expressionStatement.
    def enterExpressionStatement(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#expressionStatement.
    def exitExpressionStatement(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#printStatement.
    def enterPrintStatement(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#printStatement.
    def exitPrintStatement(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#assertStatement.
    def enterAssertStatement(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#assertStatement.
    def exitAssertStatement(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#dumpStatement.
    def enterDumpStatement(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#dumpStatement.
    def exitDumpStatement(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#borderStatement.
    def enterBorderStatement(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#borderStatement.
    def exitBorderStatement(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#expressionList.
    def enterExpressionList(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#expressionList.
    def exitExpressionList(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#parExpression.
    def enterParExpression(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#parExpression.
    def exitParExpression(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#repControl.
    def enterRepControl(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#repControl.
    def exitRepControl(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#endRep.
    def enterEndRep(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#endRep.
    def exitEndRep(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#expression.
    def enterExpression(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#expression.
    def exitExpression(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#unit.
    def enterUnit(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#unit.
    def exitUnit(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#unitSingleOrPairOperator.
    def enterUnitSingleOrPairOperator(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#unitSingleOrPairOperator.
    def exitUnitSingleOrPairOperator(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#unitOperator.
    def enterUnitOperator(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#unitOperator.
    def exitUnitOperator(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#primary.
    def enterPrimary(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#primary.
    def exitPrimary(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#literal.
    def enterLiteral(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#literal.
    def exitLiteral(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#string.
    def enterString(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#string.
    def exitString(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#halfString.
    def enterHalfString(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#halfString.
    def exitHalfString(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#number.
    def enterNumber(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#number.
    def exitNumber(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#integer.
    def enterInteger(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#integer.
    def exitInteger(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#boolean.
    def enterBoolean(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#boolean.
    def exitBoolean(self, ctx):
        pass
    # Enter a parse tree produced by UnitXParser#none.
    def enterNone(self, ctx):
        pass
    # Exit a parse tree produced by UnitXParser#none.
    def exitNone(self, ctx):
        pass
| 1,530 | 18 | 1,686 |
b77cf51d7f72fbf4600506697564ae15a5c5100a | 13,850 | py | Python | wagon_tracking/tracking.py | TheCamilovisk/pytorch-ssd | 83f7adafd5a1d44e53fea20b34f80b367b0a7ca4 | [
"MIT"
] | null | null | null | wagon_tracking/tracking.py | TheCamilovisk/pytorch-ssd | 83f7adafd5a1d44e53fea20b34f80b367b0a7ca4 | [
"MIT"
] | null | null | null | wagon_tracking/tracking.py | TheCamilovisk/pytorch-ssd | 83f7adafd5a1d44e53fea20b34f80b367b0a7ca4 | [
"MIT"
] | 1 | 2019-10-07T17:00:52.000Z | 2019-10-07T17:00:52.000Z | from copy import deepcopy
import cv2 as cv
import numpy as np
from sortedcontainers import SortedDict
import vision.utils.box_utils_numpy as box_utils
from wagon_tracking.transforms import ImageDownscaleTransform
| 33.293269 | 87 | 0.600361 | from copy import deepcopy
import cv2 as cv
import numpy as np
from sortedcontainers import SortedDict
import vision.utils.box_utils_numpy as box_utils
from wagon_tracking.transforms import ImageDownscaleTransform
class OpticalMovementEstimator:
    """Estimates the global inter-frame motion vector with sparse optical flow.

    Shi-Tomasi corners are detected on the previous frame and tracked into
    the current one with the pyramidal Lucas-Kanade tracker; the averaged
    displacement of the surviving corners is returned as the global motion.
    """

    def __init__(self, update_interval=5, frame_downscale_factor=None):
        # Re-detect corners every `update_interval` processed frames.
        self.update_interval = update_interval
        self.frame_count = 0
        self.downscale_t = None
        if frame_downscale_factor:
            if (
                not isinstance(frame_downscale_factor, int)
                or frame_downscale_factor < 0
            ):
                raise TypeError('Downscale factor must be an positive integer.')
            self.downscale_t = ImageDownscaleTransform(frame_downscale_factor)
        # Shi-Tomasi corner-detection parameters (cv.goodFeaturesToTrack).
        self.feature_params = dict(
            maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7
        )
        # Lucas-Kanade pyramidal optical-flow parameters.
        self.lk_params = dict(
            winSize=(15, 15),
            maxLevel=2,
            criteria=(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 0.03),
        )
        self.old_gray = None  # previous frame, grayscale
        self.corners = None  # tracked corner positions

    def __call__(self, frame):
        """Return the (dx, dy) global movement between the previous frame and *frame*."""
        if self.downscale_t:
            frame = self.downscale_t(frame)
        if self.corners is None:
            # First frame: just seed the corner set; no motion can be reported yet.
            self.old_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
            self._update_features()
            return np.array([0, 0])
        frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
        u_corners, status, error = cv.calcOpticalFlowPyrLK(
            self.old_gray, frame_gray, self.corners, None, **self.lk_params
        )
        # Keep only the corners that were successfully tracked (status == 1).
        good_new = u_corners[status == 1]
        good_old = self.corners[status == 1]
        global_mov = self._compute_global_movement(good_new, good_old)
        self.old_gray = frame_gray
        self.corners = good_new.reshape(-1, 1, 2)
        self.frame_count += 1
        if self.frame_count % self.update_interval == 0:
            self._update_features()
        return global_mov

    def _update_features(self):
        """Re-detect Shi-Tomasi corners on the stored grayscale frame."""
        self.corners = cv.goodFeaturesToTrack(
            self.old_gray, mask=None, **self.feature_params
        )

    def _compute_global_movement(self, good_new, good_old):
        """Average the per-corner displacements, ignoring sub-pixel jitter."""
        # NOTE(review): np.float was removed in NumPy 1.24; replace with
        # float / np.float64 when upgrading NumPy.
        global_mov = np.array([0, 0], dtype=np.float)
        n_mov_vectors = 0
        for new, old in zip(good_new, good_old):
            mov = new - old
            # Jitter threshold, tightened proportionally when frames are downscaled.
            mov_thresh = (
                0.5 / self.downscale_t.factor if self.downscale_t is not None else 0.5
            )
            if np.linalg.norm(mov) < mov_thresh:
                continue
            global_mov += mov
            n_mov_vectors += 1
        global_mov = global_mov / n_mov_vectors if n_mov_vectors else global_mov
        if self.downscale_t:
            # NOTE(review): compensates with factor ** 2 rather than factor —
            # confirm this is the intended rescaling back to full resolution.
            global_mov *= self.downscale_t.factor ** 2
        return global_mov
class BoxesMovementEstimator:
    """Estimates the mean translation of a set of boxes between two frames."""

    def __init__(self):
        # Kept for interface compatibility; __call__ does not read it.
        self.movement = np.array([0, 0])

    def __call__(self, last_positions, new_positions):
        """Return the average displacement of box centers (old -> new)."""
        old_centers = 0.5 * (last_positions[:, :2] + last_positions[:, 2:])
        new_centers = 0.5 * (new_positions[:, :2] + new_positions[:, 2:])
        return (new_centers - old_centers).mean(axis=0)
class Tracker:
    """Base multi-object tracker mapping stable integer ids to (box, label) pairs."""

    def __init__(self, detector):
        self.detector = detector
        self.elements_info = SortedDict()
        self.next_element_id = 0

    def __call__(self, image):
        """Detect objects in *image*, update the tracks, and return a copy of them."""
        boxes, labels, _ = self.detector(image)
        boxes = boxes.numpy()
        labels = labels.numpy()
        if len(boxes):
            boxes, labels = self._sort_detections(boxes, labels)
        self._update_tracking(boxes, labels)
        self.elements_info = SortedDict(self.elements_info)
        return deepcopy(self.elements_info)

    def _sort_detections(self, boxes, labels):
        """Order the detections left-to-right by the boxes' x-min coordinate."""
        order = np.argsort(boxes[:, 0], axis=0)
        return boxes[order, :], labels[order]

    def _update_tracking(self, boxes, labels):
        """Subclass hook: merge the new detections into ``self.elements_info``."""
        raise NotImplementedError

    def _get_new_elements_info(self, remaining_elements_info, updated_elements_info):
        """Assign fresh ids to the detections that matched no existing track."""
        boxes, labels = remaining_elements_info
        if len(boxes) == 0:
            return {}
        base = self.next_element_id
        fresh = {
            base + offset: (box, lbl)
            for offset, (box, lbl) in enumerate(zip(boxes, labels))
        }
        self.next_element_id += len(fresh)
        return fresh
class WagonTracker(Tracker):
    """Tracker specialisation that matches detections to existing tracks via
    IoU after compensating for the global (optical-flow) motion, and
    propagates tracks that were not re-detected using the boxes' own
    motion estimate.
    """

    def __init__(
        self,
        detector,
        detection_threshold,
        video_fps=30,
        target_fps=30,
        restrictions=None,
    ):
        """Create the tracker.

        :param detector: callable returning (boxes, labels, scores) tensors.
        :param detection_threshold: x coordinate splitting left/right visibility.
        :param video_fps: frame rate of the input video.
        :param target_fps: frame rate the motion estimates are normalized to.
        :param restrictions: optional callables filtering unmatched detections.
        """
        super().__init__(detector)
        self.optical_movement = np.array([0.0, 0.0])
        self.boxes_movement = np.array([0.0, 0.0])
        self.detection_threshold = detection_threshold
        # Fix: the original used a mutable default argument (restrictions=[]),
        # which is shared between all instances created with the default.
        self.restrictions = [] if restrictions is None else restrictions
        self.optical_motion_estimator = OpticalMovementEstimator(update_interval=3)
        self.boxes_motion_estimator = BoxesMovementEstimator()
        self.video_fps = video_fps
        self.target_fps = target_fps
        self.fps_ratio = self.target_fps / self.video_fps

    def __call__(self, image):
        """Estimate global motion for *image*, then run the base tracking step."""
        self._estimate_optical_motion(image)
        return super().__call__(image)

    def _estimate_optical_motion(self, image):
        """Update the optical global-motion estimate (horizontal only, clipped)."""
        # Fix: np.float was removed in NumPy 1.24; it was an alias of the
        # builtin float, so astype(float) is identical.
        self.optical_movement = self.optical_motion_estimator(image).astype(float)
        self.optical_movement *= self.fps_ratio
        # The train moves horizontally: discard vertical motion and cap the
        # per-frame displacement.
        self.optical_movement[1] = 0.0
        self.optical_movement[0] = np.clip(self.optical_movement[0], -45.0, 45.0)

    def _estimate_boxes_motion(self, updated_elements_info):
        """Smooth the track-box motion estimate from the matched detections."""
        last_positions = []
        new_positions = []
        for key in updated_elements_info.keys():
            last_positions.append(self.elements_info[key][0])
            new_positions.append(updated_elements_info[key][0])
        if len(last_positions) == 0 or len(new_positions) == 0:
            # No matches this frame: seed the estimate from optical flow once.
            if (self.optical_movement != 0).any() and (self.boxes_movement == 0).all():
                self.boxes_movement = self.optical_movement
            return
        last_positions = np.asarray(last_positions)
        new_positions = np.asarray(new_positions)
        boxes_movement = self.boxes_motion_estimator(last_positions, new_positions)
        boxes_movement[1] = 0.0
        boxes_movement[0] = np.clip(boxes_movement[0], -45.0, 45.0)
        # Suppress small estimates as noise.
        if np.linalg.norm(boxes_movement) < 3:
            boxes_movement = np.zeros_like(boxes_movement)
        # Exponential-style smoothing with the previous estimate.
        self.boxes_movement = (boxes_movement + self.boxes_movement) / 2

    def _update_tracking(self, boxes, labels):
        """Merge the frame's detections into the tracked elements."""
        if len(self.elements_info) == 0 and len(boxes) > 0:
            self._init_elements_info(boxes, labels)
            return
        updated_elements_info, remaining_elements_info = self._update_elements(
            boxes, labels
        )
        self._estimate_boxes_motion(updated_elements_info)
        # Tracks with no matching detection are moved by the motion estimate.
        notfound_elements_info = self._update_notfound_elements(updated_elements_info)
        updated_elements_info.update(notfound_elements_info)
        # Let the configured restrictions filter the unmatched detections.
        for restriction in self.restrictions:
            remaining_elements_info = restriction(
                *remaining_elements_info, updated_elements_info
            )
        new_elements_info = self._get_new_elements_info(
            remaining_elements_info, updated_elements_info
        )
        if len(new_elements_info) > 0:
            updated_elements_info.update(new_elements_info)
            updated_elements_info = self._check_elements_tracking_info(
                updated_elements_info
            )
        self.elements_info = updated_elements_info

    def _init_elements_info(self, boxes, labels):
        """Seed the track table from the first non-empty set of detections."""
        self.elements_info = {
            id: (box, lbl) for id, (box, lbl) in enumerate(zip(boxes, labels))
        }
        self.next_element_id = len(self.elements_info)

    def _update_elements(self, boxes, labels):
        """Match detections to tracks by IoU after optical-motion compensation.

        Returns the matched (id -> (box, label)) mapping and the leftover
        (boxes, labels) that matched nothing.
        """
        updated_elements_info = {}
        for t_id, (t_box, t_lbl) in self.elements_info.items():
            if len(boxes) == 0:
                break
            # Only compare against detections with the same label.
            search_mask = labels == t_lbl
            if search_mask.sum() == 0:
                continue
            search_boxes = boxes[search_mask, :]
            search_labels = labels[search_mask]
            search_idxs = np.arange(len(boxes))[search_mask]
            # Shift the track box by the estimated global movement before matching.
            t_box = t_box.copy()
            t_box[2:] += self.optical_movement
            t_box[:2] += self.optical_movement
            ious = box_utils.iou_of(t_box, search_boxes)
            n_box_idx = np.argmax(ious)
            if ious[n_box_idx] > 0.1:
                updated_elements_info[t_id] = (
                    search_boxes[n_box_idx],
                    search_labels[n_box_idx],
                )
                boxes = np.delete(boxes, (search_idxs[n_box_idx]), axis=0)
                labels = np.delete(labels, (search_idxs[n_box_idx]), axis=0)
        return updated_elements_info, (boxes, labels)

    def _update_notfound_elements(self, updated_elements_info: list):
        """Propagate unmatched tracks by the smoothed boxes-movement estimate."""
        u_ids = updated_elements_info.keys()
        notfound_elements_info = {}
        for t_id, (t_box, t_lbl) in self.elements_info.items():
            if t_id not in u_ids:
                updated_box = np.copy(t_box)
                updated_box[2:] = t_box[2:] + self.boxes_movement
                updated_box[:2] = t_box[:2] + self.boxes_movement
                notfound_elements_info[t_id] = (updated_box, t_lbl)
        return notfound_elements_info

    def _get_nvisible_elements(self, updated_elements_info):
        """Count label-1 elements left/right of the detection threshold."""
        visible_left, visible_right = 0, 0
        for id, (box, lbl) in updated_elements_info.items():
            if lbl != 1:
                continue
            if ((box[:2] + box[2:]) / 2)[0] < self.detection_threshold:
                visible_left += 1
            else:
                visible_right += 1
        return visible_left, visible_right

    def _check_elements_tracking_info(self, elements_info):
        """Swap adjacent entries whose boxes are out of left-to-right order."""
        elements_info = SortedDict(elements_info)
        n_elements = len(elements_info)
        keys = elements_info.keys()
        for idx in range(n_elements):
            if idx == n_elements - 1:
                break
            cur_key = keys[idx]
            next_key = keys[idx + 1]
            if elements_info[next_key][0][0] <= elements_info[cur_key][0][0]:
                tmp = elements_info[cur_key]
                elements_info[cur_key] = elements_info[next_key]
                elements_info[next_key] = tmp
        return elements_info
class WagonsInfo:
    """Derives whole-wagon bounding boxes from tracked element detections.

    Consecutive detections (left-to-right) are paired; the distance between
    their centers, normalized by their mean height, classifies the pair as
    belonging to the same wagon (intra range) or to two neighbouring wagons
    (inter range).
    """

    def __init__(
        self, roi, intrawagon_range, interwagon_range, wagon_threshold=None, label=None
    ):
        # Normalize the region of interest to (xmin, ymin, xmax, ymax).
        roi = np.array(roi)
        xmin, ymin = roi[0::2].min(), roi[1::2].min()
        xmax, ymax = roi[0::2].max(), roi[1::2].max()
        self.roi = np.array([xmin, ymin, xmax, ymax])
        # Accepted (sorted) ranges of normalized center distances.
        self.intrawagon_range = tuple(np.sort(intrawagon_range))
        self.interwagon_range = tuple(np.sort(interwagon_range))
        self.label = label  # optional label to keep; None keeps all elements
        self.wagon_thresh = wagon_threshold
        if self.wagon_thresh is None:
            # Default: split at the horizontal middle of the ROI.
            self.wagon_thresh = (xmin + xmax) / 2

    def __call__(self, tracking_info):
        """Map wagon ids to clipped wagon boxes derived from *tracking_info*."""
        if not isinstance(tracking_info, SortedDict):
            raise TypeError
        if not tracking_info:
            return {}
        boxes = self._get_elements_boxes(tracking_info)
        centers = (boxes[:, :2] + boxes[:, 2:]) / 2
        heigths = boxes[:, 3] - boxes[:, 1]
        last_box = None
        last_center = None
        last_heigth = None
        wagons = {}
        next_id = 0
        for box, center, heigth in zip(boxes, centers, heigths):
            if last_center is None:
                # First element: extend to the nearest ROI edge depending on
                # which side of the threshold its center falls.
                if center[0] > self.wagon_thresh:
                    start_point = np.array((self.roi[0], box[1]))
                    wagon_box = self._clip(np.hstack((start_point, box[2:])))
                else:
                    end_point = np.array((self.roi[2], box[3]))
                    wagon_box = self._clip(np.hstack((box[:2], end_point)))
                wagons[next_id] = wagon_box
                last_box = box
                last_center = center
                last_heigth = heigth
                continue
            # Distance between consecutive centers, normalized by mean height.
            mean_heigth = (heigth + last_heigth) / 2
            length = np.linalg.norm(center - last_center) / mean_heigth
            length_class = self._classify_length(length, mean_heigth)
            if length_class is None:
                continue
            if length_class == 0:
                # Same wagon: span from the previous box to the current one.
                wagon_box = self._clip(np.hstack((last_box[:2], box[2:])))
                wagons[next_id] = wagon_box
                next_id += 1
            else:
                # New wagon: open a box extending to the ROI's right edge.
                end_point = np.array((self.roi[2], box[3]))
                wagon_box = self._clip(np.hstack((box[:2], end_point)))
                wagons[next_id] = wagon_box
            last_box = box
            last_center = center
            last_heigth = heigth
        return wagons

    def _get_elements_boxes(self, tracking_info):
        """Collect the tracked boxes, optionally filtered by ``self.label``."""
        if self.label is not None:
            boxes = (
                box for box, lbl in tuple(tracking_info.values()) if lbl == self.label
            )
            boxes = np.array(tuple(boxes))
        else:
            boxes = (box for box, _ in tuple(tracking_info.values()))
            boxes = np.array(tuple(boxes))
        return boxes

    def _classify_length(self, length, mean_heigth):
        """Return 0 for an intra-wagon pair, 1 for inter-wagon, None otherwise."""
        # NOTE(review): mean_heigth is unused here — confirm whether it was
        # meant to participate in the classification.
        if self.intrawagon_range[0] <= length <= self.intrawagon_range[1]:
            return 0
        elif self.interwagon_range[0] <= length <= self.interwagon_range[1]:
            return 1
        else:
            return None

    def _clip(self, box):
        """Clamp *box* coordinates to the region of interest."""
        box = box.copy()
        box[0::2] = np.clip(box[0::2], self.roi[0], self.roi[2])
        box[1::2] = np.clip(box[1::2], self.roi[1], self.roi[3])
        return box
| 12,803 | 14 | 812 |
8b466cd90199311e8b4ba4a9c518eb5896fa30b4 | 2,539 | py | Python | projeto-02/graph.py | henrique-tavares/IFB-Analise-de-Algoritmos | 36db7672fea45ce8ab9dce5bbe41aec30be18465 | [
"MIT"
] | null | null | null | projeto-02/graph.py | henrique-tavares/IFB-Analise-de-Algoritmos | 36db7672fea45ce8ab9dce5bbe41aec30be18465 | [
"MIT"
] | null | null | null | projeto-02/graph.py | henrique-tavares/IFB-Analise-de-Algoritmos | 36db7672fea45ce8ab9dce5bbe41aec30be18465 | [
"MIT"
] | 1 | 2021-07-15T23:50:37.000Z | 2021-07-15T23:50:37.000Z | from __future__ import annotations
from collections import defaultdict
from math import ceil
from typing import Dict, NamedTuple
from random import randint, sample
if __name__ == "__main__":
# g = Graph.random_generator(10, 0.2)
# print(g)
# print(len(g["0"].values()))
for i in (0.25, 0.5, 1):
g = Graph.random_generator(10, i)
print(g, end="\n\n")
| 29.183908 | 93 | 0.590784 | from __future__ import annotations
from collections import defaultdict
from math import ceil
from typing import Dict, NamedTuple
from random import randint, sample
class Edge(NamedTuple):
    """Directed edge record: target vertex, weight, optional back-edge flag."""

    vertice: str
    distance: float
    bidrectional: bool = False  # NOTE: "bidrectional" spelling is part of the public field name
class Graph:
    """Adjacency-map graph: vertex name -> {neighbour name: edge distance}."""

    def __init__(self) -> None:
        self.elements: Dict[str, Dict[str, float]] = defaultdict(dict)

    def __getitem__(self, key: str) -> Dict[str, float]:
        # defaultdict semantics: looking up an unknown vertex creates it.
        return self.elements[key]

    def __setitem__(self, key: str, value: Edge) -> None:
        self.elements[key][value.vertice] = value.distance

    def __contains__(self, key: str) -> bool:
        return key in self.elements

    def __iter__(self):
        # Idiomatic delegation instead of a manual loop.
        yield from self.elements

    def __str__(self) -> str:
        return str(dict(self.elements))

    def add(self, vertice: str, *edges: Edge) -> None:
        """Add *vertice* with the given outgoing edges.

        Each destination vertex is materialized even with no outgoing edges;
        a reverse edge is added when ``edge.bidrectional`` (field name kept
        as declared on Edge) is true.
        """
        for edge in edges:
            self[vertice] = edge
            if edge.vertice not in self:
                self[edge.vertice]  # materialize the neighbour's adjacency dict
            if edge.bidrectional:
                self[edge.vertice] = Edge(vertice, edge.distance)

    def remove(self, vertice: str) -> None:
        """Delete *vertice* and every edge pointing at it."""
        self.elements.pop(vertice, None)
        for key in self.elements:
            self.elements[key].pop(vertice, None)

    @staticmethod
    def random_generator(vertices: int, density: float) -> Graph:
        """Build a random bidirectional graph with roughly *density* of all edges.

        :param vertices: number of vertices (named "0".."vertices-1").
        :param density: fraction (0..1] of the maximum possible edge count.
        """
        g = Graph()
        max_num_edges = vertices * (vertices - 1) / 2
        num_edges = ceil(max_num_edges * density)
        # Target degree per vertex implied by the requested density.
        vertice_degree = ceil((vertices - 1) * num_edges / max_num_edges)
        added_vertices = set()
        range_vertice = range(vertices)
        for src in range_vertice:
            actual_vertice_degree = vertice_degree - len(g[str(src)].values())
            added_vertices.add(src)
            reduced_sample = set(range_vertice) - added_vertices
            # Fix: random.sample() no longer accepts a set (removed in
            # Python 3.11); sort for a deterministic population order.
            dests = sample(
                sorted(reduced_sample),
                actual_vertice_degree
                if actual_vertice_degree > 0 and actual_vertice_degree <= len(reduced_sample)
                else 0,
            )
            for dest in dests:
                g.add(str(src), Edge(str(dest), randint(1, 999), True))
                if len(g[str(dest)].values()) == vertice_degree:
                    added_vertices.add(dest)
        return g
if __name__ == "__main__":
    # Demo: build random graphs of 10 vertices at increasing edge density
    # and print each adjacency map.
    # g = Graph.random_generator(10, 0.2)
    # print(g)
    # print(len(g["0"].values()))
    for i in (0.25, 0.5, 1):
        g = Graph.random_generator(10, i)
        print(g, end="\n\n")
| 1,784 | 321 | 46 |
369bbf3c6fcc3f3fa0a093ab85e4f12dd895b953 | 6,306 | py | Python | estar/src/moeadd/moeadd_ref/moeadd_supplementary.py | tatikhonova/FEDOT.Algs | aeb539f52bfbdb0ba8f4975e9ea7cb5a60859e25 | [
"BSD-3-Clause"
] | null | null | null | estar/src/moeadd/moeadd_ref/moeadd_supplementary.py | tatikhonova/FEDOT.Algs | aeb539f52bfbdb0ba8f4975e9ea7cb5a60859e25 | [
"BSD-3-Clause"
] | null | null | null | estar/src/moeadd/moeadd_ref/moeadd_supplementary.py | tatikhonova/FEDOT.Algs | aeb539f52bfbdb0ba8f4975e9ea7cb5a60859e25 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 30 14:04:57 2020
@author: mike_ubuntu
"""
from copy import deepcopy
import numpy as np
| 42.897959 | 149 | 0.629718 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Nov 30 14:04:57 2020
@author: mike_ubuntu
"""
from copy import deepcopy
import numpy as np
def check_dominance(target, compared_with) -> bool:
    """Pareto dominance test (minimisation).

    Returns True iff *target* is no worse than *compared_with* in every
    objective and strictly better in at least one.
    """
    strictly_better_somewhere = False
    for idx in range(len(target.obj_fun)):
        if target.obj_fun[idx] > compared_with.obj_fun[idx]:
            # Worse in one objective: cannot dominate.
            return False
        if target.obj_fun[idx] < compared_with.obj_fun[idx]:
            strictly_better_somewhere = True
    return strictly_better_somewhere
def NDL_update(new_solution, levels) -> list: # efficient_NDL_update
    """Incrementally insert *new_solution* into a list of non-dominated
    levels (Pareto fronts, best first), cascading displaced solutions to
    later levels.  Mutates *levels* in place and returns it.
    """
    moving_set = {new_solution}
    new_levels = levels  # alias, not a copy: the caller's list is mutated
    for level_idx in np.arange(len(levels)):
        moving_set_new = set()
        for ms_idx, moving_set_elem in enumerate(moving_set):
            # Case 1: still dominated by someone at this level -> try the next level.
            if np.any([check_dominance(solution, moving_set_elem) for solution in new_levels[level_idx]]):
                moving_set_new.add(moving_set_elem)
            # Case 2: mutually non-dominated with the whole level -> join it.
            # NOTE(review): the first half of this condition is the negation
            # of the `if` above and is therefore always true here (redundant).
            elif (not np.any([check_dominance(solution, moving_set_elem) for solution in new_levels[level_idx]]) and
                  not np.any([check_dominance(moving_set_elem, solution) for solution in new_levels[level_idx]])):
                new_levels[level_idx].append(moving_set_elem)#; completed_levels = True
            # Case 3: dominates the entire level -> splice in a new level above it.
            elif np.all([check_dominance(moving_set_elem, solution) for solution in levels[level_idx]]):
                temp_levels = new_levels[level_idx:]
                new_levels[level_idx:] = []
                new_levels.append([moving_set_elem,]); new_levels.extend(temp_levels)#; completed_levels = True
            # Case 4: dominates part of the level -> take its place; the
            # dominated members are pushed down on the next iteration.
            else:
                dominated_level_elems = [level_elem for level_elem in new_levels[level_idx] if check_dominance(moving_set_elem, level_elem)]
                non_dominated_level_elems = [level_elem for level_elem in new_levels[level_idx] if not check_dominance(moving_set_elem, level_elem)]
                non_dominated_level_elems.append(moving_set_elem)
                new_levels[level_idx] = non_dominated_level_elems
                for element in dominated_level_elems:
                    moving_set_new.add(element)
        moving_set = moving_set_new
        if not len(moving_set):
            break
    # Anything still unplaced forms a brand-new worst level.
    if len(moving_set):
        new_levels.append(list(moving_set))
    # Drop a trailing empty level that the splicing above can leave behind.
    if len(new_levels[len(new_levels)-1]) == 0:
        _ = new_levels.pop()
    return new_levels
def fast_non_dominated_sorting(population) -> list:
    """Fast non-dominated sort: partition *population* into Pareto fronts.

    Returns a list of lists of solutions, best front first.  Each solution
    must expose an ``obj_fun`` sequence (consumed by check_dominance).
    """
    levels = []; ranks = np.empty(len(population))
    domination_count = np.zeros(len(population)) # number of elements dominating the i-th candidate
    dominated_solutions = [[] for elem_idx in np.arange(len(population))] # indices of elements the i-th candidate dominates
    current_level_idxs = []
    # Pass 1: pairwise comparisons fill the domination bookkeeping.
    for main_elem_idx in np.arange(len(population)):
        for compared_elem_idx in np.arange(len(population)):
            if main_elem_idx == compared_elem_idx:
                continue
            if check_dominance(population[compared_elem_idx], population[main_elem_idx]):
                domination_count[main_elem_idx] += 1
            elif check_dominance(population[main_elem_idx], population[compared_elem_idx]):
                dominated_solutions[main_elem_idx].append(compared_elem_idx)
        if domination_count[main_elem_idx] == 0:
            current_level_idxs.append(main_elem_idx); ranks[main_elem_idx] = 0
    levels.append([population[elem_idx] for elem_idx in current_level_idxs])
    level_idx = 0
    # Pass 2: peel fronts by decrementing counts of dominated solutions.
    # NOTE(review): `ranks` is filled in but never returned or read here.
    while len(current_level_idxs) > 0:
        new_level_idxs = []
        for main_elem_idx in current_level_idxs:
            for dominated_elem_idx in dominated_solutions[main_elem_idx]:
                domination_count[dominated_elem_idx] -= 1
                if domination_count[dominated_elem_idx] == 0:
                    ranks[dominated_elem_idx] = level_idx + 1
                    new_level_idxs.append(dominated_elem_idx)
        if len(new_level_idxs): levels.append([population[elem_idx] for elem_idx in new_level_idxs])
        level_idx += 1
        current_level_idxs = new_level_idxs
    return levels
def slow_non_dominated_sorting(population) -> list:
    """Reference non-dominated sort (simpler but slower than the fast version).

    Repeatedly peels off the set of solutions not dominated by any not-yet-
    locked solution, until everything is assigned to a front.
    """
    locked_idxs = []
    levels = []; levels_elems = 0
    while len(population) > levels_elems:
        processed_idxs = []
        for main_elem_idx in np.arange(len(population)):
            if not main_elem_idx in locked_idxs:
                dominated = False
                for compared_elem_idx in np.arange(len(population)):
                    if main_elem_idx == compared_elem_idx or compared_elem_idx in locked_idxs:
                        continue
                    if check_dominance(population[compared_elem_idx], population[main_elem_idx]):
                        dominated = True
                if not dominated:
                    processed_idxs.append(main_elem_idx)
        # Lock this front so the next pass ignores it.
        locked_idxs.extend(processed_idxs); levels_elems += len(processed_idxs)
        levels.append([population[elem_idx] for elem_idx in processed_idxs])
    return levels
def acute_angle(vector_a, vector_b) -> float:
    """Angle (radians) between two vectors via the normalised dot product."""
    dot_ab = np.dot(vector_a, vector_b)
    norm_a = np.sqrt(np.dot(vector_a, vector_a))
    norm_b = np.sqrt(np.dot(vector_b, vector_b))
    return np.arccos(dot_ab / (norm_a * norm_b))
class Constraint(object):
    """Abstract base for optimisation constraints.

    Subclasses implement __call__ to return a non-negative violation
    measure (0 means the constraint is satisfied).
    """
    def __init__(self, *args):
        pass
    def __call__(self, *args):
        pass
class Inequality(Constraint):
    """Inequality constraint of the form g(x) >= 0."""

    def __init__(self, g):
        '''
        Inequality assumed in format g(x) >= 0
        '''
        self._g = g

    def __call__(self, x) -> float:
        """Violation magnitude: -g(x) when g(x) < 0, else 0 (satisfied)."""
        # Evaluate g(x) once; the original called it twice per check.
        value = self._g(x)
        return -value if value < 0 else 0
class Equality(Constraint):
    """Equality constraint of the form h(x) = 0."""

    def __init__(self, h):
        '''
        Equality assumed in format h(x) = 0
        '''
        self._h = h

    def __call__(self, x) -> float:
        """Violation magnitude: |h(x)| (0 when satisfied)."""
        residual = self._h(x)
        return np.abs(residual)
43cf2550196fb601b5410a14ffba60cec95eaddd | 692 | py | Python | scripts/controller.py | kato-masahiro/raspimouse_maze_manual | e5b6317bbb889de416b52dc1a61790c9e235c084 | [
"BSD-3-Clause"
] | null | null | null | scripts/controller.py | kato-masahiro/raspimouse_maze_manual | e5b6317bbb889de416b52dc1a61790c9e235c084 | [
"BSD-3-Clause"
] | null | null | null | scripts/controller.py | kato-masahiro/raspimouse_maze_manual | e5b6317bbb889de416b52dc1a61790c9e235c084 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import rospy
import math
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist
if __name__ == '__main__':
    # Start the ROS node and hand control to the subscriber callback loop.
    rospy.init_node('joy_twist')
    joy_twist = JoyTwist()
    rospy.spin()
| 30.086957 | 87 | 0.643064 | #!/usr/bin/env python
import rospy
import math
from sensor_msgs.msg import Joy
from geometry_msgs.msg import Twist
class JoyTwist(object):
    """ROS bridge node: translates joystick messages into velocity commands."""

    def __init__(self):
        # Listen on 'joy' and republish as '/cmd_vel' twists.
        self._joy_sub = rospy.Subscriber('joy', Joy, self.joy_callback, queue_size=1)
        self._twist_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)

    def joy_callback(self, joy_msg):
        """Publish a Twist while button 0 is held; otherwise do nothing."""
        if joy_msg.buttons[0] != 1:
            return
        cmd = Twist()
        cmd.linear.x = 0.1 * joy_msg.axes[1]
        cmd.angular.z = 0.5 * math.pi * joy_msg.axes[0]
        self._twist_pub.publish(cmd)
if __name__ == '__main__':
    # Start the ROS node and hand control to the subscriber callback loop.
    rospy.init_node('joy_twist')
    joy_twist = JoyTwist()
    rospy.spin()
| 394 | 2 | 76 |
974da76cfbed60fb31e44aeefba7b8b79a4e2ee8 | 719 | py | Python | py/day07.py | kwinkunks/aoc21 | 4fccd605334ad55b6ddc34bdd1921b1d42fc8a42 | [
"Apache-2.0"
] | null | null | null | py/day07.py | kwinkunks/aoc21 | 4fccd605334ad55b6ddc34bdd1921b1d42fc8a42 | [
"Apache-2.0"
] | null | null | null | py/day07.py | kwinkunks/aoc21 | 4fccd605334ad55b6ddc34bdd1921b1d42fc8a42 | [
"Apache-2.0"
] | null | null | null | import numpy as np
if __name__ == "__main__":
    # Validate each part against the AoC worked example, then run the
    # real puzzle input.
    # Part 1.
    assert part1(get_data(7, 'test')) == 37, "Part 1 failed."
    print(f"Part 1: {part1(get_data(7, 'data')):.0f}")
    # Part 2.
    assert part2(get_data(7, 'test')) == 168, "Part 2 failed."
    print(f"Part 2: {part2(get_data(7, 'data')):.0f}")
| 27.653846 | 62 | 0.588317 | import numpy as np
def get_data(day, dataset):
    """Load a day's comma-separated puzzle input as an int ndarray."""
    path = f'../js/day{day:02d}/{dataset}.txt'
    with open(path, 'r') as f:
        values = [int(token) for token in f.read().split(',')]
    return np.array(values)
def part1(data):
    """Minimum total fuel at constant cost-per-step: align on the median."""
    target = np.median(data)
    return np.sum(np.abs(data - target))
def part2(data):
    """Minimum total fuel when moving k steps costs 1 + 2 + ... + k = k*(k+1)/2.

    Evaluates every candidate alignment position from 0 to max(data)
    inclusive (the optimum always lies within the occupied range), instead
    of the original hard-coded 0..1999 window, so inputs with positions
    beyond 2000 are handled correctly.
    """
    points = np.arange(int(data.max()) + 1).reshape(-1, 1)
    dists = np.abs(points - data)           # (positions, crabs) via broadcasting
    cost = np.sum(dists * (dists + 1) / 2, axis=-1)
    return cost.min()
if __name__ == "__main__":
    # Validate each part against the AoC worked example, then run the
    # real puzzle input.
    # Part 1.
    assert part1(get_data(7, 'test')) == 37, "Part 1 failed."
    print(f"Part 1: {part1(get_data(7, 'data')):.0f}")
    # Part 2.
    assert part2(get_data(7, 'test')) == 168, "Part 2 failed."
    print(f"Part 2: {part2(get_data(7, 'data')):.0f}")
| 337 | 0 | 69 |
3ff95edb9cad1101f186963ca13d6372c9ea44db | 1,193 | py | Python | demo_wait.py | rdagger/micropython-ads1220 | c90f939517c8163b234210b8cf91b3ce948b5b1c | [
"MIT"
] | 2 | 2021-08-25T11:40:23.000Z | 2022-02-28T05:31:18.000Z | demo_wait.py | rdagger/micropython-ads1220 | c90f939517c8163b234210b8cf91b3ce948b5b1c | [
"MIT"
] | null | null | null | demo_wait.py | rdagger/micropython-ads1220 | c90f939517c8163b234210b8cf91b3ce948b5b1c | [
"MIT"
] | 1 | 2021-08-08T11:39:47.000Z | 2021-08-08T11:39:47.000Z | """ADS1220 example (polling ADC).
Uses single shot mode and wait for data ready."""
from time import sleep
from machine import Pin, SPI # type: ignore
from ads1220 import ADC
cs = 15 # Chip select pin
drdy = 27 # Data ready pin
spi = SPI(1,
baudrate=10000000, # 10 MHz (try lower speed to troubleshoot)
sck=Pin(14),
mosi=Pin(13),
miso=Pin(12),
phase=1) # ADS1220 uses SPI mode 1
adc = ADC(spi, cs, drdy)
def test():
    """Poll the ADS1220 in single-shot mode and print each reading.

    Loops forever (one conversion every 3 s) until Ctrl-C, then powers the
    ADC down and releases the SPI bus.
    """
    vref = 2.048 # Internal voltage reference
    res = 8388607 # ADC resolution 23 bit (2^23, assumes 1 bit polarity)
    adc.conversion_single_shot() # Set single shot conversion mode
    adc.select_channel(0) # Select channel 0 (0 to 3 ADC channels)
    sleep(.1) # Ensure ADC ready
    try:
        while True:
            adc.start_conversion() # Conversion must be started each shot
            reading = adc.read_wait()
            v = reading * vref / res # Scale raw count to volts
            print("raw: {0}, volts: {1}".format(reading, v))
            sleep(3)
    except KeyboardInterrupt:
        print("\nCtrl-C pressed to exit.")
    finally:
        # Always leave the hardware in a safe state.
        adc.power_down()
        spi.deinit()
test()
| 28.404762 | 74 | 0.600168 | """ADS1220 example (polling ADC).
Uses single shot mode and wait for data ready."""
from time import sleep
from machine import Pin, SPI # type: ignore
from ads1220 import ADC
cs = 15 # Chip select pin
drdy = 27 # Data ready pin
spi = SPI(1,
baudrate=10000000, # 10 MHz (try lower speed to troubleshoot)
sck=Pin(14),
mosi=Pin(13),
miso=Pin(12),
phase=1) # ADS1220 uses SPI mode 1
adc = ADC(spi, cs, drdy)
def test():
    """Poll the ADS1220 in single-shot mode and print each reading.

    Loops forever (one conversion every 3 s) until Ctrl-C, then powers the
    ADC down and releases the SPI bus.
    """
    vref = 2.048 # Internal voltage reference
    res = 8388607 # ADC resolution 23 bit (2^23, assumes 1 bit polarity)
    adc.conversion_single_shot() # Set single shot conversion mode
    adc.select_channel(0) # Select channel 0 (0 to 3 ADC channels)
    sleep(.1) # Ensure ADC ready
    try:
        while True:
            adc.start_conversion() # Conversion must be started each shot
            reading = adc.read_wait()
            v = reading * vref / res # Scale raw count to volts
            print("raw: {0}, volts: {1}".format(reading, v))
            sleep(3)
    except KeyboardInterrupt:
        print("\nCtrl-C pressed to exit.")
    finally:
        # Always leave the hardware in a safe state.
        adc.power_down()
        spi.deinit()
test()
| 0 | 0 | 0 |
4cf5ed14c9a66510d6acd4017cb1faca8edf5750 | 2,265 | py | Python | tfx_addons/feast_examplegen/component_test.py | BACtaki/tfx-addons | 130465c2cdaae45728535ea09e4bf38f4ca9eb38 | [
"Apache-2.0"
] | 1 | 2021-07-10T00:25:06.000Z | 2021-07-10T00:25:06.000Z | tfx_addons/feast_examplegen/component_test.py | BACtaki/tfx-addons | 130465c2cdaae45728535ea09e4bf38f4ca9eb38 | [
"Apache-2.0"
] | 4 | 2021-11-13T03:10:19.000Z | 2022-02-18T19:00:47.000Z | tfx_addons/feast_examplegen/component_test.py | BACtaki/tfx-addons | 130465c2cdaae45728535ea09e4bf38f4ca9eb38 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Tests for tfx_addons.feast_examplegen.component.
"""
import pytest
try:
import feast
except ImportError:
pytest.skip("feast not available, skipping", allow_module_level=True)
from tfx.v1.proto import Input
from tfx_addons.feast_examplegen.component import FeastExampleGen
| 37.75 | 80 | 0.647241 | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Tests for tfx_addons.feast_examplegen.component.
"""
import pytest
try:
import feast
except ImportError:
pytest.skip("feast not available, skipping", allow_module_level=True)
from tfx.v1.proto import Input
from tfx_addons.feast_examplegen.component import FeastExampleGen
def test_init_valid():
entity_query = 'SELECT user FROM fake_db'
repo_config = feast.RepoConfig(provider='local', project='default')
FeastExampleGen(repo_config=repo_config,
features=['feature1', 'feature2'],
entity_query='SELECT user FROM fake_db')
FeastExampleGen(repo_config=repo_config,
features='feature_service1',
entity_query='SELECT user FROM fake_db')
FeastExampleGen(repo_config=repo_config,
features=['feature1', 'feature2'],
input_config=Input(splits=[
Input.Split(name='train', pattern=entity_query),
Input.Split(name='eval', pattern=entity_query),
]))
def test_input_and_entity():
entity_query = 'SELECT user FROM fake_db'
repo_config = feast.RepoConfig(provider='local', project='default')
with pytest.raises(RuntimeError):
FeastExampleGen(repo_config=repo_config,
features=['feature1', 'feature2'],
entity_query=entity_query,
input_config=Input(splits=[
Input.Split(name='train', pattern=entity_query),
Input.Split(name='eval', pattern=entity_query),
]))
| 1,246 | 0 | 46 |
e19d19ef0a9751a966e5539a1d4f246e5698feea | 10,309 | py | Python | tools/apollo4b_scripts/am_defines.py | vaxradius/Apollo4B-SDK-2021.02.08 | 507e328b16a179f5d5b18685d5be4a5c6753f852 | [
"BSD-3-Clause"
] | 2 | 2021-11-04T03:48:20.000Z | 2021-12-27T01:34:31.000Z | tools/apollo4b_scripts/am_defines.py | vaxradius/Apollo4B-SDK-2021.02.08 | 507e328b16a179f5d5b18685d5be4a5c6753f852 | [
"BSD-3-Clause"
] | null | null | null | tools/apollo4b_scripts/am_defines.py | vaxradius/Apollo4B-SDK-2021.02.08 | 507e328b16a179f5d5b18685d5be4a5c6753f852 | [
"BSD-3-Clause"
] | 2 | 2021-11-04T03:47:21.000Z | 2021-12-27T01:30:28.000Z | #!/usr/bin/env python3
# Utility functioins
import sys
from Crypto.Cipher import AES
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Signature import pss
from Crypto.Hash import SHA256
import array
import hashlib
import hmac
import os
import binascii
MAX_DOWNLOAD_SIZE = 0x48000 # 288K
AM_SECBOOT_DEFAULT_NONSECURE_MAIN = 0x18000
# Encryption Algorithm
# String constants
# Authentication Algorithm
FLASH_INVALID = 0xFFFFFFFF
# KeyWrap Mode
#******************************************************************************
#
# Magic Numbers
#
#******************************************************************************
AM_IMAGE_MAGIC_SBL = 0xA3
AM_IMAGE_MAGIC_ICV_CHAIN = 0xAC
AM_IMAGE_MAGIC_SECURE = 0xC0
AM_IMAGE_MAGIC_OEM_CHAIN = 0xCC
AM_IMAGE_MAGIC_NONSECURE = 0xCB
AM_IMAGE_MAGIC_INFO0 = 0xCF
AM_IMAGE_MAGIC_CONTAINER = 0xC1
AM_IMAGE_MAGIC_KEYREVOKE = 0xCE
AM_IMAGE_MAGIC_DOWNLOAD = 0xCD
#******************************************************************************
#
# Wired Message Types
#
#******************************************************************************
AM_SECBOOT_WIRED_MSGTYPE_HELLO = 0
AM_SECBOOT_WIRED_MSGTYPE_STATUS = 1
AM_SECBOOT_WIRED_MSGTYPE_OTADESC = 2
AM_SECBOOT_WIRED_MSGTYPE_UPDATE = 3
AM_SECBOOT_WIRED_MSGTYPE_ABORT = 4
AM_SECBOOT_WIRED_MSGTYPE_RECOVER = 5
AM_SECBOOT_WIRED_MSGTYPE_RESET = 6
AM_SECBOOT_WIRED_MSGTYPE_ACK = 7
AM_SECBOOT_WIRED_MSGTYPE_DATA = 8
#******************************************************************************
#
# Wired Message ACK Status
#
#******************************************************************************
AM_SECBOOT_WIRED_ACK_STATUS_SUCCESS = 0
AM_SECBOOT_WIRED_ACK_STATUS_FAILURE = 1
AM_SECBOOT_WIRED_ACK_STATUS_INVALID_INFO0 = 2
AM_SECBOOT_WIRED_ACK_STATUS_CRC = 3
AM_SECBOOT_WIRED_ACK_STATUS_SEC = 4
AM_SECBOOT_WIRED_ACK_STATUS_MSG_TOO_BIG = 5
AM_SECBOOT_WIRED_ACK_STATUS_UNKNOWN_MSGTYPE = 6
AM_SECBOOT_WIRED_ACK_STATUS_INVALID_ADDR = 7
AM_SECBOOT_WIRED_ACK_STATUS_INVALID_OPERATION = 8
AM_SECBOOT_WIRED_ACK_STATUS_INVALID_PARAM = 9
AM_SECBOOT_WIRED_ACK_STATUS_SEQ = 10
AM_SECBOOT_WIRED_ACK_STATUS_TOO_MUCH_DATA = 11
#******************************************************************************
#
# Definitions related to Image Headers
#
#******************************************************************************
AM_MAX_UART_MSG_SIZE = 8192 # 8K buffer in SBL
# Max Wired Update Image header size - this includes optional sign & encryption info
AM_WU_IMAGEHDR_SIZE = (16 + 384 + 48 + 16)
#******************************************************************************
#
# INFOSPACE related definitions
#
#******************************************************************************
AM_SECBOOT_INFO0_SIGN_PROGRAMMED0 = 0x48EAAD88
AM_SECBOOT_INFO0_SIGN_PROGRAMMED1 = 0xC9705737
AM_SECBOOT_INFO0_SIGN_PROGRAMMED2 = 0x0A6B8458
AM_SECBOOT_INFO0_SIGN_PROGRAMMED3 = 0xE41A9D74
AM_SECBOOT_INFO0_SIGN_UINIT0 = 0x5B75A5FA
AM_SECBOOT_INFO0_SIGN_UINIT1 = 0x7B9C8674
AM_SECBOOT_INFO0_SIGN_UINIT2 = 0x869A96FE
AM_SECBOOT_INFO0_SIGN_UINIT3 = 0xAEC90860
INFO0_SIZE_BYTES = (2 * 1024)
INFO1_SIZE_BYTES = (6 * 1024)
#******************************************************************************
#
# CRC using ethernet poly, as used by Corvette hardware for validation
#
#******************************************************************************
#******************************************************************************
#
# Pad the text to the block_size. bZeroPad determines how to handle text which
# is already multiple of block_size
#
#******************************************************************************
#******************************************************************************
#
# AES CBC encryption
#
#******************************************************************************
#******************************************************************************
#
# AES 128 CBC encryption
#
#******************************************************************************
#******************************************************************************
#
# SHA256 HMAC
#
#******************************************************************************
#******************************************************************************
#
# RSA PKCS1_v1_5 sign
#
#******************************************************************************
#******************************************************************************
#
# RSA PKCS1_v1_5 sign verification
#
#******************************************************************************
#******************************************************************************
#
# RSA PSS signing function.
#
#******************************************************************************
#******************************************************************************
#
# RSA PSS signature verification.
#
#******************************************************************************
#******************************************************************************
#
# Fill one word in bytearray
#
#******************************************************************************
#******************************************************************************
#
# Turn a 32-bit number into a series of bytes for transmission.
#
# This command will split a 32-bit integer into an array of bytes, ordered
# LSB-first for transmission over the UART.
#
#******************************************************************************
#******************************************************************************
#
# Extract a word from a byte array
#
#******************************************************************************
#******************************************************************************
#
# automatically figure out the integer format (base 10 or 16)
#
#******************************************************************************
#******************************************************************************
#
# User controllable Prints control
#
#******************************************************************************
# Defined print levels
AM_PRINT_LEVEL_MIN = 0
AM_PRINT_LEVEL_NONE = AM_PRINT_LEVEL_MIN
AM_PRINT_LEVEL_ERROR = 1
AM_PRINT_LEVEL_INFO = 2
AM_PRINT_LEVEL_VERBOSE = 4
AM_PRINT_LEVEL_DEBUG = 5
AM_PRINT_LEVEL_MAX = AM_PRINT_LEVEL_DEBUG
# Global variable to control the prints
AM_PRINT_VERBOSITY = AM_PRINT_LEVEL_INFO
helpPrintLevel = 'Set Log Level (0: None), (1: Error), (2: INFO), (4: Verbose), (5: Debug) [Default = Info]'
| 34.135762 | 112 | 0.484237 | #!/usr/bin/env python3
# Utility functioins
import sys
from Crypto.Cipher import AES
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
from Crypto.Signature import pss
from Crypto.Hash import SHA256
import array
import hashlib
import hmac
import os
import binascii
MAX_DOWNLOAD_SIZE = 0x48000 # 288K
AM_SECBOOT_DEFAULT_NONSECURE_MAIN = 0x18000
# Encryption Algorithm
# String constants
# Authentication Algorithm
FLASH_INVALID = 0xFFFFFFFF
# KeyWrap Mode
#******************************************************************************
#
# Magic Numbers
#
#******************************************************************************
AM_IMAGE_MAGIC_SBL = 0xA3
AM_IMAGE_MAGIC_ICV_CHAIN = 0xAC
AM_IMAGE_MAGIC_SECURE = 0xC0
AM_IMAGE_MAGIC_OEM_CHAIN = 0xCC
AM_IMAGE_MAGIC_NONSECURE = 0xCB
AM_IMAGE_MAGIC_INFO0 = 0xCF
AM_IMAGE_MAGIC_CONTAINER = 0xC1
AM_IMAGE_MAGIC_KEYREVOKE = 0xCE
AM_IMAGE_MAGIC_DOWNLOAD = 0xCD
#******************************************************************************
#
# Wired Message Types
#
#******************************************************************************
AM_SECBOOT_WIRED_MSGTYPE_HELLO = 0
AM_SECBOOT_WIRED_MSGTYPE_STATUS = 1
AM_SECBOOT_WIRED_MSGTYPE_OTADESC = 2
AM_SECBOOT_WIRED_MSGTYPE_UPDATE = 3
AM_SECBOOT_WIRED_MSGTYPE_ABORT = 4
AM_SECBOOT_WIRED_MSGTYPE_RECOVER = 5
AM_SECBOOT_WIRED_MSGTYPE_RESET = 6
AM_SECBOOT_WIRED_MSGTYPE_ACK = 7
AM_SECBOOT_WIRED_MSGTYPE_DATA = 8
#******************************************************************************
#
# Wired Message ACK Status
#
#******************************************************************************
AM_SECBOOT_WIRED_ACK_STATUS_SUCCESS = 0
AM_SECBOOT_WIRED_ACK_STATUS_FAILURE = 1
AM_SECBOOT_WIRED_ACK_STATUS_INVALID_INFO0 = 2
AM_SECBOOT_WIRED_ACK_STATUS_CRC = 3
AM_SECBOOT_WIRED_ACK_STATUS_SEC = 4
AM_SECBOOT_WIRED_ACK_STATUS_MSG_TOO_BIG = 5
AM_SECBOOT_WIRED_ACK_STATUS_UNKNOWN_MSGTYPE = 6
AM_SECBOOT_WIRED_ACK_STATUS_INVALID_ADDR = 7
AM_SECBOOT_WIRED_ACK_STATUS_INVALID_OPERATION = 8
AM_SECBOOT_WIRED_ACK_STATUS_INVALID_PARAM = 9
AM_SECBOOT_WIRED_ACK_STATUS_SEQ = 10
AM_SECBOOT_WIRED_ACK_STATUS_TOO_MUCH_DATA = 11
#******************************************************************************
#
# Definitions related to Image Headers
#
#******************************************************************************
AM_MAX_UART_MSG_SIZE = 8192 # 8K buffer in SBL
# Max Wired Update Image header size - this includes optional sign & encryption info
AM_WU_IMAGEHDR_SIZE = (16 + 384 + 48 + 16)
#******************************************************************************
#
# INFOSPACE related definitions
#
#******************************************************************************
AM_SECBOOT_INFO0_SIGN_PROGRAMMED0 = 0x48EAAD88
AM_SECBOOT_INFO0_SIGN_PROGRAMMED1 = 0xC9705737
AM_SECBOOT_INFO0_SIGN_PROGRAMMED2 = 0x0A6B8458
AM_SECBOOT_INFO0_SIGN_PROGRAMMED3 = 0xE41A9D74
AM_SECBOOT_INFO0_SIGN_UINIT0 = 0x5B75A5FA
AM_SECBOOT_INFO0_SIGN_UINIT1 = 0x7B9C8674
AM_SECBOOT_INFO0_SIGN_UINIT2 = 0x869A96FE
AM_SECBOOT_INFO0_SIGN_UINIT3 = 0xAEC90860
INFO0_SIZE_BYTES = (2 * 1024)
INFO1_SIZE_BYTES = (6 * 1024)
#******************************************************************************
#
# CRC using ethernet poly, as used by Corvette hardware for validation
#
#******************************************************************************
def crc32(L):
    """CRC-32 (Ethernet polynomial) of *L*, masked to an unsigned 32-bit value."""
    checksum = binascii.crc32(L)
    return checksum & 0xFFFFFFFF
#******************************************************************************
#
# Pad the text to the block_size. bZeroPad determines how to handle text which
# is already multiple of block_size
#
#******************************************************************************
def pad_to_block_size(text, block_size, bZeroPad):
    """Pad *text* (bytes) up to a multiple of *block_size*.

    Each pad byte carries the pad length (PKCS#7 style).  When *text* is
    already block-aligned: bZeroPad != 0 appends one full block of padding,
    while bZeroPad == 0 returns it unchanged.
    """
    amount_to_pad = block_size - (len(text) % block_size)
    if amount_to_pad == block_size and not bZeroPad:
        amount_to_pad = 0
    # bytes([n]) * n builds the whole pad in one step and, unlike the former
    # per-byte chr()/'ascii' round-trip, also works for pad values > 0x7F.
    return text + bytes([amount_to_pad]) * amount_to_pad
#******************************************************************************
#
# AES CBC encryption
#
#******************************************************************************
def encrypt_app_aes(cleartext, encKey, iv):
    """AES-CBC-encrypt *cleartext* with the given key and IV.

    Inputs may be any byte-sequence-like objects (list of ints, bytearray,
    bytes); *cleartext* must already be a multiple of the AES block size.
    """
    # Bug fix: array.array(...).tostring() was removed in Python 3.9;
    # bytes(bytearray(...)) produces the identical byte strings.
    key = bytes(bytearray(encKey))
    ivVal = bytes(bytearray(iv))
    plaintext = bytes(bytearray(cleartext))
    encryption_suite = AES.new(key, AES.MODE_CBC, ivVal)
    cipher_text = encryption_suite.encrypt(plaintext)
    return cipher_text
#******************************************************************************
#
# AES 128 CBC encryption
#
#******************************************************************************
def encrypt_app_aes128(cleartext, encKey, iv):
    """AES-128-CBC-encrypt *cleartext* (key length selects AES-128).

    Same implementation as encrypt_app_aes(); kept as a separate entry
    point for callers that distinguish key sizes.
    """
    # Bug fix: array.array(...).tostring() was removed in Python 3.9;
    # bytes(bytearray(...)) produces the identical byte strings.
    key = bytes(bytearray(encKey))
    ivVal = bytes(bytearray(iv))
    plaintext = bytes(bytearray(cleartext))
    encryption_suite = AES.new(key, AES.MODE_CBC, ivVal)
    cipher_text = encryption_suite.encrypt(plaintext)
    return cipher_text
#******************************************************************************
#
# SHA256 HMAC
#
#******************************************************************************
def compute_hmac(key, data):
    """Return the HMAC-SHA256 digest of *data* keyed with *key*.

    Both arguments may be any byte-sequence-like object (list of ints,
    bytearray, bytes).
    """
    # Bug fix: array.array(...).tostring() was removed in Python 3.9;
    # bytes(bytearray(...)) produces the identical byte strings.
    sig = hmac.new(bytes(bytearray(key)), bytes(bytearray(data)), hashlib.sha256).digest()
    return sig
#******************************************************************************
#
# RSA PKCS1_v1_5 sign
#
#******************************************************************************
def compute_rsa_sign(prvKeyFile, data):
    """PKCS#1 v1.5-sign the SHA-256 digest of *data*.

    *prvKeyFile* is a path to a PEM-encoded RSA private key; returns the
    raw signature bytes.
    """
    key = open(prvKeyFile, "r").read()
    rsakey = RSA.importKey(key)
    signer = PKCS1_v1_5.new(rsakey)
    digest = SHA256.new()
    digest.update(bytes(data))
    sign = signer.sign(digest)
    return sign
#******************************************************************************
#
# RSA PKCS1_v1_5 sign verification
#
#******************************************************************************
def verify_rsa_sign(pubKeyFile, data, sign):
    """Verify a PKCS#1 v1.5 signature over the SHA-256 digest of *data*.

    *pubKeyFile* is a path to a PEM-encoded RSA public key; returns True
    when *sign* matches, False otherwise.
    """
    key = open(pubKeyFile, "r").read()
    rsakey = RSA.importKey(key)
    #print(hex(rsakey.n))
    verifier = PKCS1_v1_5.new(rsakey)
    digest = SHA256.new()
    digest.update(bytes(data))
    return verifier.verify(digest, sign)
#******************************************************************************
#
# RSA PSS signing function.
#
#******************************************************************************
def compute_rsa_pss_sign(prvKeyFile, data):
    """RSA-PSS-sign the SHA-256 digest of *data*.

    Returns a (signature_bytes, SHA256_hash_object) tuple.
    """
    # Import the key, hash the data, create an RSA pss signature from the hash.
    with open(prvKeyFile, 'rb') as private_key_file:
        key = RSA.import_key(private_key_file.read())
    h = SHA256.new(data)
    signature = pss.new(key).sign(h)
    return signature, h
#******************************************************************************
#
# RSA PSS signature verification.
#
#******************************************************************************
def verify_rsa_pss_sign(pubKeyFile, data, sign):
    """Verify an RSA-PSS signature over the SHA-256 digest of *data*.

    *pubKeyFile* is a path to a PEM-encoded RSA public key; returns True
    when *sign* matches, False otherwise.
    """
    # Read the public key, hash the message, and use our key to make sure the
    # signature matches the hash.
    with open(pubKeyFile, 'rb') as public_key_file:
        key = RSA.import_key(public_key_file.read())
    h = SHA256.new(data)
    verifier = pss.new(key)
    try:
        # Bug fix: the original passed the undefined name `signature`,
        # raising NameError on every call; the parameter is `sign`.
        verifier.verify(h, sign)
        return True
    except (ValueError, TypeError):
        return False
#******************************************************************************
#
# Fill one word in bytearray
#
#******************************************************************************
def fill_word(barray, offset, w):
    """Write the 32-bit value *w* into *barray* at *offset*, little-endian."""
    for i in range(4):
        barray[offset + i] = (w >> (8 * i)) & 0x000000ff
#******************************************************************************
#
# Turn a 32-bit number into a series of bytes for transmission.
#
# This command will split a 32-bit integer into an array of bytes, ordered
# LSB-first for transmission over the UART.
#
#******************************************************************************
def int_to_bytes(n):
    """Split the 32-bit value *n* into four LSB-first bytes for UART transmission."""
    return [(n >> shift) & 0xFF for shift in (0, 8, 16, 24)]
#******************************************************************************
#
# Extract a word from a byte array
#
#******************************************************************************
def word_from_bytes(B, n):
    """Reassemble a little-endian 32-bit word from B[n:n+4]."""
    word = 0
    for i in range(3, -1, -1):
        word = (word << 8) | B[n + i]
    return word
#******************************************************************************
#
# automatically figure out the integer format (base 10 or 16)
#
#******************************************************************************
def auto_int(x):
    """argparse-friendly int parser: base inferred from the prefix (0x.., 0o.., decimal)."""
    return int(x, base=0)
#******************************************************************************
#
# User controllable Prints control
#
#******************************************************************************
# Defined print levels
AM_PRINT_LEVEL_MIN = 0
AM_PRINT_LEVEL_NONE = AM_PRINT_LEVEL_MIN
AM_PRINT_LEVEL_ERROR = 1
AM_PRINT_LEVEL_INFO = 2
AM_PRINT_LEVEL_VERBOSE = 4
AM_PRINT_LEVEL_DEBUG = 5
AM_PRINT_LEVEL_MAX = AM_PRINT_LEVEL_DEBUG
# Global variable to control the prints
AM_PRINT_VERBOSITY = AM_PRINT_LEVEL_INFO
helpPrintLevel = 'Set Log Level (0: None), (1: Error), (2: INFO), (4: Verbose), (5: Debug) [Default = Info]'
def am_set_print_level(level):
    """Set the global verbosity threshold used by am_print().

    *level* should be one of the AM_PRINT_LEVEL_* constants defined above.
    """
    global AM_PRINT_VERBOSITY
    AM_PRINT_VERBOSITY = level
def am_print(*args, level=AM_PRINT_LEVEL_INFO, **kwargs):
    """print() wrapper gated on the global verbosity setting.

    Forwards *args/**kwargs to print() only when the message *level* is
    at or below the currently configured AM_PRINT_VERBOSITY.
    """
    global AM_PRINT_VERBOSITY
    if (AM_PRINT_VERBOSITY >= level):
        print(*args, **kwargs)
| 2,967 | 0 | 332 |
f72fef007e9ec6112672dfd0e87b7ec609049c6a | 2,115 | py | Python | scrape_artists/artists.py | flannerykj/python_scrape | c5166431810432c24e04150eb305b3ec2a899a91 | [
"MIT"
] | null | null | null | scrape_artists/artists.py | flannerykj/python_scrape | c5166431810432c24e04150eb305b3ec2a899a91 | [
"MIT"
] | null | null | null | scrape_artists/artists.py | flannerykj/python_scrape | c5166431810432c24e04150eb305b3ec2a899a91 | [
"MIT"
] | null | null | null |
import csv
import requests
import socket
from bs4 import BeautifulSoup
import re
import json
with open('artists.json', 'w') as outfile:
json.dump(parse_artists(), outfile)
'''artist_urls = get_artist_urls()
artist_array = compile_artist_profiles(artist_urls)
outfile = open("./toronto-artists.csv", "wb")
writer = csv.writer(outfile)
writer.writerows(recipe_array)'''
| 33.571429 | 144 | 0.605674 |
import csv
import requests
import socket
from bs4 import BeautifulSoup
import re
import json
def parse_artists():
    """Scrape the City of Toronto street-art artist index.

    Fetches the listing page, follows each artist's profile link and
    collects the parsed profiles.

    Returns:
        list: one profile dict per artist (see get_profile_data);
        empty list if the index page cannot be fetched or parsed.
    """
    artist_profiles = []
    try:
        url = 'http://wx.toronto.ca/inter/pmmd/streetart.nsf/artists?OpenView'
        response = requests.get(url)
        html = response.content
        # Name the parser explicitly to silence bs4's "no parser" warning.
        soup = BeautifulSoup(html, 'html.parser')
        link_list = soup.findAll('a', attrs={'class': 'viewa1'})
        for item in link_list:
            # hrefs on the index page are site-relative.
            item_url = 'http://wx.toronto.ca' + item.get('href')
            profile = get_profile_data(item_url)
            artist_profiles.append(profile)
    except Exception as e:
        # Python 3 exceptions have no .message attribute; the original
        # `print(e.message)` would itself raise. Print the exception.
        print(e)
    return artist_profiles
def get_profile_data(url):
    """Scrape a single artist profile page.

    Args:
        url: absolute URL of the artist's profile page.

    Returns:
        dict with name/email/website/bio/description/experience plus
        fixed dateJoined/dateUpdated timestamps, or None on any fetch
        or parse failure (missing sections raise inside the try).
    """
    try:
        response = requests.get(url)
        html = response.content
        # Name the parser explicitly to silence bs4's "no parser" warning.
        soup = BeautifulSoup(html, 'html.parser')
        profile = soup.find('div', attrs={'id': 'profiledisplay'}).text
        name = soup.findAll('legend')[0].text
        email = re.search(r'[\w\.-]+@[\w\.-]+', profile).group().replace('Business', '')
        website = re.search(r'Website: (.*?)[\n\r\s]+', profile).group().replace('Website: ', '')
        bio = re.search(r'Profile\n(.*?)\n', profile).group().replace('Profile', '')
        description = re.search(r'Business/Organization Description\n(.*?)\n', profile).group().replace('Business/Organization Description', '')
        experience = re.search(r'Experience\n(.*?)\n', profile).group().replace('Experience', '')
        # NOTE: the original had an unreachable `return profile` after this
        # return; it has been removed.
        return {
            "name": name,
            "email": email,
            "website": website,
            "bio": bio,
            "description": description,
            "experience": experience,
            "dateJoined": "1508884475917",
            "dateUpdated": "1508884475917"
        }
    except Exception as e:
        # Python 3 exceptions have no .message attribute; print the
        # exception itself instead.
        print(e)
        return
# Scrape all artist profiles and persist them to artists.json.
with open('artists.json', 'w') as outfile:
    json.dump(parse_artists(), outfile)
# The block below is dead code disabled via a bare string literal; it
# references helpers (get_artist_urls, compile_artist_profiles,
# recipe_array) that are not defined in this file.
'''artist_urls = get_artist_urls()
artist_array = compile_artist_profiles(artist_urls)
outfile = open("./toronto-artists.csv", "wb")
writer = csv.writer(outfile)
writer.writerows(recipe_array)'''
| 1,692 | 0 | 46 |
6b4633f252cbbf76f44dce400dc91c131a603c52 | 22,669 | py | Python | pgcli/packages/sqlcompletion.py | akshay-joshi/pgcli | 51c4cf495cab0722f0f474dceb502746e8e7c5ed | [
"BSD-3-Clause"
] | null | null | null | pgcli/packages/sqlcompletion.py | akshay-joshi/pgcli | 51c4cf495cab0722f0f474dceb502746e8e7c5ed | [
"BSD-3-Clause"
] | null | null | null | pgcli/packages/sqlcompletion.py | akshay-joshi/pgcli | 51c4cf495cab0722f0f474dceb502746e8e7c5ed | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
import sys
import re
import sqlparse
from collections import namedtuple
from sqlparse.sql import Comparison, Identifier, Where
from .parseutils.utils import (
last_word, find_prev_keyword, parse_partial_identifier)
from .parseutils.tables import extract_tables
from .parseutils.ctes import isolate_query_ctes
from pgspecial.main import parse_special_command
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str
else:
string_types = basestring
Special = namedtuple('Special', [])
Database = namedtuple('Database', [])
Schema = namedtuple('Schema', ['quoted'])
Schema.__new__.__defaults__ = (False,)
# FromClauseItem is a table/view/function used in the FROM clause
# `table_refs` contains the list of tables/... already in the statement,
# used to ensure that the alias we suggest is unique
FromClauseItem = namedtuple('FromClauseItem', 'schema table_refs local_tables')
Table = namedtuple('Table', ['schema', 'table_refs', 'local_tables'])
TableFormat = namedtuple('TableFormat', [])
View = namedtuple('View', ['schema', 'table_refs'])
# JoinConditions are suggested after ON, e.g. 'foo.barid = bar.barid'
JoinCondition = namedtuple('JoinCondition', ['table_refs', 'parent'])
# Joins are suggested after JOIN, e.g. 'foo ON foo.barid = bar.barid'
Join = namedtuple('Join', ['table_refs', 'schema'])
Function = namedtuple('Function', ['schema', 'table_refs', 'usage'])
# For convenience, don't require the `usage` argument in Function constructor
Function.__new__.__defaults__ = (None, tuple(), None)
Table.__new__.__defaults__ = (None, tuple(), tuple())
View.__new__.__defaults__ = (None, tuple())
FromClauseItem.__new__.__defaults__ = (None, tuple(), tuple())
Column = namedtuple(
'Column',
['table_refs', 'require_last_table', 'local_tables', 'qualifiable',
'context']
)
Column.__new__.__defaults__ = (None, None, tuple(), False, None)
Keyword = namedtuple('Keyword', ['last_token'])
Keyword.__new__.__defaults__ = (None,)
NamedQuery = namedtuple('NamedQuery', [])
Datatype = namedtuple('Datatype', ['schema'])
Alias = namedtuple('Alias', ['aliases'])
Path = namedtuple('Path', [])
def suggest_type(full_text, text_before_cursor):
"""Takes the full_text that is typed so far and also the text before the
cursor to suggest completion type and scope.
Returns a tuple with a type of entity ('table', 'column' etc) and a scope.
A scope for a column category will be a list of tables.
"""
if full_text.startswith('\\i '):
return (Path(),)
# This is a temporary hack; the exception handling
# here should be removed once sqlparse has been fixed
try:
stmt = SqlStatement(full_text, text_before_cursor)
except (TypeError, AttributeError):
return []
# Check for special commands and handle those separately
if stmt.parsed:
# Be careful here because trivial whitespace is parsed as a
# statement, but the statement won't have a first token
tok1 = stmt.parsed.token_first()
if tok1 and tok1.value == '\\':
text = stmt.text_before_cursor + stmt.word_before_cursor
return suggest_special(text)
return suggest_based_on_last_token(stmt.last_token, stmt)
named_query_regex = re.compile(r'^\s*\\ns\s+[A-z0-9\-_]+\s+')
def _strip_named_query(txt):
"""
This will strip "save named query" command in the beginning of the line:
'\ns zzz SELECT * FROM abc' -> 'SELECT * FROM abc'
' \ns zzz SELECT * FROM abc' -> 'SELECT * FROM abc'
"""
if named_query_regex.match(txt):
txt = named_query_regex.sub('', txt)
return txt
function_body_pattern = re.compile(r'(\$.*?\$)([\s\S]*?)\1', re.M)
SPECIALS_SUGGESTION = {
'dT': Datatype,
'df': Function,
'dt': Table,
'dv': View,
'sf': Function,
}
def identifies(id, ref):
"""Returns true if string `id` matches TableReference `ref`"""
return id == ref.alias or id == ref.name or (
ref.schema and (id == ref.schema + '.' + ref.name))
def _allow_join_condition(statement):
"""
Tests if a join condition should be suggested
We need this to avoid bad suggestions when entering e.g.
select * from tbl1 a join tbl2 b on a.id = <cursor>
So check that the preceding token is a ON, AND, or OR keyword, instead of
e.g. an equals sign.
:param statement: an sqlparse.sql.Statement
:return: boolean
"""
if not statement or not statement.tokens:
return False
last_tok = statement.token_prev(len(statement.tokens))[1]
return last_tok.value.lower() in ('on', 'and', 'or')
def _allow_join(statement):
"""
Tests if a join should be suggested
We need this to avoid bad suggestions when entering e.g.
select * from tbl1 a join tbl2 b <cursor>
So check that the preceding token is a JOIN keyword
:param statement: an sqlparse.sql.Statement
:return: boolean
"""
if not statement or not statement.tokens:
return False
last_tok = statement.token_prev(len(statement.tokens))[1]
return (last_tok.value.lower().endswith('join')
and last_tok.value.lower() not in('cross join', 'natural join'))
| 38.357022 | 79 | 0.619613 | from __future__ import print_function
import sys
import re
import sqlparse
from collections import namedtuple
from sqlparse.sql import Comparison, Identifier, Where
from .parseutils.utils import (
last_word, find_prev_keyword, parse_partial_identifier)
from .parseutils.tables import extract_tables
from .parseutils.ctes import isolate_query_ctes
from pgspecial.main import parse_special_command
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
string_types = str
else:
string_types = basestring
Special = namedtuple('Special', [])
Database = namedtuple('Database', [])
Schema = namedtuple('Schema', ['quoted'])
Schema.__new__.__defaults__ = (False,)
# FromClauseItem is a table/view/function used in the FROM clause
# `table_refs` contains the list of tables/... already in the statement,
# used to ensure that the alias we suggest is unique
FromClauseItem = namedtuple('FromClauseItem', 'schema table_refs local_tables')
Table = namedtuple('Table', ['schema', 'table_refs', 'local_tables'])
TableFormat = namedtuple('TableFormat', [])
View = namedtuple('View', ['schema', 'table_refs'])
# JoinConditions are suggested after ON, e.g. 'foo.barid = bar.barid'
JoinCondition = namedtuple('JoinCondition', ['table_refs', 'parent'])
# Joins are suggested after JOIN, e.g. 'foo ON foo.barid = bar.barid'
Join = namedtuple('Join', ['table_refs', 'schema'])
Function = namedtuple('Function', ['schema', 'table_refs', 'usage'])
# For convenience, don't require the `usage` argument in Function constructor
Function.__new__.__defaults__ = (None, tuple(), None)
Table.__new__.__defaults__ = (None, tuple(), tuple())
View.__new__.__defaults__ = (None, tuple())
FromClauseItem.__new__.__defaults__ = (None, tuple(), tuple())
Column = namedtuple(
'Column',
['table_refs', 'require_last_table', 'local_tables', 'qualifiable',
'context']
)
Column.__new__.__defaults__ = (None, None, tuple(), False, None)
Keyword = namedtuple('Keyword', ['last_token'])
Keyword.__new__.__defaults__ = (None,)
NamedQuery = namedtuple('NamedQuery', [])
Datatype = namedtuple('Datatype', ['schema'])
Alias = namedtuple('Alias', ['aliases'])
Path = namedtuple('Path', [])
class SqlStatement(object):
    """Container for a parsed (possibly incomplete) SQL statement.

    Exposes the pieces the completion engine needs: the word under the
    cursor, the isolated current statement, CTE-local tables and the
    last parsed token.
    """
    def __init__(self, full_text, text_before_cursor):
        self.identifier = None
        self.word_before_cursor = word_before_cursor = last_word(
            text_before_cursor, include='many_punctuations')
        full_text = _strip_named_query(full_text)
        text_before_cursor = _strip_named_query(text_before_cursor)
        # Pull CTEs out of the query so their tables become "local" tables.
        full_text, text_before_cursor, self.local_tables = \
            isolate_query_ctes(full_text, text_before_cursor)
        self.text_before_cursor_including_last_word = text_before_cursor
        # If we've partially typed a word then word_before_cursor won't be an
        # empty string. In that case we want to remove the partially typed
        # string before sending it to the sqlparser. Otherwise the last token
        # will always be the partially typed string which renders the smart
        # completion useless because it will always return the list of
        # keywords as completion.
        if self.word_before_cursor:
            if word_before_cursor[-1] == '(' or word_before_cursor[0] == '\\':
                parsed = sqlparse.parse(text_before_cursor)
            else:
                text_before_cursor = \
                    text_before_cursor[:-len(word_before_cursor)]
                parsed = sqlparse.parse(text_before_cursor)
                self.identifier = parse_partial_identifier(word_before_cursor)
        else:
            parsed = sqlparse.parse(text_before_cursor)
        full_text, text_before_cursor, parsed = \
            _split_multiple_statements(full_text, text_before_cursor, parsed)
        self.full_text = full_text
        self.text_before_cursor = text_before_cursor
        self.parsed = parsed
        # Last token of the current statement, or '' for an empty buffer.
        self.last_token = \
            parsed and parsed.token_prev(len(parsed.tokens))[1] or ''
    def is_insert(self):
        """Return True when the statement's first token is INSERT."""
        return self.parsed.token_first().value.lower() == 'insert'
    def get_tables(self, scope='full'):
        """ Gets the tables available in the statement.
        param `scope:` possible values: 'full', 'insert', 'before'
        If 'insert', only the first table is returned.
        If 'before', only tables before the cursor are returned.
        If not 'insert' and the stmt is an insert, the first table is skipped.
        """
        tables = extract_tables(
            self.full_text if scope == 'full' else self.text_before_cursor)
        if scope == 'insert':
            tables = tables[:1]
        elif self.is_insert():
            # Skip the target table of the INSERT itself.
            tables = tables[1:]
        return tables
    def get_previous_token(self, token):
        """Return the token immediately preceding *token* in the parse tree."""
        return self.parsed.token_prev(self.parsed.token_index(token))[1]
    def get_identifier_schema(self):
        """Return the schema qualifying the identifier at the cursor, or None."""
        schema = (self.identifier and self.identifier.get_parent_name()) \
            or None
        # If schema name is unquoted, lower-case it
        if schema and self.identifier.value[0] != '"':
            schema = schema.lower()
        return schema
    def reduce_to_prev_keyword(self, n_skip=0):
        """Trim text_before_cursor back to the previous keyword and return it."""
        prev_keyword, self.text_before_cursor = \
            find_prev_keyword(self.text_before_cursor, n_skip=n_skip)
        return prev_keyword
def suggest_type(full_text, text_before_cursor):
"""Takes the full_text that is typed so far and also the text before the
cursor to suggest completion type and scope.
Returns a tuple with a type of entity ('table', 'column' etc) and a scope.
A scope for a column category will be a list of tables.
"""
if full_text.startswith('\\i '):
return (Path(),)
# This is a temporary hack; the exception handling
# here should be removed once sqlparse has been fixed
try:
stmt = SqlStatement(full_text, text_before_cursor)
except (TypeError, AttributeError):
return []
# Check for special commands and handle those separately
if stmt.parsed:
# Be careful here because trivial whitespace is parsed as a
# statement, but the statement won't have a first token
tok1 = stmt.parsed.token_first()
if tok1 and tok1.value == '\\':
text = stmt.text_before_cursor + stmt.word_before_cursor
return suggest_special(text)
return suggest_based_on_last_token(stmt.last_token, stmt)
named_query_regex = re.compile(r'^\s*\\ns\s+[A-z0-9\-_]+\s+')
def _strip_named_query(txt):
    r"""
    This will strip "save named query" command in the beginning of the line:
    '\ns zzz SELECT * FROM abc' -> 'SELECT * FROM abc'
    '  \ns zzz SELECT * FROM abc' -> 'SELECT * FROM abc'

    (Raw docstring: in the original non-raw string, '\n' was interpreted
    as a newline, garbling the examples.)
    """
    if named_query_regex.match(txt):
        txt = named_query_regex.sub('', txt)
    return txt
function_body_pattern = re.compile(r'(\$.*?\$)([\s\S]*?)\1', re.M)
def _find_function_body(text):
    """Locate the dollar-quoted function body inside *text*.

    Returns the (start, end) offsets of the text between the dollar
    quotes (group 2 of ``function_body_pattern``), or (None, None)
    when no body is present.
    """
    match = function_body_pattern.search(text)
    if match is None:
        return None, None
    return match.start(2), match.end(2)
def _statement_from_function(full_text, text_before_cursor, statement):
    """If the cursor sits inside a function's dollar-quoted body, re-parse
    using only that body so completion operates on the inner SQL.

    Returns (full_text, text_before_cursor, statement); all three are
    unchanged when there is no body or the cursor lies outside it.
    """
    current_pos = len(text_before_cursor)
    body_start, body_end = _find_function_body(full_text)
    if body_start is None:
        return full_text, text_before_cursor, statement
    if not body_start <= current_pos < body_end:
        return full_text, text_before_cursor, statement
    # Narrow both texts to the function body and re-parse.
    full_text = full_text[body_start:body_end]
    text_before_cursor = text_before_cursor[body_start:]
    parsed = sqlparse.parse(text_before_cursor)
    return _split_multiple_statements(full_text, text_before_cursor, parsed)
def _split_multiple_statements(full_text, text_before_cursor, parsed):
    """Isolate the statement containing the cursor from a multi-statement
    buffer, descending into a CREATE FUNCTION body when applicable.

    Returns (full_text, text_before_cursor, statement); statement is
    None for an empty buffer.
    """
    if len(parsed) > 1:
        # Multiple statements being edited -- isolate the current one by
        # cumulatively summing statement lengths to find the one that bounds
        # the current position
        current_pos = len(text_before_cursor)
        stmt_start, stmt_end = 0, 0
        for statement in parsed:
            stmt_len = len(str(statement))
            stmt_start, stmt_end = stmt_end, stmt_end + stmt_len
            if stmt_end >= current_pos:
                text_before_cursor = full_text[stmt_start:current_pos]
                full_text = full_text[stmt_start:]
                break
    elif parsed:
        # A single statement
        statement = parsed[0]
    else:
        # The empty string
        return full_text, text_before_cursor, None
    token2 = None
    if statement.get_type() in ('CREATE', 'CREATE OR REPLACE'):
        token1 = statement.token_first()
        if token1:
            token1_idx = statement.token_index(token1)
            token2 = statement.token_next(token1_idx)[1]
    # For CREATE [OR REPLACE] FUNCTION, complete inside the function body.
    if token2 and token2.value.upper() == 'FUNCTION':
        full_text, text_before_cursor, statement = _statement_from_function(
            full_text, text_before_cursor, statement
        )
    return full_text, text_before_cursor, statement
SPECIALS_SUGGESTION = {
'dT': Datatype,
'df': Function,
'dt': Table,
'dv': View,
'sf': Function,
}
def suggest_special(text):
text = text.lstrip()
cmd, _, arg = parse_special_command(text)
if cmd == text:
# Trying to complete the special command itself
return (Special(),)
if cmd in ('\\c', '\\connect'):
return (Database(),)
if cmd == '\\T':
return (TableFormat(),)
if cmd == '\\dn':
return (Schema(),)
if arg:
# Try to distinguish "\d name" from "\d schema.name"
# Note that this will fail to obtain a schema name if wildcards are
# used, e.g. "\d schema???.name"
parsed = sqlparse.parse(arg)[0].tokens[0]
try:
schema = parsed.get_parent_name()
except AttributeError:
schema = None
else:
schema = None
if cmd[1:] == 'd':
# \d can describe tables or views
if schema:
return (Table(schema=schema),
View(schema=schema),)
else:
return (Schema(),
Table(schema=None),
View(schema=None),)
elif cmd[1:] in SPECIALS_SUGGESTION:
rel_type = SPECIALS_SUGGESTION[cmd[1:]]
if schema:
if rel_type == Function:
return (Function(schema=schema, usage='special'),)
return (rel_type(schema=schema),)
else:
if rel_type == Function:
return (Schema(), Function(schema=None, usage='special'),)
return (Schema(), rel_type(schema=None))
if cmd in ['\\n', '\\ns', '\\nd']:
return (NamedQuery(),)
return (Keyword(), Special())
def suggest_based_on_last_token(token, stmt):
if isinstance(token, string_types):
token_v = token.lower()
elif isinstance(token, Comparison):
# If 'token' is a Comparison type such as
# 'select * FROM abc a JOIN def d ON a.id = d.'. Then calling
# token.value on the comparison type will only return the lhs of the
# comparison. In this case a.id. So we need to do token.tokens to get
# both sides of the comparison and pick the last token out of that
# list.
token_v = token.tokens[-1].value.lower()
elif isinstance(token, Where):
# sqlparse groups all tokens from the where clause into a single token
# list. This means that token.value may be something like
# 'where foo > 5 and '. We need to look "inside" token.tokens to handle
# suggestions in complicated where clauses correctly
prev_keyword = stmt.reduce_to_prev_keyword()
return suggest_based_on_last_token(prev_keyword, stmt)
elif isinstance(token, Identifier):
# If the previous token is an identifier, we can suggest datatypes if
# we're in a parenthesized column/field list, e.g.:
# CREATE TABLE foo (Identifier <CURSOR>
# CREATE FUNCTION foo (Identifier <CURSOR>
# If we're not in a parenthesized list, the most likely scenario is the
# user is about to specify an alias, e.g.:
# SELECT Identifier <CURSOR>
# SELECT foo FROM Identifier <CURSOR>
prev_keyword, _ = find_prev_keyword(stmt.text_before_cursor)
if prev_keyword and prev_keyword.value == '(':
# Suggest datatypes
return suggest_based_on_last_token('type', stmt)
else:
return (Keyword(),)
else:
token_v = token.value.lower()
if not token:
return (Keyword(), Special())
elif token_v.endswith('('):
p = sqlparse.parse(stmt.text_before_cursor)[0]
if p.tokens and isinstance(p.tokens[-1], Where):
# Four possibilities:
# 1 - Parenthesized clause like "WHERE foo AND ("
# Suggest columns/functions
# 2 - Function call like "WHERE foo("
# Suggest columns/functions
# 3 - Subquery expression like "WHERE EXISTS ("
# Suggest keywords, in order to do a subquery
# 4 - Subquery OR array comparison like "WHERE foo = ANY("
# Suggest columns/functions AND keywords.(If we wanted to be
# really fancy, we could suggest only array-typed columns)
column_suggestions = suggest_based_on_last_token('where', stmt)
# Check for a subquery expression (cases 3 & 4)
where = p.tokens[-1]
prev_tok = where.token_prev(len(where.tokens) - 1)[1]
if isinstance(prev_tok, Comparison):
# e.g. "SELECT foo FROM bar WHERE foo = ANY("
prev_tok = prev_tok.tokens[-1]
prev_tok = prev_tok.value.lower()
if prev_tok == 'exists':
return (Keyword(),)
else:
return column_suggestions
# Get the token before the parens
prev_tok = p.token_prev(len(p.tokens) - 1)[1]
if (prev_tok and prev_tok.value
and prev_tok.value.lower().split(' ')[-1] == 'using'):
# tbl1 INNER JOIN tbl2 USING (col1, col2)
tables = stmt.get_tables('before')
# suggest columns that are present in more than one table
return (Column(table_refs=tables,
require_last_table=True,
local_tables=stmt.local_tables),)
elif p.token_first().value.lower() == 'select':
# If the lparen is preceeded by a space chances are we're about to
# do a sub-select.
if last_word(stmt.text_before_cursor,
'all_punctuations').startswith('('):
return (Keyword(),)
prev_prev_tok = prev_tok and p.token_prev(p.token_index(prev_tok))[1]
if prev_prev_tok and prev_prev_tok.normalized == 'INTO':
return (
Column(table_refs=stmt.get_tables('insert'), context='insert'),
)
# We're probably in a function argument list
return (Column(table_refs=extract_tables(stmt.full_text),
local_tables=stmt.local_tables, qualifiable=True),)
elif token_v == 'set':
return (Column(table_refs=stmt.get_tables(),
local_tables=stmt.local_tables),)
elif token_v in ('select', 'where', 'having', 'by', 'distinct'):
# Check for a table alias or schema qualification
parent = (stmt.identifier and stmt.identifier.get_parent_name()) or []
tables = stmt.get_tables()
if parent:
tables = tuple(t for t in tables if identifies(parent, t))
return (Column(table_refs=tables, local_tables=stmt.local_tables),
Table(schema=parent),
View(schema=parent),
Function(schema=parent),)
else:
return (Column(table_refs=tables, local_tables=stmt.local_tables,
qualifiable=True),
Function(schema=None),
Keyword(token_v.upper()),)
elif token_v == 'as':
# Don't suggest anything for aliases
return ()
elif (token_v.endswith('join') and token.is_keyword) \
or (token_v in ('copy', 'from', 'update', 'into', 'describe',
'truncate')):
schema = stmt.get_identifier_schema()
tables = extract_tables(stmt.text_before_cursor)
is_join = token_v.endswith('join') and token.is_keyword
# Suggest tables from either the currently-selected schema or the
# public schema if no schema has been specified
suggest = []
if not schema:
# Suggest schemas
suggest.insert(0, Schema())
if token_v == 'from' or is_join:
suggest.append(FromClauseItem(schema=schema,
table_refs=tables,
local_tables=stmt.local_tables))
elif token_v == 'truncate':
suggest.append(Table(schema))
else:
suggest.extend((Table(schema), View(schema)))
if is_join and _allow_join(stmt.parsed):
tables = stmt.get_tables('before')
suggest.append(Join(table_refs=tables, schema=schema))
return tuple(suggest)
elif token_v == 'function':
schema = stmt.get_identifier_schema()
# stmt.get_previous_token will fail for e.g.
# `SELECT 1 FROM functions WHERE function:`
try:
prev = stmt.get_previous_token(token).value.lower()
if prev in('drop', 'alter', 'create', 'create or replace'):
return (Function(schema=schema, usage='signature'),)
except ValueError:
pass
return tuple()
elif token_v in ('table', 'view'):
# E.g. 'ALTER TABLE <tablname>'
rel_type = {'table': Table, 'view': View,
'function': Function}[token_v]
schema = stmt.get_identifier_schema()
if schema:
return (rel_type(schema=schema),)
else:
return (Schema(), rel_type(schema=schema))
elif token_v == 'column':
# E.g. 'ALTER TABLE foo ALTER COLUMN bar
return (Column(table_refs=stmt.get_tables()),)
elif token_v == 'on':
tables = stmt.get_tables('before')
parent = \
(stmt.identifier and stmt.identifier.get_parent_name()) or None
if parent:
# "ON parent.<suggestion>"
# parent can be either a schema name or table alias
filteredtables = tuple(t for t in tables if identifies(parent, t))
sugs = [Column(table_refs=filteredtables,
local_tables=stmt.local_tables),
Table(schema=parent),
View(schema=parent),
Function(schema=parent)]
if filteredtables and _allow_join_condition(stmt.parsed):
sugs.append(JoinCondition(table_refs=tables,
parent=filteredtables[-1]))
return tuple(sugs)
else:
# ON <suggestion>
# Use table alias if there is one, otherwise the table name
aliases = tuple(t.ref for t in tables)
if _allow_join_condition(stmt.parsed):
return (Alias(aliases=aliases), JoinCondition(
table_refs=tables, parent=None))
else:
return (Alias(aliases=aliases),)
elif token_v in ('c', 'use', 'database', 'template'):
# "\c <db", "use <db>", "DROP DATABASE <db>",
# "CREATE DATABASE <newdb> WITH TEMPLATE <db>"
return (Database(),)
elif token_v == 'schema':
# DROP SCHEMA schema_name, SET SCHEMA schema name
prev_keyword = stmt.reduce_to_prev_keyword(n_skip=2)
quoted = prev_keyword and prev_keyword.value.lower() == 'set'
return (Schema(quoted),)
elif token_v.endswith(',') or token_v in ('=', 'and', 'or'):
prev_keyword = stmt.reduce_to_prev_keyword()
if prev_keyword:
return suggest_based_on_last_token(prev_keyword, stmt)
else:
return ()
elif token_v in ('type', '::'):
# ALTER TABLE foo SET DATA TYPE bar
# SELECT foo::bar
# Note that tables are a form of composite type in postgresql, so
# they're suggested here as well
schema = stmt.get_identifier_schema()
suggestions = [Datatype(schema=schema),
Table(schema=schema)]
if not schema:
suggestions.append(Schema())
return tuple(suggestions)
elif token_v in {'alter', 'create', 'drop'}:
return (Keyword(token_v.upper()),)
elif token.is_keyword:
# token is a keyword we haven't implemented any special handling for
# go backwards in the query until we find one we do recognize
prev_keyword = stmt.reduce_to_prev_keyword(n_skip=1)
if prev_keyword:
return suggest_based_on_last_token(prev_keyword, stmt)
else:
return (Keyword(token_v.upper()),)
else:
return (Keyword(),)
def identifies(id, ref):
    """Return truthy if the string *id* refers to TableReference *ref*.

    A match is the table's alias, its bare name, or the schema-qualified
    "schema.name" form.
    """
    return (
        id == ref.alias
        or id == ref.name
        or (ref.schema and (id == ref.schema + '.' + ref.name))
    )
def _allow_join_condition(statement):
"""
Tests if a join condition should be suggested
We need this to avoid bad suggestions when entering e.g.
select * from tbl1 a join tbl2 b on a.id = <cursor>
So check that the preceding token is a ON, AND, or OR keyword, instead of
e.g. an equals sign.
:param statement: an sqlparse.sql.Statement
:return: boolean
"""
if not statement or not statement.tokens:
return False
last_tok = statement.token_prev(len(statement.tokens))[1]
return last_tok.value.lower() in ('on', 'and', 'or')
def _allow_join(statement):
"""
Tests if a join should be suggested
We need this to avoid bad suggestions when entering e.g.
select * from tbl1 a join tbl2 b <cursor>
So check that the preceding token is a JOIN keyword
:param statement: an sqlparse.sql.Statement
:return: boolean
"""
if not statement or not statement.tokens:
return False
last_tok = statement.token_prev(len(statement.tokens))[1]
return (last_tok.value.lower().endswith('join')
and last_tok.value.lower() not in('cross join', 'natural join'))
| 16,500 | 771 | 138 |
5b21e44116bbbcc52b3378dbbcba99b01edbe18e | 2,397 | py | Python | db/Answer.py | sysu-team1/BackEnd | 4773545897fee3aa7a767cbe6d011372623e1e58 | [
"MIT"
] | 1 | 2019-11-19T09:08:50.000Z | 2019-11-19T09:08:50.000Z | db/Answer.py | sysu-team1/BackEnd | 4773545897fee3aa7a767cbe6d011372623e1e58 | [
"MIT"
] | null | null | null | db/Answer.py | sysu-team1/BackEnd | 4773545897fee3aa7a767cbe6d011372623e1e58 | [
"MIT"
] | null | null | null | import random
from .prepare import app, db, model_repr
class Answer(db.Model):
'''
使用的sql语句:
```sql
CREATE TABLE `answers` (
`accept_id` int(11) NOT NULL COMMENT '接受id',
`problem_id` int(11) NOT NULL COMMENT '问题id',
`answer` int(11) NOT NULL DEFAULT '-1' COMMENT '具体答案的选项',
PRIMARY KEY (`accept_id`,`problem_id`),
KEY `problem_index` (`problem_id`),
CONSTRAINT `answers_ibfk_1` FOREIGN KEY (`accept_id`) REFERENCES `accepts` (`id`) ON DELETE CASCADE,
CONSTRAINT `answers_ibfk_2` FOREIGN KEY (`problem_id`) REFERENCES `problems` (`id`) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8
```
属性:
基本属性
problem: 关联的问题
task: 关联的任务
'''
__tablename__ = 'answers'
accept_id = db.Column('accept_id', db.Integer, db.ForeignKey(
'accepts.id', ondelete='cascade'), nullable=False, comment='接受id')
problem_id = db.Column('problem_id', db.Integer, db.ForeignKey(
'problems.id', ondelete='cascade'), nullable=False, comment='问题id')
# answer_id = db.Column('answer_id', db.Integer, db.ForeignKey(
# 'answers.openid', ondelete='cascade'), nullable=False, comment='回答者id')
# task_id = db.Column('task_id', db.Integer, db.ForeignKey(
# 'tasks.id', ondelete='cascade'), nullable=False, comment='任务id')
answer = db.Column('answer', db.Integer(
), nullable=False, server_default='-1', comment='具体答案的选项')
accept = db.relationship('Accept', back_populates='answers')
problem = db.relationship('Problem', back_populates='answers')
# task = db.relationship('Task', back_populates='answers')
# student = db.relationship('Student', back_populates='answers')
__table_args__ = (
db.PrimaryKeyConstraint('accept_id', 'problem_id'),
db.Index('problem_index', 'problem_id'),
)
| 38.66129 | 109 | 0.656237 | import random
from .prepare import app, db, model_repr
class Answer(db.Model):
    '''ORM model: one answer to one problem within an accepted task.

    SQL statement used (column COMMENTs kept verbatim from the schema):
    ```sql
    CREATE TABLE `answers` (
    `accept_id` int(11) NOT NULL COMMENT '接受id',
    `problem_id` int(11) NOT NULL COMMENT '问题id',
    `answer` int(11) NOT NULL DEFAULT '-1' COMMENT '具体答案的选项',
    PRIMARY KEY (`accept_id`,`problem_id`),
    KEY `problem_index` (`problem_id`),
    CONSTRAINT `answers_ibfk_1` FOREIGN KEY (`accept_id`) REFERENCES `accepts` (`id`) ON DELETE CASCADE,
    CONSTRAINT `answers_ibfk_2` FOREIGN KEY (`problem_id`) REFERENCES `problems` (`id`) ON DELETE CASCADE
    ) ENGINE=InnoDB DEFAULT CHARSET=utf8
    ```
    Attributes:
        basic columns: accept_id, problem_id, answer
        problem: the related Problem
        accept: the related Accept
    '''
    __tablename__ = 'answers'
    # First half of the composite PK: the Accept this answer belongs to.
    accept_id = db.Column('accept_id', db.Integer, db.ForeignKey(
        'accepts.id', ondelete='cascade'), nullable=False, comment='接受id')
    # Second half of the composite PK: the Problem being answered.
    problem_id = db.Column('problem_id', db.Integer, db.ForeignKey(
        'problems.id', ondelete='cascade'), nullable=False, comment='问题id')
    # answer_id = db.Column('answer_id', db.Integer, db.ForeignKey(
    #     'answers.openid', ondelete='cascade'), nullable=False, comment='回答者id')
    # task_id = db.Column('task_id', db.Integer, db.ForeignKey(
    #     'tasks.id', ondelete='cascade'), nullable=False, comment='任务id')
    # Index of the chosen option; -1 means "not answered yet".
    answer = db.Column('answer', db.Integer(
    ), nullable=False, server_default='-1', comment='具体答案的选项')
    accept = db.relationship('Accept', back_populates='answers')
    problem = db.relationship('Problem', back_populates='answers')
    # task = db.relationship('Task', back_populates='answers')
    # student = db.relationship('Student', back_populates='answers')
    __table_args__ = (
        db.PrimaryKeyConstraint('accept_id', 'problem_id'),
        db.Index('problem_index', 'problem_id'),
    )
    def __repr__(self):
        """Render using the JSON pattern/attribute order from app config."""
        return model_repr(self, app.config['ANSWER_JSON_PATTERN'], app.config['ANSWER_JSON_ATTR_ORDER'])
def random_answers(accept_id, problems, db_helper):
    """Create and persist one uniformly random answer per problem.

    Args:
        accept_id: id of the Accept row these answers belong to.
        problems: iterable of Problem rows; each must expose ``id`` and
            ``all_answers`` (options joined by app.config['SPLIT_ANSWER']).
        db_helper: persistence helper providing save_all() and commit().

    Returns:
        The list of newly created Answer objects.
    """
    answers = []
    for problem in problems:
        option_count = len(
            problem.all_answers.split(sep=app.config['SPLIT_ANSWER']))
        # randrange(n) draws from 0..n-1, avoiding the error-prone
        # randint(0, n - 1) form; the distribution is identical.
        int_answer = random.randrange(option_count)
        answers.append(Answer(
            accept_id=accept_id, problem_id=problem.id, answer=int_answer))
    db_helper.save_all(answers)
    db_helper.commit()
    return answers
| 480 | 0 | 50 |
0b00751199a21103bbd2d9de1bbc3315e858f87a | 2,476 | py | Python | switch_inputs/bioenergy_clean.py | Switch-Mexico/switch-inputs | e2afa96c40b516435c350d525119e4594f1b7eca | [
"MIT"
] | 1 | 2020-07-14T21:50:28.000Z | 2020-07-14T21:50:28.000Z | switch_inputs/bioenergy_clean.py | Switch-Mexico/switch-inputs | e2afa96c40b516435c350d525119e4594f1b7eca | [
"MIT"
] | 14 | 2018-12-14T23:21:09.000Z | 2019-05-10T21:42:36.000Z | switch_inputs/bioenergy_clean.py | Switch-Mexico/switch-inputs | e2afa96c40b516435c350d525119e4594f1b7eca | [
"MIT"
] | 1 | 2020-07-14T21:50:37.000Z | 2020-07-14T21:50:37.000Z | """
Clean bioenergy data from AZEL
"""
import os
import json
import itertools

import geopandas as gpd
import pandas as pd

# Ensure the output directory exists before writing any CSVs.
os.makedirs('data', exist_ok=True)

projection = 'epsg:4326'
# Distinct names from the loop variables below to avoid shadowing.
names = ['pecuarios', 'forestales', 'industriales', 'urbanos']
scenarios = ['E3', 'E1']

for scenario, name in itertools.product(scenarios, names):
    # Load bioenergy shape file
    print('Reading file: {}_R{}.shp'.format(scenario, name))
    df = gpd.read_file('../data/interim/shapes/FBio_{0}_R{1}.shp'.format(scenario,
                                                                         name))
    # Drop rows without geometry and re-project everything to a common CRS.
    df = df[df.geometry.notnull()].to_crs({'init': projection})

    # Load transmission region dictionary
    with open(os.path.join('../data/interim/', 'trans-regions.json'), 'r') as fp:
        trans_regions = json.load(fp)

    # Load transmission region shapefiles
    lz = gpd.read_file('../data/interim/shapes/Mask_T.shp')
    lz = lz.to_crs({'init': projection})
    lz.loc[:, 'trans-region'] = (lz['ID'].astype(int)
                                 .map('{0:02}'.format)
                                 .map(trans_regions))
    assert lz.crs == df.crs

    # Forestry layers are polygons spanning regions, so they need an overlay
    # instead of a point-in-polygon spatial join.
    if 'forestal' not in name:
        join = gpd.sjoin(df, lz, op='within')
    else:
        join = gpd.overlay(lz, df, how='intersection')

    # Get specific columns for output data; point layers carry X/Y
    # coordinates, polygon layers (forestry) do not.
    try:
        columns = ['trans-region', 'X', 'Y', 'CLASIFICAC', 'TIPO', 'PROCESO',
                   'GENE_GWha', 'CAPINST_MW', 'FP']
        bio = join[columns].copy()
    except KeyError:
        columns = ['trans-region', 'CLASIFICAC', 'TIPO', 'PROCESO',
                   'GENE_GWha', 'CAPINST_MW', 'FP']
        bio = join[columns].copy()

    # Normalize categorical text to lowercase snake_case.
    bio['CLASIFICAC'] = bio.CLASIFICAC.map(str.lower).str.replace(' ', '_')
    bio['TIPO'] = bio.TIPO.map(str.lower).str.replace(' ', '_')
    bio['PROCESO'] = bio.PROCESO.map(str.lower).str.replace(' ', '_')

    # Map AZEL scenario codes to output labels (E3 -> high, otherwise low)
    # without clobbering the loop variable.
    scenario_label = 'high' if 'E3' in scenario else 'low'
    bio.loc[:, 'scenario'] = scenario_label
    bio.loc[:, 'id'] = name
    bio = bio.rename(columns={'X': 'lng', 'Y': 'lat', 'CLASIFICAC': 'source',
                              'TIPO': 'category', 'FP': 'cf',
                              'GENE_GWha': 'gen_GWha', 'CAPINST_MW': 'cap_MW',
                              'PROCESO': 'fuel_type'})
    print('Saving data: {0}_{1}'.format(scenario_label, name))
    bio.to_csv('data/bioenergy_{0}_{1}.csv'.format(scenario_label, name), index=False)
| 38.6875 | 82 | 0.550485 | """
Clean bioenergy data from AZEL
"""
import os
import json
import itertools
import geopandas as gpd
import pandas as pd
os.makedirs('data', exist_ok=True)
projection = 'epsg:4326'
name = ['pecuarios', 'forestales', 'industriales', 'urbanos']
scenario = ['E3', 'E1']
for scenario, name in itertools.product(scenario, name):
# Load bioenergy shape file
print ('Reading file: {}_R{}.shp'.format(scenario, name))
df = gpd.read_file('../data/interim/shapes/FBio_{0}_R{1}.shp'.format(scenario,
name))
df = df[df.geometry.notnull()].to_crs({'init': projection})
# Load transmission region dictionary
with open(os.path.join('../data/interim/', 'trans-regions.json'), 'r') as fp:
trans_regions = json.load(fp)
# Load transmission region shapefiles
lz = gpd.read_file('../data/interim/shapes/Mask_T.shp')
lz = lz.to_crs({'init': projection})
lz.loc[:, 'trans-region'] = (lz['ID'].astype(int)
.map('{0:02}'.format)
.map(trans_regions))
assert lz.crs == df.crs
if not 'forestal' in name:
join = gpd.sjoin(df, lz, op='within')
else:
join = gpd.overlay(lz, df, how='intersection')
# Get specific columns for output data
try:
columns = ['trans-region', 'X', 'Y', 'CLASIFICAC', 'TIPO', 'PROCESO',
'GENE_GWha', 'CAPINST_MW', 'FP']
bio = join[columns].copy();
except KeyError:
columns = ['trans-region', 'CLASIFICAC', 'TIPO', 'PROCESO',
'GENE_GWha', 'CAPINST_MW', 'FP']
bio = join[columns].copy();
bio['CLASIFICAC'] = bio.CLASIFICAC.map(str.lower).str.replace(' ', '_')
bio['TIPO'] = bio.TIPO.map(str.lower).str.replace(' ', '_')
bio['PROCESO'] = bio.PROCESO.map(str.lower).str.replace(' ', '_')
if 'E3' in scenario:
scenario = 'high'
else:
scenario = 'low'
bio.loc[:, 'scenario'] = scenario
bio.loc[:, 'id'] = name
bio = bio.rename(columns={'X': 'lng', 'Y': 'lat', 'CLASIFICAC': 'source',
'TIPO': 'category', 'FP': 'cf',
'GENE_GWha': 'gen_GWha', 'CAPINST_MW':'cap_MW',
'PROCESO': 'fuel_type'})
print ('Saving data: {0}_{1}'.format(scenario, name))
bio.to_csv('data/bioenergy_{0}_{1}.csv'.format(scenario, name), index=False)
| 0 | 0 | 0 |
1e25f3f0b115342908828bb75c9bb105ab2844b3 | 1,720 | py | Python | bounds/essentials.py | rkirov/code_bounds | 2855c3bfd2972c98d93b891c4f737b6f320c2664 | [
"Unlicense"
] | null | null | null | bounds/essentials.py | rkirov/code_bounds | 2855c3bfd2972c98d93b891c4f737b6f320c2664 | [
"Unlicense"
] | null | null | null | bounds/essentials.py | rkirov/code_bounds | 2855c3bfd2972c98d93b891c4f737b6f320c2664 | [
"Unlicense"
] | null | null | null |
#the table should be read T[deg][a] where a is the multiplicity of the Q
def build1DCeilingTable(c):
'''entry for A is max k s.t. l(A) = l(A+kP) and l(A+kQ) '''
max_deg = [0 for _ in range(c.m)]
CLP = c.fill_degree_table_reverse(update, max_deg)
CLQ = c.fill_degree_table_reverse(update, max_deg)
return ['CLP','CLQ'], [CLP,CLQ]
def l_values(c):
    """Fill the degree table of dimension values for curve *c*.

    Returns the label list and table list in the (labels, tables)
    convention used by ``essentials_dispatcher``.
    """
    base_row = [0] * c.m

    def bump(div, minus_p_val, minus_q_val):
        # The value grows by one exactly when the divisor is a P-nongap.
        return minus_p_val + div.is_P_nongap()

    table = c.fill_degree_table(bump, base_row)
    return ['LVAL'], [table]
def build_floor_table(c):
    # Tabulate the "floor" for every divisor class of curve *c*; each entry is
    # the tuple produced by Divisor.to_tuple() (multiplicities of the two
    # points, presumably -- TODO confirm orientation against to_tuple()).
    # if l(D) = 0, floor is assigned to be [0,1], XXX: why not [0,0] (2011)
    minus_one_deg = [(0,1) for _ in range(c.m)]
    def update(div, minus_p_val, minus_q_val):
        # A divisor is its own floor when both P and Q are nongaps; otherwise
        # it inherits the floor of the divisor with one point removed.
        jumpP = div.is_P_nongap()
        jumpQ = div.is_Q_nongap()
        if jumpP and jumpQ:
            return div.to_tuple()
        elif jumpQ:
            return minus_p_val
        else:
            # this includes two cases
            return minus_q_val
    floor_table = c.fill_degree_table(update, minus_one_deg)
    return ['FL'], [floor_table]
#the table should be read T[deg][a] where a is the multiplicity of the Q
def build1DCeilingTable(c):
    '''entry for A is max k s.t. l(A) = l(A+kP) and l(A+kQ) '''
    max_deg = [0 for _ in range(c.m)]
    # CLP: run-length of consecutive P-gaps directly above each divisor,
    # filled from the top degree downwards.
    def update(div, plus_p_val, plus_q_val):
        div_plus_P = div + c.div(P=1,deg=1)
        return (plus_p_val + 1) if not div_plus_P.is_P_nongap() else 0
    CLP = c.fill_degree_table_reverse(update, max_deg)
    # CLQ: the same count in the Q direction. The local name `update` is
    # deliberately rebound; the first closure is already captured by CLP.
    def update(div, plus_p_val, plus_q_val):
        div_plus_Q = div + c.div(Q=1,deg=1)
        return (plus_q_val + 1) if not div_plus_Q.is_Q_nongap() else 0
    CLQ = c.fill_degree_table_reverse(update, max_deg)
    return ['CLP','CLQ'], [CLP,CLQ]
def essentials_dispatcher(curve, choice):
    """Route *choice* to the matching table builder.

    Returns the builder's (labels, tables) pair, or None for unknown keys.
    """
    if choice in ('CLP', 'CLQ'):
        return build1DCeilingTable(curve)
    if choice == 'FL':
        return build_floor_table(curve)
    if choice == 'LVAL':
        return l_values(curve)
    return None
| 1,248 | 0 | 122 |
47a1ef61a50d752f006c753ce45eb846e946bcb9 | 1,850 | py | Python | DSM/estrutura_dados/Entregas/PROVA SUBSTITUTIVA/exclusao.py | murillotlopes/DSM-Python | 2822b9b1b988936ab098c7052180ee3c3d7dd735 | [
"MIT"
] | null | null | null | DSM/estrutura_dados/Entregas/PROVA SUBSTITUTIVA/exclusao.py | murillotlopes/DSM-Python | 2822b9b1b988936ab098c7052180ee3c3d7dd735 | [
"MIT"
] | null | null | null | DSM/estrutura_dados/Entregas/PROVA SUBSTITUTIVA/exclusao.py | murillotlopes/DSM-Python | 2822b9b1b988936ab098c7052180ee3c3d7dd735 | [
"MIT"
] | null | null | null | # 1. Observe os dois métodos de exclusão listados abaixo.
#
# 2. Identifique a quais estruturas pertencem os métodos, respectivamente.
# R: A primeira é Lista duplamente encadeada (double_linked), metodo de remoção. A segunda é Lista encadeada, metodo de remoção
#
# 3. Explique qual a diferença FUNDAMENTAL entre os dois métodos.
# R: na lista duplamente encadeada um nodo sempre aponta para o next e um prev, pois assim a lista pode ser percorrida de qualquer direção, tanto no inicio para o fim, quanto do fim para o inicio. Já a lista ordenada, possui apenas o next, ou seja, a lista só é acessada percorrendo uma unica direção.
# Método 1
# Método 2 | 33.636364 | 302 | 0.648649 | # 1. Observe os dois métodos de exclusão listados abaixo.
#
# 2. Identifique a quais estruturas pertencem os métodos, respectivamente.
# R: A primeira é Lista duplamente encadeada (double_linked), metodo de remoção. A segunda é Lista encadeada, metodo de remoção
#
# 3. Explique qual a diferença FUNDAMENTAL entre os dois métodos.
# R: na lista duplamente encadeada um nodo sempre aponta para o next e um prev, pois assim a lista pode ser percorrida de qualquer direção, tanto no inicio para o fim, quanto do fim para o inicio. Já a lista ordenada, possui apenas o next, ou seja, a lista só é acessada percorrendo uma unica direção.
# Método 1
def remove(self, pos):
    """Remove and return the data at position *pos* of the doubly linked list.

    Returns None when the list is empty or *pos* is out of range.
    """
    if self.is_empty() or pos < 0 or pos > self.__count - 1: return None
    if pos == 0:
        # Removing the head: advance head and clear the new head's back link.
        removed = self.__head
        self.__head = removed.next
        if self.__head is not None: self.__head.prev = None
        if self.__count == 1: self.__tail = None
    elif pos == self.__count - 1:
        # Removing the tail: retreat tail and clear the new tail's forward link.
        removed = self.__tail
        self.__tail = removed.prev
        if self.__tail is not None: self.__tail.next = None
        if self.__count == 1: self.__head = None
    else:
        # Interior node: locate it and splice its neighbours together.
        removed = self.__find_node(pos)
        before = removed.prev
        after = removed.next
        before.next = after
        after.prev = before
    self.__count -= 1
    return removed.data
# Método 2
def remove(self, pos):
    """Remove and return the data at position *pos* of the singly linked list.

    Returns None when the list is empty or *pos* is out of range. Unlike the
    doubly linked variant, interior nodes are found by walking forward from
    the head, since nodes carry no back link.
    """
    if self.__count == 0 or pos < 0 or pos > self.__count - 1: return None
    if pos == 0:
        removed = self.__head
        self.__head = self.__head.next
    else:
        # Walk to the node just before *pos*, then splice around the target.
        before = self.__head
        for i in range(1, pos): before = before.next
        removed = before.next
        after = removed.next
        before.next = after
        if removed == self.__tail:
            self.__tail = before
    self.__count -= 1
    return removed.data
728ed67c9adf8bf2a90007fb924fb783092d33e8 | 9,864 | py | Python | invenio_rest/csrf.py | max-moser/invenio-rest | 1b6bd04c953b0e9662314d5ee9601e966196f332 | [
"MIT"
] | null | null | null | invenio_rest/csrf.py | max-moser/invenio-rest | 1b6bd04c953b0e9662314d5ee9601e966196f332 | [
"MIT"
] | null | null | null | invenio_rest/csrf.py | max-moser/invenio-rest | 1b6bd04c953b0e9662314d5ee9601e966196f332 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2020 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""CSRF Middleware.
The implementation is highly inspred from Django's initial implementation
about CSRF protection. For more information you can see here:
<https://github.com/django/django/blob/master/django/middleware/csrf.py>
"""
import re
import secrets
import string
from datetime import datetime, timedelta, timezone
from flask import Blueprint, abort, current_app, request
from itsdangerous import BadSignature, SignatureExpired, \
TimedJSONWebSignatureSerializer
from six import string_types
from six.moves.urllib.parse import urlparse
from .errors import RESTCSRFError
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = (
"Referer checking failed - %s does not match any trusted origins."
)
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
REASON_MALFORMED_REFERER = "Referer checking failed - Referer is malformed."
REASON_INSECURE_REFERER = (
"Referer checking failed - Referer is insecure while host is secure."
)
REASON_TOKEN_EXPIRED = "CSRF token expired. Try again."
def _get_csrf_serializer(expires_in=None):
"""Note that this serializer is used to encode/decode the CSRF cookie.
In case you change this implementation bear in mind that the token
generated must be signed so as to avoid any client-side tampering.
"""
expires_in = expires_in or current_app.config['CSRF_TOKEN_EXPIRES_IN']
return TimedJSONWebSignatureSerializer(
current_app.config.get(
'CSRF_SECRET',
current_app.config.get('SECRET_KEY') or 'CHANGE_ME'),
salt=current_app.config['CSRF_SECRET_SALT'],
expires_in=expires_in,
)
def csrf_validate():
"""Check CSRF cookie against request headers."""
if request.is_secure:
referer = request.referrer
if referer is None:
return _abort400(REASON_NO_REFERER)
referer = urlparse(referer)
# Make sure we have a valid URL for Referer.
if '' in (referer.scheme, referer.netloc):
return _abort400(REASON_MALFORMED_REFERER)
# Ensure that our Referer is also secure.
if not _is_referer_secure(referer):
return _abort400(REASON_INSECURE_REFERER)
is_hostname_allowed = referer.hostname in \
current_app.config.get('APP_ALLOWED_HOSTS')
if not is_hostname_allowed:
reason = REASON_BAD_REFERER % referer.geturl()
return _abort400(reason)
csrf_token = _get_csrf_token()
if csrf_token is None:
return _abort400(REASON_NO_CSRF_COOKIE)
request_csrf_token = _get_submitted_csrf_token()
if not request_csrf_token:
_abort400(REASON_BAD_TOKEN)
decoded_request_csrf_token = _decode_csrf(request_csrf_token)
if csrf_token != decoded_request_csrf_token:
return _abort400(REASON_BAD_TOKEN)
def reset_token():
"""Change the CSRF token in use for a request."""
request.csrf_cookie_needs_reset = True
class CSRFTokenMiddleware():
"""CSRF Token Middleware."""
def __init__(self, app=None):
"""Middleware initialization.
:param app: An instance of :class:`flask.Flask`.
"""
if app:
self.init_app(app)
def init_app(self, app):
"""Initialize middleware extension.
:param app: An instance of :class:`flask.Flask`.
"""
app.config.setdefault('CSRF_COOKIE_NAME', 'csrftoken')
app.config.setdefault('CSRF_HEADER', 'X-CSRFToken')
app.config.setdefault(
'CSRF_METHODS', ['POST', 'PUT', 'PATCH', 'DELETE'])
app.config.setdefault('CSRF_TOKEN_LENGTH', 32)
app.config.setdefault(
'CSRF_ALLOWED_CHARS', string.ascii_letters + string.digits)
app.config.setdefault('CSRF_SECRET_SALT', 'invenio-csrf-token')
app.config.setdefault('CSRF_FORCE_SECURE_REFERER', True)
app.config.setdefault(
'CSRF_COOKIE_SAMESITE',
app.config.get('SESSION_COOKIE_SAMESITE') or 'Lax')
# The token last for 24 hours, but the cookie for 7 days. This allows
# us to implement transparent token rotation during those 7 days. Note,
# that the token is automatically rotated on login, thus you can also
# change PERMANENT_SESSION_LIFETIME
app.config.setdefault('CSRF_TOKEN_EXPIRES_IN', 60*60*24)
# We allow usage of an expired CSRF token during this period. This way
# we can rotate the CSRF token without the user getting an CSRF error.
# Align with CSRF_COOKIE_MAX_AGE
app.config.setdefault('CSRF_TOKEN_GRACE_PERIOD', 60*60*24*7)
@app.after_request
app.extensions['invenio-csrf'] = self
class CSRFProtectMiddleware(CSRFTokenMiddleware):
"""CSRF Middleware."""
def __init__(self, app=None):
"""Middleware initialization.
:param app: An instance of :class:`flask.Flask`.
"""
self._exempt_views = set()
self._exempt_blueprints = set()
self._before_protect_funcs = []
if app:
self.init_app(app)
def init_app(self, app):
"""Initialize middleware extension.
:param app: An instance of :class:`flask.Flask`.
"""
super(CSRFProtectMiddleware, self).init_app(app)
@app.before_request
def csrf_protect():
"""CSRF protect method."""
for func in self._before_protect_funcs:
func()
is_method_vulnerable = request.method in app.config['CSRF_METHODS']
if not is_method_vulnerable:
return
if request.blueprint in self._exempt_blueprints:
return
if hasattr(request, 'skip_csrf_check'):
return
view = app.view_functions.get(request.endpoint)
if view:
dest = '{0}.{1}'.format(view.__module__, view.__name__)
if dest in self._exempt_views:
return
return csrf_validate()
def before_csrf_protect(self, f):
"""Register functions to be invoked before checking csrf.
The function accepts nothing as parameters.
"""
self._before_protect_funcs.append(f)
return f
def exempt(self, view):
"""Mark a view or blueprint to be excluded from CSRF protection."""
if isinstance(view, Blueprint):
self._exempt_blueprints.add(view.name)
return view
if isinstance(view, string_types):
view_location = view
else:
view_location = '.'.join((view.__module__, view.__name__))
self._exempt_views.add(view_location)
return view
csrf = CSRFProtectMiddleware()
| 32.662252 | 79 | 0.662612 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2020 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""CSRF Middleware.
The implementation is highly inspred from Django's initial implementation
about CSRF protection. For more information you can see here:
<https://github.com/django/django/blob/master/django/middleware/csrf.py>
"""
import re
import secrets
import string
from datetime import datetime, timedelta, timezone
from flask import Blueprint, abort, current_app, request
from itsdangerous import BadSignature, SignatureExpired, \
TimedJSONWebSignatureSerializer
from six import string_types
from six.moves.urllib.parse import urlparse
from .errors import RESTCSRFError
REASON_NO_REFERER = "Referer checking failed - no Referer."
REASON_BAD_REFERER = (
"Referer checking failed - %s does not match any trusted origins."
)
REASON_NO_CSRF_COOKIE = "CSRF cookie not set."
REASON_BAD_TOKEN = "CSRF token missing or incorrect."
REASON_MALFORMED_REFERER = "Referer checking failed - Referer is malformed."
REASON_INSECURE_REFERER = (
"Referer checking failed - Referer is insecure while host is secure."
)
REASON_TOKEN_EXPIRED = "CSRF token expired. Try again."
def _get_csrf_serializer(expires_in=None):
    """Note that this serializer is used to encode/decode the CSRF cookie.

    In case you change this implementation bear in mind that the token
    generated must be signed so as to avoid any client-side tampering.
    """
    # Default token lifetime comes from the app config (set in init_app).
    expires_in = expires_in or current_app.config['CSRF_TOKEN_EXPIRES_IN']
    # NOTE(review): the 'CHANGE_ME' fallback means a deployment without a
    # SECRET_KEY silently signs tokens with a known key -- confirm that
    # production configs always set CSRF_SECRET or SECRET_KEY.
    return TimedJSONWebSignatureSerializer(
        current_app.config.get(
            'CSRF_SECRET',
            current_app.config.get('SECRET_KEY') or 'CHANGE_ME'),
        salt=current_app.config['CSRF_SECRET_SALT'],
        expires_in=expires_in,
    )
def _get_random_string(length, allowed_chars):
    """Return a cryptographically random string of *length* characters,
    each drawn independently from *allowed_chars*."""
    chars = [secrets.choice(allowed_chars) for _ in range(length)]
    return ''.join(chars)
def _get_new_csrf_token(expires_in=None):
    # Generate a fresh random token and wrap it in a signed, timestamped
    # payload, so the cookie value cannot be forged or tampered with
    # client-side.
    csrf_serializer = _get_csrf_serializer(expires_in=expires_in)
    encoded_token = csrf_serializer.dumps(
        _get_random_string(
            current_app.config['CSRF_TOKEN_LENGTH'],
            current_app.config['CSRF_ALLOWED_CHARS'],
        )
    )
    return encoded_token
def _get_csrf_token():
    """Return the decoded CSRF token from the request cookie, or None when
    the cookie is not present."""
    cookie_value = request.cookies.get(current_app.config['CSRF_COOKIE_NAME'])
    if cookie_value is None:
        return None
    return _decode_csrf(cookie_value)
def _decode_csrf(data):
    # Verify the signature (and age) of a client-supplied CSRF token and
    # return its payload. Aborts with 400 on tampering or when the token is
    # older than the rotation grace period.
    csrf_serializer = _get_csrf_serializer()
    try:
        return csrf_serializer.loads(data)
    except SignatureExpired as e:
        grace_period = timedelta(
            seconds=current_app.config['CSRF_TOKEN_GRACE_PERIOD'])
        # NOTE(review): this assumes e.date_signed is timezone-aware (UTC),
        # since it is compared with an aware "now" -- confirm with the
        # installed itsdangerous version.
        if e.date_signed < datetime.now(tz=timezone.utc) - grace_period:
            # Grace period for token rotation exceeded.
            _abort400(REASON_TOKEN_EXPIRED)
        else:
            # Accept expired token, but rotate it to a new one.
            reset_token()
            return e.payload
    except BadSignature:
        _abort400(REASON_BAD_TOKEN)
def _set_token(response):
    # Issue a fresh signed CSRF cookie on the outgoing response. The cookie
    # deliberately outlives the token (see init_app) so expired-but-signed
    # tokens can be rotated transparently within the grace period.
    response.set_cookie(
        current_app.config['CSRF_COOKIE_NAME'],
        _get_new_csrf_token(),
        max_age=current_app.config.get(
            # 1 week for cookie (but we rotate the token every day)
            'CSRF_COOKIE_MAX_AGE', 60*60*24*7),
        domain=current_app.config.get(
            'CSRF_COOKIE_DOMAIN',
            current_app.session_interface.get_cookie_domain(
                current_app)),
        path=current_app.session_interface.get_cookie_path(
            current_app),
        secure=current_app.config.get('SESSION_COOKIE_SECURE', True),
        # httponly=False on purpose: JavaScript must read this cookie to echo
        # it back in the X-CSRFToken header (double-submit pattern).
        httponly=False,
        samesite=current_app.config['CSRF_COOKIE_SAMESITE'],
    )
def _get_submitted_csrf_token():
    """Return the raw CSRF token submitted in the configured request header,
    or None when the header is absent or empty."""
    token = request.headers.get(current_app.config['CSRF_HEADER'])
    return token if token else None
def _is_referer_secure(referer):
    """Return True when *referer* uses an https scheme, or when secure
    referers are not being enforced by configuration."""
    if 'https' in referer.scheme:
        return True
    return not current_app.config['CSRF_FORCE_SECURE_REFERER']
def _abort400(reason):
    # Abort the current request with HTTP 400 and a human-readable reason.
    abort(400, reason)
def csrf_validate():
    """Check CSRF cookie against request headers.

    Aborts the request with HTTP 400 on any failed check; returns None on
    success. Referer checks only apply to secure (HTTPS) requests.
    """
    if request.is_secure:
        referer = request.referrer
        if referer is None:
            return _abort400(REASON_NO_REFERER)
        referer = urlparse(referer)
        # Make sure we have a valid URL for Referer.
        if '' in (referer.scheme, referer.netloc):
            return _abort400(REASON_MALFORMED_REFERER)
        # Ensure that our Referer is also secure.
        if not _is_referer_secure(referer):
            return _abort400(REASON_INSECURE_REFERER)
        # Only requests originating from explicitly allowed hosts pass.
        is_hostname_allowed = referer.hostname in \
            current_app.config.get('APP_ALLOWED_HOSTS')
        if not is_hostname_allowed:
            reason = REASON_BAD_REFERER % referer.geturl()
            return _abort400(reason)
    csrf_token = _get_csrf_token()
    if csrf_token is None:
        return _abort400(REASON_NO_CSRF_COOKIE)
    request_csrf_token = _get_submitted_csrf_token()
    if not request_csrf_token:
        # _abort400 raises, so the missing `return` here is harmless.
        _abort400(REASON_BAD_TOKEN)
    # Compare the decoded (signed) payloads rather than the raw encodings,
    # so a rotated-but-still-valid token still matches.
    decoded_request_csrf_token = _decode_csrf(request_csrf_token)
    if csrf_token != decoded_request_csrf_token:
        return _abort400(REASON_BAD_TOKEN)
def reset_token():
    """Change the CSRF token in use for a request."""
    # The flag is picked up by the after_request hook (csrf_send), which
    # re-issues the cookie on the outgoing response.
    request.csrf_cookie_needs_reset = True
class CSRFTokenMiddleware():
    """CSRF Token Middleware.

    Registers configuration defaults and an after-request hook that issues
    the CSRF cookie; it does not itself validate incoming requests (see
    CSRFProtectMiddleware for that).
    """

    def __init__(self, app=None):
        """Middleware initialization.

        :param app: An instance of :class:`flask.Flask`.
        """
        if app:
            self.init_app(app)

    def init_app(self, app):
        """Initialize middleware extension.

        :param app: An instance of :class:`flask.Flask`.
        """
        app.config.setdefault('CSRF_COOKIE_NAME', 'csrftoken')
        app.config.setdefault('CSRF_HEADER', 'X-CSRFToken')
        # Only state-changing methods are considered CSRF-vulnerable.
        app.config.setdefault(
            'CSRF_METHODS', ['POST', 'PUT', 'PATCH', 'DELETE'])
        app.config.setdefault('CSRF_TOKEN_LENGTH', 32)
        app.config.setdefault(
            'CSRF_ALLOWED_CHARS', string.ascii_letters + string.digits)
        app.config.setdefault('CSRF_SECRET_SALT', 'invenio-csrf-token')
        app.config.setdefault('CSRF_FORCE_SECURE_REFERER', True)
        app.config.setdefault(
            'CSRF_COOKIE_SAMESITE',
            app.config.get('SESSION_COOKIE_SAMESITE') or 'Lax')
        # The token last for 24 hours, but the cookie for 7 days. This allows
        # us to implement transparent token rotation during those 7 days. Note,
        # that the token is automatically rotated on login, thus you can also
        # change PERMANENT_SESSION_LIFETIME
        app.config.setdefault('CSRF_TOKEN_EXPIRES_IN', 60*60*24)
        # We allow usage of an expired CSRF token during this period. This way
        # we can rotate the CSRF token without the user getting an CSRF error.
        # Align with CSRF_COOKIE_MAX_AGE
        app.config.setdefault('CSRF_TOKEN_GRACE_PERIOD', 60*60*24*7)

        @app.after_request
        def csrf_send(response):
            # (Re-)issue the CSRF cookie after unsafe requests when it is
            # missing or flagged for rotation by reset_token().
            is_method_vulnerable = request.method in app.config['CSRF_METHODS']
            cookie_needs_reset = getattr(
                request, 'csrf_cookie_needs_reset', False)
            cookie_is_missing = current_app.config['CSRF_COOKIE_NAME'] not in \
                request.cookies
            if is_method_vulnerable \
                    and (cookie_needs_reset or cookie_is_missing):
                _set_token(response)
            return response

        app.extensions['invenio-csrf'] = self
class CSRFProtectMiddleware(CSRFTokenMiddleware):
    """CSRF Middleware.

    Extends the token middleware with a before-request hook that validates
    incoming requests, plus an exemption mechanism for views and blueprints.
    """

    def __init__(self, app=None):
        """Middleware initialization.

        :param app: An instance of :class:`flask.Flask`.
        """
        # Views/blueprints opted out of protection, and user hooks to run
        # before every check.
        self._exempt_views = set()
        self._exempt_blueprints = set()
        self._before_protect_funcs = []
        if app:
            self.init_app(app)

    def init_app(self, app):
        """Initialize middleware extension.

        :param app: An instance of :class:`flask.Flask`.
        """
        super(CSRFProtectMiddleware, self).init_app(app)

        @app.before_request
        def csrf_protect():
            """CSRF protect method."""
            for func in self._before_protect_funcs:
                func()
            # Safe methods (GET/HEAD/...) are never validated.
            is_method_vulnerable = request.method in app.config['CSRF_METHODS']
            if not is_method_vulnerable:
                return
            if request.blueprint in self._exempt_blueprints:
                return
            # A hook may flag the request as already authenticated by
            # non-cookie means; honor that and skip validation.
            if hasattr(request, 'skip_csrf_check'):
                return
            view = app.view_functions.get(request.endpoint)
            if view:
                dest = '{0}.{1}'.format(view.__module__, view.__name__)
                if dest in self._exempt_views:
                    return
            return csrf_validate()

    def before_csrf_protect(self, f):
        """Register functions to be invoked before checking csrf.

        The function accepts nothing as parameters.
        """
        self._before_protect_funcs.append(f)
        return f

    def exempt(self, view):
        """Mark a view or blueprint to be excluded from CSRF protection."""
        if isinstance(view, Blueprint):
            self._exempt_blueprints.add(view.name)
            return view
        if isinstance(view, string_types):
            view_location = view
        else:
            # Views are keyed by their dotted "module.name" path.
            view_location = '.'.join((view.__module__, view.__name__))
        self._exempt_views.add(view_location)
        return view
csrf = CSRFProtectMiddleware()
| 2,644 | 0 | 214 |
64796e0dca6ac811f76e9b1d579e2ef14b18c171 | 849 | py | Python | polymetis/polymetis/tests/python/polymetis/test_gripper_interface.py | ali-senguel/fairo-explore | 893481da270eed1e6d504c71e483d685ca9218d1 | [
"MIT"
] | null | null | null | polymetis/polymetis/tests/python/polymetis/test_gripper_interface.py | ali-senguel/fairo-explore | 893481da270eed1e6d504c71e483d685ca9218d1 | [
"MIT"
] | null | null | null | polymetis/polymetis/tests/python/polymetis/test_gripper_interface.py | ali-senguel/fairo-explore | 893481da270eed1e6d504c71e483d685ca9218d1 | [
"MIT"
] | null | null | null | import pytest
import unittest
from unittest.mock import MagicMock
from polymetis import GripperInterface
import polymetis_pb2
@pytest.fixture
@pytest.mark.parametrize("blocking", [True, False])
| 26.53125 | 81 | 0.765607 | import pytest
import unittest
from unittest.mock import MagicMock
from polymetis import GripperInterface
import polymetis_pb2
@pytest.fixture
def mocked_gripper(request):
    """A GripperInterface whose gRPC connection is replaced by a MagicMock."""
    gripper = GripperInterface()
    gripper.grpc_connection = MagicMock()
    return gripper
@pytest.mark.parametrize("blocking", [True, False])
def test_gripper_interface(mocked_gripper, blocking):
    """Exercise the public gripper API and check each gRPC stub is hit once."""
    # Inputs
    width = 0.1
    speed = 0.2
    force = 0.3
    # Test methods
    mocked_gripper.get_state()
    mocked_gripper.goto(width=width, speed=speed, force=force, blocking=blocking)
    mocked_gripper.grasp(speed=speed, force=force, blocking=blocking)
    # Check asserts
    mocked_gripper.grpc_connection.GetState.assert_called_once()
    mocked_gripper.grpc_connection.Goto.assert_called_once()
    mocked_gripper.grpc_connection.Grasp.assert_called_once()
| 606 | 0 | 44 |
152b6caaa0f282aa664a559068342555529558dd | 978 | py | Python | sites/web/web/migrations/0005_auto_20210117_1826.py | PrathameshBolade/yats | 93bb5271255120b7131a3bc416e3386428a4d3ec | [
"MIT"
] | 54 | 2015-01-26T07:56:59.000Z | 2022-03-10T18:48:05.000Z | sites/web/web/migrations/0005_auto_20210117_1826.py | PrathameshBolade/yats | 93bb5271255120b7131a3bc416e3386428a4d3ec | [
"MIT"
] | 8 | 2015-03-15T18:33:39.000Z | 2021-12-21T14:23:11.000Z | sites/web/web/migrations/0005_auto_20210117_1826.py | PrathameshBolade/yats | 93bb5271255120b7131a3bc416e3386428a4d3ec | [
"MIT"
] | 23 | 2015-02-19T16:55:35.000Z | 2022-03-11T19:49:06.000Z | # Generated by Django 2.2.10 on 2021-01-17 17:26
from django.db import migrations, models
| 33.724138 | 169 | 0.610429 | # Generated by Django 2.2.10 on 2021-01-17 17:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('web', '0004_auto_20180911_1440'),
]
operations = [
migrations.AlterField(
model_name='test',
name='billing_type',
field=models.CharField(blank=True, choices=[('service', 'service'), ('development', 'development')], max_length=255, null=True, verbose_name='billing type'),
),
migrations.AlterField(
model_name='test',
name='fixed_in_version',
field=models.CharField(blank=True, choices=[('last_python2', 'last_python2')], max_length=255, verbose_name='fixed in version'),
),
migrations.AlterField(
model_name='test',
name='version',
field=models.CharField(choices=[('last_python2', 'last_python2')], max_length=255, verbose_name='version'),
),
]
| 0 | 863 | 23 |
215fb216f7f044d8ecbf8c81133f5e55057f1c0a | 10,676 | py | Python | tests/test_commands.py | voroneril/pixi | aefe0e4d6b8799f98c959487d3501c17ecec42d8 | [
"Apache-2.0"
] | 1 | 2020-11-09T00:03:04.000Z | 2020-11-09T00:03:04.000Z | tests/test_commands.py | voroneril/pixi | aefe0e4d6b8799f98c959487d3501c17ecec42d8 | [
"Apache-2.0"
] | 1 | 2019-08-05T15:52:27.000Z | 2019-08-05T15:52:27.000Z | tests/test_commands.py | voroneril/pixi | aefe0e4d6b8799f98c959487d3501c17ecec42d8 | [
"Apache-2.0"
] | 1 | 2020-11-10T03:33:06.000Z | 2020-11-10T03:33:06.000Z | from pathlib import Path
from shutil import copyfile
from unittest import mock
import click
from click.testing import CliRunner
from pixivapi import BadApiResponse, LoginError, Visibility
from pixi.commands import (
_confirm_table_wipe,
_get_starting_bookmark_offset,
artist,
auth,
bookmarks,
config,
failed,
illust,
migrate,
wipe,
)
from pixi.database import Migration, database
from pixi.errors import DownloadFailed, PixiError
@mock.patch("pixi.commands.Config")
@mock.patch("pixi.commands.Client")
@mock.patch("pixi.commands.Config")
@mock.patch("pixi.commands.Client")
@mock.patch("click.edit")
@mock.patch("click.edit")
@mock.patch("pixi.commands.calculate_migrations_needed")
@mock.patch("pixi.commands.calculate_migrations_needed")
@mock.patch("pixi.commands.download_image")
@mock.patch("pixi.commands.Client")
@mock.patch("pixi.commands.Config")
@mock.patch("pixi.commands.download_image")
@mock.patch("pixi.commands.Client")
@mock.patch("pixi.commands.Config")
@mock.patch("pixi.commands.download_pages")
@mock.patch("pixi.commands.Client")
@mock.patch("pixi.commands.Config")
@mock.patch("pixi.commands.download_pages")
@mock.patch("pixi.commands.Client")
@mock.patch("pixi.commands.Config")
@mock.patch("pixi.commands.download_pages")
@mock.patch("pixi.commands.Client")
@mock.patch("pixi.commands.Config")
@mock.patch("pixi.commands._confirm_table_wipe")
@mock.patch("pixi.commands._confirm_table_wipe")
@mock.patch("pixi.commands._confirm_table_wipe")
| 33.258567 | 85 | 0.637411 | from pathlib import Path
from shutil import copyfile
from unittest import mock
import click
from click.testing import CliRunner
from pixivapi import BadApiResponse, LoginError, Visibility
from pixi.commands import (
_confirm_table_wipe,
_get_starting_bookmark_offset,
artist,
auth,
bookmarks,
config,
failed,
illust,
migrate,
wipe,
)
from pixi.database import Migration, database
from pixi.errors import DownloadFailed, PixiError
@mock.patch("pixi.commands.Config")
@mock.patch("pixi.commands.Client")
def test_auth_failure(client, _):
    """`pixi auth` surfaces a pixivapi LoginError as a PixiError."""
    # mock.patch injects bottom-up: client <- Client, _ <- Config.
    client.return_value.login.side_effect = LoginError
    result = CliRunner().invoke(auth, ["-u", "u", "-p", "p"])
    assert isinstance(result.exception, PixiError)
@mock.patch("pixi.commands.Config")
@mock.patch("pixi.commands.Client")
def test_auth_success(client, config):
    """`pixi auth` stores the client's refresh token in the config."""
    client.return_value.refresh_token = "token value"
    config_dict = {"pixi": {}}
    config.return_value = config_dict
    CliRunner().invoke(auth, ["-u", "u", "-p", "p"])
    assert config_dict["pixi"]["refresh_token"] == "token value"
@mock.patch("click.edit")
def test_edit_config_completed(edit, monkeypatch):
    """`pixi config` opens the current file in an editor and writes back the
    edited text when the edit completes."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        config_path = Path.cwd() / "config.ini"
        with config_path.open("w") as f:
            f.write("a bunch of text")
        monkeypatch.setattr("pixi.commands.CONFIG_PATH", config_path)
        edit.return_value = "text2"
        result = runner.invoke(config)
        assert result.output == "Edit completed.\n"
        # BUGFIX: `edit.called_with(...)` is an auto-created Mock attribute
        # and is always truthy, so the old assertion never checked anything;
        # assert_called_with actually verifies the editor received the text.
        edit.assert_called_with("a bunch of text")
        with config_path.open("r") as f:
            assert "text2" == f.read()
@mock.patch("click.edit")
def test_edit_config_aborted(edit, monkeypatch):
    """`pixi config` leaves the file untouched when the editor is aborted
    (click.edit returns None)."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        config_path = Path.cwd() / "config.ini"
        with config_path.open("w") as f:
            f.write("a bunch of text")
        monkeypatch.setattr("pixi.commands.CONFIG_PATH", config_path)
        edit.return_value = None
        result = runner.invoke(config)
        assert result.output == "Edit aborted.\n"
        with config_path.open("r") as f:
            assert "a bunch of text" == f.read()
@mock.patch("pixi.commands.calculate_migrations_needed")
def test_migrate(calculate, monkeypatch):
    """`pixi migrate` runs each pending migration's SQL and records its
    version in the versions table."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        fake_mig = Path.cwd() / "0001.sql"
        with fake_mig.open("w") as f:
            f.write("INSERT INTO test (id) VALUES (29)")
        monkeypatch.setattr("pixi.database.DATABASE_PATH", Path.cwd() / "db.sqlite3")
        # Seed a minimal schema for the fake migration to run against.
        with database() as (conn, cursor):
            cursor.execute("CREATE TABLE test (id INTEGER PRIMARY KEY)")
            cursor.execute("CREATE TABLE versions (version INTEGER PRIMARY KEY)")
            conn.commit()
        calculate.return_value = [Migration(path=fake_mig, version=9)]
        runner.invoke(migrate)
        with database() as (conn, cursor):
            cursor.execute("SELECT version FROM versions")
            assert 9 == cursor.fetchone()[0]
            cursor.execute("SELECT id FROM test")
            assert 29 == cursor.fetchone()[0]
@mock.patch("pixi.commands.calculate_migrations_needed")
def test_migrate_not_needed(calculate, monkeypatch):
    """`pixi migrate` exits when there are no pending migrations."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        monkeypatch.setattr("pixi.database.DATABASE_PATH", Path.cwd() / "db.sqlite3")
        calculate.return_value = []
        result = runner.invoke(migrate)
        assert isinstance(result.exception, SystemExit)
@mock.patch("pixi.commands.download_image")
@mock.patch("pixi.commands.Client")
@mock.patch("pixi.commands.Config")
def test_illust(_, client, download_image):
    """`illust` parses the ID out of the URL, fetches the illustration and
    forwards the CLI flags to download_image."""
    client.return_value.fetch_illustration.return_value = "Illust!"
    runner = CliRunner()
    with runner.isolated_filesystem():
        runner.invoke(
            illust,
            [
                "--directory",
                str(Path.cwd()),
                "--no-track",
                "--allow-duplicates",
                "https://www.pixiv.net/member_illust.php?illust_id=12345",
            ],
        )
        client.return_value.fetch_illustration.assert_called_with(12345)
        assert download_image.call_args[0][0] == "Illust!"
        assert download_image.call_args[1]["directory"] == Path.cwd()
        assert download_image.call_args[1]["allow_duplicate"] is True
        assert download_image.call_args[1]["track_download"] is False
@mock.patch("pixi.commands.download_image")
@mock.patch("pixi.commands.Client")
@mock.patch("pixi.commands.Config")
def test_illust_error(_, __, download_image):
    """A BadApiResponse from the downloader surfaces as DownloadFailed."""
    download_image.side_effect = BadApiResponse
    result = CliRunner().invoke(illust, "12345")
    assert isinstance(result.exception, DownloadFailed)
@mock.patch("pixi.commands.download_pages")
@mock.patch("pixi.commands.Client")
@mock.patch("pixi.commands.Config")
def test_artist(_, client, download_pages):
    """`artist` converts --page into a 30-per-page offset and wires the page
    fetcher callback to Client.fetch_user_illustrations."""
    CliRunner().invoke(
        artist,
        [
            "--page",
            "372",
            "https://www.pixiv.net/member.php?id=12345",
        ],
    )
    # Pages hold 30 illustrations; page 372 starts after 371 full pages.
    assert download_pages.call_args[1]["starting_offset"] == 371 * 30
    # Drive the page-fetcher callback that was handed to download_pages.
    download_pages.call_args[0][0](222)
    fetch_user_illustrations = client.return_value.fetch_user_illustrations
    fetch_user_illustrations.assert_called_with(12345, offset=222)
@mock.patch("pixi.commands.download_pages")
@mock.patch("pixi.commands.Client")
@mock.patch("pixi.commands.Config")
def test_bookmarks(_, client, download_pages):
    """With no visibility filter, `bookmarks` schedules two download passes
    (presumably public and private — confirm against the command)."""
    CliRunner().invoke(bookmarks)
    assert download_pages.call_count == 2
@mock.patch("pixi.commands.download_pages")
@mock.patch("pixi.commands.Client")
@mock.patch("pixi.commands.Config")
def test_bookmarks_with_visibility(_, client, download_pages):
    """Restricting --visibility queues a single download pass whose page
    fetcher asks the client for exactly that visibility."""
    CliRunner().invoke(bookmarks, ["--visibility", "public"])
    assert download_pages.call_count == 1
    client.return_value.account.id = 789
    # Drive the page-fetcher callback that was handed to download_pages.
    download_pages.call_args[0][0](10)
    # Bug fix: Mock has no `called_with` method — accessing it just creates a
    # truthy child mock, so the old `assert ...called_with(...)` always
    # passed. `assert_called_with` performs the real verification.
    client.return_value.fetch_user_bookmarks.assert_called_with(
        user=789,
        max_bookmark_id=10,
        visibility=Visibility.PUBLIC,
        tag=None,
    )
def test_get_starting_bookmark_offset():
    """For any page past the first, the offset comes from the response's "next" field."""
    fetcher = mock.Mock(return_value={"next": 831831})
    assert _get_starting_bookmark_offset(fetcher, 2) == 831831
def test_get_starting_bookmark_offset_page_1():
    """Page 1 needs no offset, so the helper returns None without fetching."""
    fetcher = mock.Mock(return_value={"next": 831831})
    assert _get_starting_bookmark_offset(fetcher, 1) is None
def test_failed(monkeypatch):
    """`failed` lists previously failed downloads with timestamp, artist and URL."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        db_path = Path.cwd() / "db.sqlite3"
        copyfile(Path(__file__).parent / "test.db", db_path)
        monkeypatch.setattr("pixi.database.DATABASE_PATH", db_path)
        with database() as (conn, cursor):
            cursor.execute(
                """
                INSERT INTO FAILED (id, artist, title, time)
                VALUES (?, ?, ?, ?)
                """,
                (
                    20,
                    "testing",
                    "illustration",
                    "2019-01-01T01:23:45-04:00",
                ),
            )
        result = runner.invoke(failed)
        assert result.output == (
            "Jan 01, 2019 01:23:45 | testing - illustration\n"
            "URL: https://www.pixiv.net/member_illust.php?mode=medium"
            "&illust_id=20\n\n"
        )
@mock.patch("pixi.commands._confirm_table_wipe")
def test_wipe(_, monkeypatch):
    """`wipe --table=all` clears every table once confirmation is bypassed."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        db_path = Path.cwd() / "db.sqlite3"
        copyfile(Path(__file__).parent / "test.db", db_path)
        monkeypatch.setattr("pixi.database.DATABASE_PATH", db_path)
        with database() as (conn, cursor):
            cursor.execute('INSERT INTO downloaded (id, path) VALUES (1, "a")')
            cursor.execute(
                'INSERT INTO FAILED (id, artist, title) VALUES (1, "a", "b")'
            )
            conn.commit()
        runner.invoke(wipe, "--table=all")
        with database() as (conn, cursor):
            cursor.execute("SELECT 1 FROM downloaded")
            assert not cursor.fetchone()
            cursor.execute("SELECT 1 FROM failed")
            assert not cursor.fetchone()
@mock.patch("pixi.commands._confirm_table_wipe")
def test_wipe_single(_, monkeypatch):
    """`wipe --table=failed` clears only the requested table, leaving the rest."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        db_path = Path.cwd() / "db.sqlite3"
        copyfile(Path(__file__).parent / "test.db", db_path)
        monkeypatch.setattr("pixi.database.DATABASE_PATH", db_path)
        with database() as (conn, cursor):
            cursor.execute('INSERT INTO downloaded (id, path) VALUES (1, "a")')
            cursor.execute(
                'INSERT INTO FAILED (id, artist, title) VALUES (1, "a", "b")'
            )
            conn.commit()
        runner.invoke(wipe, "--table=failed")
        with database() as (conn, cursor):
            # downloaded survives; only failed was wiped.
            cursor.execute("SELECT 1 FROM downloaded")
            assert cursor.fetchone()
            cursor.execute("SELECT 1 FROM failed")
            assert not cursor.fetchone()
@mock.patch("pixi.commands._confirm_table_wipe")
def test_wipe_failed(confirm, monkeypatch):
    """If the confirmation prompt aborts, `wipe` exits and no rows are deleted."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        db_path = Path.cwd() / "db.sqlite3"
        copyfile(Path(__file__).parent / "test.db", db_path)
        monkeypatch.setattr("pixi.database.DATABASE_PATH", db_path)
        # Simulate the user failing/cancelling the confirmation prompt.
        confirm.side_effect = click.Abort
        with database() as (conn, cursor):
            cursor.execute('INSERT INTO downloaded (id, path) VALUES (1, "a")')
            cursor.execute(
                'INSERT INTO FAILED (id, artist, title) VALUES (1, "a", "b")'
            )
            conn.commit()
        result = runner.invoke(wipe, "--table=all")
        assert isinstance(result.exception, SystemExit)
        with database() as (conn, cursor):
            cursor.execute("SELECT 1 FROM downloaded")
            assert cursor.fetchone()
            cursor.execute("SELECT 1 FROM failed")
            assert cursor.fetchone()
def test_confirm_table_wipe():
    """Typing the exact table name confirms the wipe without raising."""
    command = click.command()(lambda: _confirm_table_wipe("table"))
    result = CliRunner().invoke(command, input="table")
    assert not result.exception
def test_confirm_table_wipe_fail():
    """Typing anything else aborts the command with a SystemExit."""
    command = click.command()(lambda: _confirm_table_wipe("table"))
    result = CliRunner().invoke(command, input="not table")
    assert isinstance(result.exception, SystemExit)
| 8,711 | 0 | 423 |
4fd3ea83be0511369dac144b43bafc6128ea8267 | 4,873 | py | Python | src/beast.py | yotamfr/skempi | 9e5dbb7661a36c973edb0e94cf8bfe843f839e66 | [
"MIT"
] | 1 | 2021-11-08T14:16:40.000Z | 2021-11-08T14:16:40.000Z | src/beast.py | yotamfr/skempi | 9e5dbb7661a36c973edb0e94cf8bfe843f839e66 | [
"MIT"
] | 16 | 2019-12-16T21:16:26.000Z | 2022-03-11T23:33:34.000Z | src/beast.py | yotamfr/skempi | 9e5dbb7661a36c973edb0e94cf8bfe843f839e66 | [
"MIT"
] | null | null | null | import torch
from torch import nn
from torch import optim
from vae import *
from loader import *
from skempi_lib import *
from torch_utils import *
BATCH_SIZE = 32
LR = 1e-3
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
add_arguments(parser)
args = parser.parse_args()
net = VAE2(nc=24, ngf=64, ndf=64, latent_variable_size=256)
net.to(device)
# opt = optim.SGD(net.parameters(), lr=LR, momentum=0.9, nesterov=True)
opt = ScheduledOptimizer(optim.Adam(net.parameters(), lr=LR), LR, num_iterations=200)
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '%s'" % args.resume)
checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage)
init_epoch = checkpoint['epoch']
net.load_state_dict(checkpoint['net'])
opt.load_state_dict(checkpoint['opt'])
else:
print("=> no checkpoint found at '%s'" % args.resume)
num_epochs = args.num_epochs
init_epoch = 0
n_iter = 0
for epoch in range(init_epoch, num_epochs):
train_iter, eval_iter = 20000, 5000
loader = pdb_loader(PDB_ZIP, TRAINING_SET, train_iter, 19.9, 1.25, handle_error=handle_error)
n_iter = train(net, opt, batch_generator(loader, BATCH_SIZE), train_iter, n_iter)
if epoch < num_epochs - 1 and epoch % args.eval_every != 0:
continue
loader = pdb_loader(PDB_ZIP, VALIDATION_SET, eval_iter, 19.9, 1.25, handle_error=handle_error)
loss = evaluate(net, batch_generator(loader, BATCH_SIZE), eval_iter, n_iter)
print("[Epoch %d/%d] (Validation Loss: %.5f" % (epoch + 1, num_epochs, loss))
save_checkpoint({
'lr': opt.lr,
'epoch': epoch,
'net': net.state_dict(),
'opt': opt.state_dict()
}, loss, "beast", args.out_dir)
| 34.316901 | 102 | 0.601683 | import torch
from torch import nn
from torch import optim
from vae import *
from loader import *
from skempi_lib import *
from torch_utils import *
BATCH_SIZE = 32
LR = 1e-3
def get_loss(aa_hat, aa, x_hat, x, mu, logvar):
    """Return the three VAE loss terms as (L1 reconstruction, KL divergence, CE).

    aa_hat are amino-acid logits scored against the integer targets aa;
    x_hat/x are the reconstruction and its target; mu/logvar parameterize
    the latent Gaussian.
    """
    recon_loss = nn.L1Loss(reduction='sum')(x_hat, x)
    kl_div = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    # BCE = nn.BCELoss(reduction='sum')(x_hat, x)
    log_probs = F.log_softmax(aa_hat, 1)
    ce_loss = -log_probs.gather(1, aa.unsqueeze(1)).mean()
    return recon_loss, kl_div, ce_loss
def train(model, opt, batch_generator, length_xy, n_iter):
    """Run one training epoch over *batch_generator*.

    Args:
        model: the VAE; forward must return (aa_hat, x_hat, mu, logvar).
        opt: ScheduledOptimizer wrapper (provides step_and_update_lr and lr).
        batch_generator: yields (aa, x, y) batches.
        length_xy: total sample count, used only for the progress bar.
        n_iter: global step counter carried across epochs.

    Returns:
        The updated global step counter.
    """
    model.train()
    pbar = tqdm(total=length_xy, desc="calculating...")
    err = 0.
    for i, (aa, x, y) in enumerate(batch_generator):
        opt.zero_grad()
        aa_hat, y_hat, mu, logvar = model(x)
        recons, kld, ce = get_loss(aa_hat, aa, y_hat, y, mu, logvar)
        # loss = recons + kld + ce
        # Only the cross-entropy term is optimized (full VAE loss kept above).
        loss = ce
        writer.add_scalars('VAE/Loss', {"train": loss.item()}, n_iter)
        writer.add_scalars('VAE/Recons', {"train": recons.item()}, n_iter)
        writer.add_scalars('VAE/KLD', {"train": kld.item()}, n_iter)
        writer.add_scalars('VAE/CE', {"train": ce.item()}, n_iter)
        n_iter += 1
        err += loss.item()
        loss.backward()
        # Bug fix: the gradient clip previously ran *before* backward(), where
        # the gradients had just been zeroed, making the clip a no-op.
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.25, norm_type=2)
        opt.step_and_update_lr(loss.item())
        lr, e = opt.lr, err/(i + 1)
        pbar.set_description("Training Loss:%.5f, LR: %.5f" % (e, lr))
        pbar.update(len(y))
    pbar.close()
    return n_iter
def evaluate(model, batch_generator, length_xy, n_iter):
    """Compute validation losses over *batch_generator*, log the per-term
    means to the summary writer, and return the mean loss (CE only, to match
    what train() optimizes)."""
    model.eval()
    pbar = tqdm(total=length_xy, desc="calculation...")
    err, loss1, loss2, loss3 = 0., 0., 0., 0.
    # Fix: evaluation previously ran with autograd enabled, building unused
    # computation graphs and wasting memory on every validation batch.
    with torch.no_grad():
        for i, (aa, x, y) in enumerate(batch_generator):
            aa_hat, y_hat, mu, logvar = model(x)
            recons, kld, ce = get_loss(aa_hat, aa, y_hat, y, mu, logvar)
            # loss = recons + kld + ce
            loss = ce
            err += loss.item()
            loss1 += recons.item()
            loss2 += kld.item()
            loss3 += ce.item()
            pbar.set_description("Validation Loss:%.5f" % (err/(i + 1),))
            pbar.update(len(y))
    writer.add_scalars('VAE/Loss', {"valid": err/(i + 1)}, n_iter)
    writer.add_scalars('VAE/Recons', {"valid": loss1/(i + 1)}, n_iter)
    writer.add_scalars('VAE/KLD', {"valid": loss2/(i + 1)}, n_iter)
    writer.add_scalars('VAE/CE', {"valid": loss3/(i + 1)}, n_iter)
    pbar.close()
    return err/(i + 1)
def add_arguments(parser):
    """Register the training CLI options on *parser*."""
    parser.add_argument(
        "-r", "--resume", type=str, default="", metavar="PATH",
        help="path to latest checkpoint (default: none)",
    )
    parser.add_argument(
        "-e", "--eval_every", type=int, default=1,
        help="How often to evaluate on the validation set.",
    )
    parser.add_argument(
        "-n", "--num_epochs", type=int, default=200,
        help="How many epochs to train the model?",
    )
    parser.add_argument(
        "-o", "--out_dir", type=str, required=False, default=gettempdir(),
        help="Specify the output directory.",
    )
def handle_error(pdb, err):
    """Report a PDB entry that failed to load, together with the error."""
    print("%s %s" % (pdb, err))
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    add_arguments(parser)
    args = parser.parse_args()
    net = VAE2(nc=24, ngf=64, ndf=64, latent_variable_size=256)
    net.to(device)
    # opt = optim.SGD(net.parameters(), lr=LR, momentum=0.9, nesterov=True)
    opt = ScheduledOptimizer(optim.Adam(net.parameters(), lr=LR), LR, num_iterations=200)
    # Defaults for a fresh run; a successfully loaded checkpoint overrides
    # init_epoch. (Bug fix: init_epoch used to be reset to 0 *after* the
    # checkpoint was loaded, so --resume always restarted epoch numbering.)
    init_epoch = 0
    n_iter = 0
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '%s'" % args.resume)
            checkpoint = torch.load(args.resume, map_location=lambda storage, loc: storage)
            init_epoch = checkpoint['epoch']
            net.load_state_dict(checkpoint['net'])
            opt.load_state_dict(checkpoint['opt'])
        else:
            print("=> no checkpoint found at '%s'" % args.resume)
    num_epochs = args.num_epochs
    for epoch in range(init_epoch, num_epochs):
        train_iter, eval_iter = 20000, 5000
        loader = pdb_loader(PDB_ZIP, TRAINING_SET, train_iter, 19.9, 1.25, handle_error=handle_error)
        n_iter = train(net, opt, batch_generator(loader, BATCH_SIZE), train_iter, n_iter)
        # Evaluate only every --eval_every epochs, but always on the last one.
        if epoch < num_epochs - 1 and epoch % args.eval_every != 0:
            continue
        loader = pdb_loader(PDB_ZIP, VALIDATION_SET, eval_iter, 19.9, 1.25, handle_error=handle_error)
        loss = evaluate(net, batch_generator(loader, BATCH_SIZE), eval_iter, n_iter)
        print("[Epoch %d/%d] (Validation Loss: %.5f" % (epoch + 1, num_epochs, loss))
        save_checkpoint({
            'lr': opt.lr,
            'epoch': epoch,
            'net': net.state_dict(),
            'opt': opt.state_dict()
        }, loss, "beast", args.out_dir)
| 2,826 | 0 | 115 |
e5c1d44e5f230d87e88a537feae2bd6b367c70b9 | 898 | py | Python | scripts/query_all_orders.py | egorsimchuk/binance_bot | af1caac32f8d4804aea3af83250fd4530d9787df | [
"Unlicense"
] | 1 | 2022-03-13T01:07:08.000Z | 2022-03-13T01:07:08.000Z | scripts/query_all_orders.py | egorsimchuk/binance_bot | af1caac32f8d4804aea3af83250fd4530d9787df | [
"Unlicense"
] | 4 | 2022-02-20T10:33:45.000Z | 2022-03-03T22:59:24.000Z | scripts/query_all_orders.py | egorsimchuk/binance_bot | af1caac32f8d4804aea3af83250fd4530d9787df | [
"Unlicense"
] | null | null | null | import argparse
from src.client.client import ClientHelper
import logging
from src.data.orders_handler import load_and_process
from src.data.preprocessing.orders import OrdersProcessor
from src.utils.logging import log_format, log_level
logging.basicConfig(format=log_format, level=log_level)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('api_key', type=str, help='Key from binance profile')
parser.add_argument('api_secret', type=str, help='Secret key from binance profile')
parser.add_argument('open_file', type=int, nargs='?', default=1, choices=[0,1], help='Open html report after creating')
args = parser.parse_args()
client_helper = ClientHelper(args.api_key, args.api_secret)
orders_processor = OrdersProcessor(client_helper=client_helper)
data = load_and_process(client_helper, orders_processor)
print(data.shape)
| 42.761905 | 123 | 0.77951 | import argparse
from src.client.client import ClientHelper
import logging
from src.data.orders_handler import load_and_process
from src.data.preprocessing.orders import OrdersProcessor
from src.utils.logging import log_format, log_level
logging.basicConfig(format=log_format, level=log_level)
if __name__ == '__main__':
    # CLI: fetch and preprocess all Binance orders for an account, then
    # report the resulting table's shape.
    parser = argparse.ArgumentParser()
    parser.add_argument('api_key', type=str, help='Key from binance profile')
    parser.add_argument('api_secret', type=str, help='Secret key from binance profile')
    # NOTE(review): open_file is parsed but never used in this script.
    parser.add_argument('open_file', type=int, nargs='?', default=1, choices=[0,1], help='Open html report after creating')
    args = parser.parse_args()
    client_helper = ClientHelper(args.api_key, args.api_secret)
    orders_processor = OrdersProcessor(client_helper=client_helper)
    data = load_and_process(client_helper, orders_processor)
    print(data.shape)
| 0 | 0 | 0 |
d69331e986956345298d242355aaa1cd0eefe66f | 138 | py | Python | day13/aoc-day13.py | SebastiaanZ/aoc-2018 | fb4d6db2ed592fa17554c80531384afdc0c34180 | [
"MIT"
] | 1 | 2019-03-16T19:20:11.000Z | 2019-03-16T19:20:11.000Z | day13/aoc-day13.py | SebastiaanZ/aoc-2018 | fb4d6db2ed592fa17554c80531384afdc0c34180 | [
"MIT"
] | null | null | null | day13/aoc-day13.py | SebastiaanZ/aoc-2018 | fb4d6db2ed592fa17554c80531384afdc0c34180 | [
"MIT"
] | null | null | null | from railroads import Track
track = Track("day13-input.txt")
track.run_partone()
track2 = Track("day13-input.txt")
track2.run_parttwo()
| 17.25 | 33 | 0.753623 | from railroads import Track
# Advent of Code 2018 day 13: run both puzzle parts on the same input file.
track = Track("day13-input.txt")
track.run_partone()
# A second Track is built from scratch — presumably because running part one
# mutates the simulation state; confirm against the railroads module.
track2 = Track("day13-input.txt")
track2.run_parttwo()
| 0 | 0 | 0 |
ac305c92f5ce9e62d182f5076f44c90f41ffa6af | 4,339 | py | Python | side_projects/extract_chr16_cpg/extract_location_and_context_to_csv.py | methylgrammarlab/proj_scwgbs | 287196898796eb617fef273bfaf9e978a57047dc | [
"MIT"
] | null | null | null | side_projects/extract_chr16_cpg/extract_location_and_context_to_csv.py | methylgrammarlab/proj_scwgbs | 287196898796eb617fef273bfaf9e978a57047dc | [
"MIT"
] | null | null | null | side_projects/extract_chr16_cpg/extract_location_and_context_to_csv.py | methylgrammarlab/proj_scwgbs | 287196898796eb617fef273bfaf9e978a57047dc | [
"MIT"
] | null | null | null | """
Extract information about chr16 for several patients for ben
"""
import pandas as pd
crc01_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC01\all_cpg_ratios_CRC01_chr16.dummy.pkl.zip"
crc11_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC11" \
r"\all_cpg_ratios_CRC11_chr16.dummy.pkl.zip"
crc13_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC13" \
r"\all_cpg_ratios_CRC13_chr16.dummy.pkl.zip"
crc02_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC02" \
r"\all_cpg_ratios_CRC02_chr16.dummy.pkl.zip"
crc04_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC04" \
r"\all_cpg_ratios_CRC04_chr16.dummy.pkl.zip"
crc09_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC09" \
r"\all_cpg_ratios_CRC09_chr16.dummy.pkl.zip"
crc10_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC10" \
r"\all_cpg_ratios_CRC10_chr16.dummy.pkl.zip"
crc12_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC12" \
r"\all_cpg_ratios_CRC12_chr16.dummy.pkl.zip"
crc14_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC14" \
r"\all_cpg_ratios_CRC14_chr16.dummy.pkl.zip"
crc15_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC15" \
r"\all_cpg_ratios_CRC15_chr16.dummy.pkl.zip"
valid_path = r"H:\Study\university\Computational-Biology\Year 3\Projects\proj_scwgbs\covariance\valid_cpg.pkl"
if __name__ == '__main__':
valid_data = pd.read_pickle(valid_path)
valid_data = valid_data[valid_data["chromosome"] == "16"]
valid_data["small_seq"] = valid_data["sequence"].str[73:77]
cpg1 = valid_data[valid_data["sequence"].str.count("CG") == 1]
cpg1["context"] = "other"
cpg1.loc[cpg1["small_seq"].str.contains("[AT]CG[AT]", regex=True), "context"] = "WCGW"
cpg1.loc[cpg1["small_seq"].str.contains("[CG]CG[CG]", regex=True), "context"] = "SCGS"
only_needed = cpg1[["small_seq", "sequence", "context"]]
only_needed = only_needed.transpose()
only_needed.to_csv("info.csv")
# crc01 = pd.read_pickle(crc01_path)
# good = crc01[cpg1["location"]]
# good.to_csv("crc01.csv")
#
# crc11 = pd.read_pickle(crc11_path)
# good = crc11[cpg1["location"]]
# good.to_csv("crc11.csv")
#
# crc13 = pd.read_pickle(crc13_path)
# good = crc13[cpg1["location"]]
# good.to_csv("crc13.csv")
# rows = good.index.values
# columns = list(good.columns.values)
# data = good.values
# data_added = np.vstack((data, cpg1["small_seq"]))
# data_added = np.vstack((data_added, cpg1["context"]))
# df = pd.DataFrame(data=data_added, index=columns + ["small_seq", "context"], columns=columns)
crc02 = pd.read_pickle(crc02_path)
good = crc02[cpg1["location"]]
good.to_csv("crc02.csv")
crc04 = pd.read_pickle(crc04_path)
good = crc04[cpg1["location"]]
good.to_csv("crc04.csv")
crc09 = pd.read_pickle(crc09_path)
good = crc09[cpg1["location"]]
good.to_csv("crc09.csv")
crc10 = pd.read_pickle(crc10_path)
good = crc10[cpg1["location"]]
good.to_csv("crc10.csv")
crc12 = pd.read_pickle(crc12_path)
good = crc12[cpg1["location"]]
good.to_csv("crc12.csv")
crc14 = pd.read_pickle(crc14_path)
good = crc14[cpg1["location"]]
good.to_csv("crc14.csv")
crc15 = pd.read_pickle(crc15_path)
good = crc15[cpg1["location"]]
good.to_csv("crc15.csv")
| 41.721154 | 129 | 0.690943 | """
Extract information about chr16 for several patients for ben
"""
import pandas as pd
crc01_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC01\all_cpg_ratios_CRC01_chr16.dummy.pkl.zip"
crc11_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC11" \
r"\all_cpg_ratios_CRC11_chr16.dummy.pkl.zip"
crc13_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC13" \
r"\all_cpg_ratios_CRC13_chr16.dummy.pkl.zip"
crc02_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC02" \
r"\all_cpg_ratios_CRC02_chr16.dummy.pkl.zip"
crc04_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC04" \
r"\all_cpg_ratios_CRC04_chr16.dummy.pkl.zip"
crc09_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC09" \
r"\all_cpg_ratios_CRC09_chr16.dummy.pkl.zip"
crc10_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC10" \
r"\all_cpg_ratios_CRC10_chr16.dummy.pkl.zip"
crc12_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC12" \
r"\all_cpg_ratios_CRC12_chr16.dummy.pkl.zip"
crc14_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC14" \
r"\all_cpg_ratios_CRC14_chr16.dummy.pkl.zip"
crc15_path = r"H:\Study\university\Computational-Biology\Year " \
r"3\Projects\proj_scwgbs\resource\cpg_format\filtered_by_bl_and_cpgi\CRC15" \
r"\all_cpg_ratios_CRC15_chr16.dummy.pkl.zip"
valid_path = r"H:\Study\university\Computational-Biology\Year 3\Projects\proj_scwgbs\covariance\valid_cpg.pkl"
if __name__ == '__main__':
    # Keep only chromosome-16 CpG sites and classify the 4-mer centred on
    # each CpG (sequence positions 73:77 of the stored window).
    valid_data = pd.read_pickle(valid_path)
    valid_data = valid_data[valid_data["chromosome"] == "16"]
    valid_data["small_seq"] = valid_data["sequence"].str[73:77]
    # Sites whose stored window contains exactly one CpG.
    cpg1 = valid_data[valid_data["sequence"].str.count("CG") == 1]
    cpg1["context"] = "other"
    cpg1.loc[cpg1["small_seq"].str.contains("[AT]CG[AT]", regex=True), "context"] = "WCGW"
    cpg1.loc[cpg1["small_seq"].str.contains("[CG]CG[CG]", regex=True), "context"] = "SCGS"
    only_needed = cpg1[["small_seq", "sequence", "context"]]
    only_needed = only_needed.transpose()
    only_needed.to_csv("info.csv")
    # One methylation-ratio CSV per patient for the selected CpG locations.
    # The previous version repeated the same three-line stanza per patient;
    # this loop is data-driven and keeps the original output order.
    # (The crc01/crc11/crc13 stanzas were commented out upstream and stay
    # excluded here.)
    patient_paths = {
        "crc02": crc02_path,
        "crc04": crc04_path,
        "crc09": crc09_path,
        "crc10": crc10_path,
        "crc12": crc12_path,
        "crc14": crc14_path,
        "crc15": crc15_path,
    }
    for patient, path in patient_paths.items():
        ratios = pd.read_pickle(path)
        good = ratios[cpg1["location"]]
        good.to_csv("%s.csv" % patient)
| 0 | 0 | 0 |
59e12014ee9c4a44c159a0e0cd44aac722333c28 | 1,509 | py | Python | setup.py | SUNET/eduid-queue | e7d090978220a4beaf61e5d893233120d8e79cdd | [
"BSD-2-Clause"
] | null | null | null | setup.py | SUNET/eduid-queue | e7d090978220a4beaf61e5d893233120d8e79cdd | [
"BSD-2-Clause"
] | null | null | null | setup.py | SUNET/eduid-queue | e7d090978220a4beaf61e5d893233120d8e79cdd | [
"BSD-2-Clause"
] | null | null | null | from pathlib import PurePath
from typing import List
from setuptools import find_packages, setup
version = '0.0.4'
def load_requirements(path: PurePath) -> List[str]:
""" Load dependencies from a requirements.txt style file, ignoring comments etc. """
res = []
with open(path) as fd:
for line in fd.readlines():
while line.endswith('\n') or line.endswith('\\'):
line = line[:-1]
line = line.strip()
if not line or line.startswith('-') or line.startswith('#'):
continue
res += [line]
return res
here = PurePath(__file__)
README = open(here.with_name('README.md')).read()
install_requires = load_requirements(here.with_name('requirements.txt'))
test_requires = load_requirements(here.with_name('test_requirements.txt'))
setup(
name='eduid-queue',
version=version,
packages=find_packages('src'),
package_dir={'': 'src'},
url='https://github.com/sunet/eduid-queue',
license='BSD-2-Clause',
keywords='eduid',
author='Johan Lundberg',
author_email='lundberg@sunet.se',
description='MongoDB based task queue',
install_requires=install_requires,
test_requires=test_requires,
extras_require={'testing': [],
'client': load_requirements(here.with_name('client_requirements.txt')),
},
include_package_data=True,
entry_points={'console_scripts': ['run-mail-worker=eduid_queue.workers.mail:start_worker',],},
)
| 31.4375 | 98 | 0.648774 | from pathlib import PurePath
from typing import List
from setuptools import find_packages, setup
version = '0.0.4'
def load_requirements(path: PurePath) -> List[str]:
    """ Load dependencies from a requirements.txt style file, ignoring comments etc. """
    requirements: List[str] = []
    with open(path) as fd:
        for raw_line in fd.readlines():
            # Drop trailing newlines/continuation backslashes, then whitespace.
            entry = raw_line.rstrip('\n\\').strip()
            if entry and not entry.startswith(('-', '#')):
                requirements.append(entry)
    return requirements
# Resolve sibling files relative to this setup.py.
here = PurePath(__file__)
README = open(here.with_name('README.md')).read()
install_requires = load_requirements(here.with_name('requirements.txt'))
test_requires = load_requirements(here.with_name('test_requirements.txt'))
setup(
    name='eduid-queue',
    version=version,
    packages=find_packages('src'),
    package_dir={'': 'src'},
    url='https://github.com/sunet/eduid-queue',
    license='BSD-2-Clause',
    keywords='eduid',
    author='Johan Lundberg',
    author_email='lundberg@sunet.se',
    description='MongoDB based task queue',
    install_requires=install_requires,
    # Bug fix: the setuptools keyword is `tests_require`; the previous
    # `test_requires` was an unknown option and was silently ignored.
    tests_require=test_requires,
    extras_require={'testing': [],
                    'client': load_requirements(here.with_name('client_requirements.txt')),
                    },
    include_package_data=True,
    entry_points={'console_scripts': ['run-mail-worker=eduid_queue.workers.mail:start_worker',],},
)
| 0 | 0 | 0 |
939dd53c6999e793bc3ce2b6e8b9689a8b6c18aa | 3,451 | py | Python | fft_fluid_solver.py | 0xrabbyte/taichi_simple_fluid_solver | 992924edeee66a74e747b4503fa381637eabf03f | [
"MIT"
] | 3 | 2021-12-16T04:58:13.000Z | 2021-12-21T12:43:31.000Z | fft_fluid_solver.py | 0xrabbyte/taichi_simple_fluid_solver | 992924edeee66a74e747b4503fa381637eabf03f | [
"MIT"
] | null | null | null | fft_fluid_solver.py | 0xrabbyte/taichi_simple_fluid_solver | 992924edeee66a74e747b4503fa381637eabf03f | [
"MIT"
] | null | null | null | from numpy.core.fromnumeric import shape
import taichi as ti
import numpy as np
lin_iters = 20
N = 64
dt = 0.1
diff = 0.0
visc = 0.0
force = 5e5
source = 100.0
dvel = False
v = ti.Vector.field(2, float, shape=(N + 2, N + 2), offset = (-1, -1))
v_prev = ti.Vector.field(2, float, shape=(N + 2, N + 2), offset = (-1, -1))
dens = ti.field(float, shape=(N + 2, N + 2), offset = (-1, -1))
dens_prev = ti.field(float, shape=(N + 2, N + 2), offset = (-1, -1))
div = ti.field(float, shape=(N + 2, N + 2), offset = (-1, -1))
p = ti.field(float, shape=(N + 2, N + 2), offset = (-1, -1))
pixels = ti.field(float, shape=(N, N))
@ti.kernel
@ti.kernel
@ti.func
@ti.kernel
@ti.kernel
| 31.372727 | 102 | 0.454361 | from numpy.core.fromnumeric import shape
import taichi as ti
import numpy as np
# Solver parameters for a Stam-style stable-fluids simulation on an N x N grid.
lin_iters = 20
N = 64
dt = 0.1
diff = 0.0
visc = 0.0
force = 5e5
source = 100.0
dvel = False
# All simulation fields carry a one-cell ghost border for boundary handling,
# hence shape (N + 2)^2 with index offset (-1, -1).
v = ti.Vector.field(2, float, shape=(N + 2, N + 2), offset = (-1, -1))
v_prev = ti.Vector.field(2, float, shape=(N + 2, N + 2), offset = (-1, -1))
dens = ti.field(float, shape=(N + 2, N + 2), offset = (-1, -1))
dens_prev = ti.field(float, shape=(N + 2, N + 2), offset = (-1, -1))
div = ti.field(float, shape=(N + 2, N + 2), offset = (-1, -1))
p = ti.field(float, shape=(N + 2, N + 2), offset = (-1, -1))
pixels = ti.field(float, shape=(N, N))
@ti.kernel
def add_source(a : ti.template(), b : ti.template()):
    # Explicit-Euler source step: a += dt * b over the whole field.
    for i, j in a:
        a[i, j] += dt * b[i, j]
@ti.kernel
def swap(a : ti.template(), b : ti.template()):
    # Exchange the contents of two same-shaped fields element-wise.
    for i, j in a:
        a[i, j], b[i, j] = b[i, j], a[i, j]
@ti.func
def set_bnd(x : ti.template()):
    # Fill the ghost border: copy the nearest interior cell into each edge
    # cell, then average the two edge neighbours into each corner.
    for i in range(N):
        x[-1, i] = x[0, i]
        x[N, i] = x[N - 1, i]
        x[i, -1] = x[i, 0]
        x[i, N] = x[i, N - 1]
    x[-1, -1] = (x[0, -1] + x[-1, 0]) / 2.0
    x[-1, N] = (x[0, N] + x[-1, N - 1]) / 2.0
    x[N, -1] = (x[N - 1, -1] + x[N, 0]) / 2.0
    x[N, N] = (x[N - 1, N] + x[N, N - 1]) / 2.0
@ti.kernel
def lin_solve(x : ti.template(), x0 : ti.template(), a : float, c : float):
    # One relaxation sweep of the 5-point stencil system x = (x0 + a*sum(nbrs))/c,
    # followed by a boundary refresh.
    for i, j in ti.ndrange(N, N):
        x[i, j] = (x0[i, j] + a * (x[i - 1, j] + x[i + 1, j] + x[i, j - 1] + x[i, j + 1])) / c
    set_bnd(x)
def diffuse(a, a_prev, diff):
    """Implicitly diffuse `a` from `a_prev` with rate `diff` via lin_iters
    relaxation sweeps. (The parameter shadows the module-level `diff`.)"""
    k = dt * diff * N * N
    for t in range(lin_iters):
        lin_solve(a, a_prev, k, 1.0 + 4.0 * k)
@ti.kernel
def advect(d : ti.template(), d0 : ti.template(), v : ti.template() ):
    # Semi-Lagrangian advection: trace each cell centre backwards along the
    # velocity field, clamp the foot point to the grid, and bilinearly
    # interpolate d0 there.
    dt0 = dt * N
    for i, j in ti.ndrange(N, N):
        x, y = i - dt0 * v[i, j][0], j - dt0 * v[i, j][1]
        if (x < 0.5): x = 0.5
        if (x > N + 0.5): x = N + 0.5
        i0, i1 = int(x), int(x) + 1
        if (y < 0.5): y = 0.5
        if (y > N + 0.5): y = N + 0.5
        j0, j1 = int(y), int(y) + 1
        # Bilinear interpolation weights.
        s1, s0, t1, t0 = x - i0, i1 - x, y - j0, j1 - y
        d[i, j] = s0 * (t0 * d0[i0, j0] + t1 * d0[i0, j1]) + s1 * (t0 * d0[i1, j0] + t1 * d0[i1, j1])
    set_bnd(d)
def fft_project(v):
    """Project the velocity field onto its divergence-free part in Fourier
    space and apply viscosity damping (Stam-style FFT solver).

    Copies the taichi vector field *v* into numpy arrays, filters it with
    FFTs, and writes the projected velocities back into *v* in place.
    """
    u0 = np.zeros(shape = (N + 2, N))
    v0 = np.zeros(shape = (N + 2, N))
    for i, j in ti.ndrange(N, N):
        u0[i, j], v0[i, j] = v[i, j][0], v[i, j][1]
    u0 = np.fft.fft2(u0)
    v0 = np.fft.fft2(v0)
    for i, j in ti.ndrange(N + 2, N):
        x, y = i, j
        if j > N // 2 : j = j - N
        r = x * x + y * y
        if (r == 0.0): continue
        # NOTE(review): the signed frequency is stored back into j, but r and
        # the projection factors still use the unsigned x/y, so the upper half
        # of the spectrum is damped/projected with the wrong wavenumbers --
        # confirm against Stam's reference FFT solver before relying on this.
        # np.exp: works on plain Python floats; ti.exp is meant for taichi scope.
        f = np.exp(-r*dt*visc)
        U, V = u0[i,j], v0[i,j]
        # complex(): np.complex was removed in NumPy 1.24 and raised there.
        u0[i, j] = f * complex((1-x*x/r)*U.real+(-x*y/r)*V.real, (1-x*x/r)*U.imag+(-x*y/r)*V.imag)
        v0[i, j] = f * complex((-y*x/r)*U.real+(1-y*y/r)*V.real,(-y*x/r)*U.imag+(1-y*y/r)*V.imag)
    u0 = np.fft.ifft2(u0)
    v0 = np.fft.ifft2(v0)
    f = 1.0/(N*N)
    for i, j in ti.ndrange(N, N):
        # .real: the inverse FFT returns complex arrays; the fields are float.
        v[i, j][0], v[i, j][1] = f * u0[i, j].real, f * v0[i, j].real
def dens_step():
    """Advance the density field one time step: add sources, diffuse, advect."""
    add_source(dens, dens_prev)
    swap(dens, dens_prev)
    diffuse(dens, dens_prev, diff)
    swap(dens, dens_prev)
    advect(dens, dens_prev, v)
def vel_step():
    """Advance the velocity field one time step: add forces, diffuse,
    self-advect, projecting to divergence-free after each transport stage."""
    add_source(v, v_prev)
    swap(v, v_prev)
    diffuse(v, v_prev, visc)
    fft_project(v)
    swap(v, v_prev)
    advect(v, v_prev, v_prev)
    fft_project(v)
0a242dfa84979d870503e1a938700b15f2f94260 | 1,000 | py | Python | helpers/populate_zones.py | qbrc-cnap/cnap | 624683e91a64c3b4934b578c59db850242d2f94c | [
"MIT"
] | 1 | 2021-07-08T14:06:04.000Z | 2021-07-08T14:06:04.000Z | helpers/populate_zones.py | qbrc-cnap/cnap | 624683e91a64c3b4934b578c59db850242d2f94c | [
"MIT"
] | 12 | 2020-02-12T00:10:53.000Z | 2021-06-10T21:24:45.000Z | helpers/populate_zones.py | qbrc-cnap/cnap | 624683e91a64c3b4934b578c59db850242d2f94c | [
"MIT"
] | null | null | null | import sys
import os
os.chdir(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.realpath(os.pardir))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cnap_v2.settings')
import django
from django.conf import settings
django.setup()
from base.models import AvailableZones, CurrentZone
if __name__ == '__main__':
    if settings.CONFIG_PARAMS['cloud_environment'] == settings.GOOGLE:
        # Seed AvailableZones from the comma-separated list in the site
        # configuration, then mark the configured default zone current.
        default_zone = settings.CONFIG_PARAMS['default_google_zone']
        avail_zones_csv = settings.CONFIG_PARAMS['available_google_zones']
        avail_zones = [x.strip() for x in avail_zones_csv.split(',')]
        for z in avail_zones:
            # BUG FIX: objects.create() already saves the row; the explicit
            # save() afterwards issued a redundant UPDATE and was removed.
            AvailableZones.objects.create(
                cloud_environment=settings.GOOGLE, zone=z)
        # Raises AvailableZones.DoesNotExist if the default zone is not in
        # the available list -- a misconfiguration worth failing loudly on.
        dz = AvailableZones.objects.get(zone=default_zone)
        CurrentZone.objects.create(zone=dz)
    else:
        print('Only Google-related settings have been implemented so far. Exiting.')
        sys.exit(1)
| 33.333333 | 88 | 0.708 | import sys
import os
os.chdir(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.realpath(os.pardir))
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'cnap_v2.settings')
import django
from django.conf import settings
django.setup()
from base.models import AvailableZones, CurrentZone
if __name__ == '__main__':
    if settings.CONFIG_PARAMS['cloud_environment'] == settings.GOOGLE:
        # Seed AvailableZones from the comma-separated list in the site
        # configuration, then mark the configured default zone current.
        default_zone = settings.CONFIG_PARAMS['default_google_zone']
        avail_zones_csv = settings.CONFIG_PARAMS['available_google_zones']
        avail_zones = [x.strip() for x in avail_zones_csv.split(',')]
        for z in avail_zones:
            # BUG FIX: objects.create() already saves the row; the explicit
            # save() afterwards issued a redundant UPDATE and was removed.
            AvailableZones.objects.create(
                cloud_environment=settings.GOOGLE, zone=z)
        # Raises AvailableZones.DoesNotExist if the default zone is not in
        # the available list -- a misconfiguration worth failing loudly on.
        dz = AvailableZones.objects.get(zone=default_zone)
        CurrentZone.objects.create(zone=dz)
    else:
        print('Only Google-related settings have been implemented so far. Exiting.')
        sys.exit(1)
| 0 | 0 | 0 |
3df290dfe185dd739a6cc9a5cc23a766723dd5a9 | 2,163 | py | Python | reward/space/continuous.py | lgvaz/torchrl | cfff8acaf70d1fec72169162b95ab5ad3547d17a | [
"MIT"
] | 5 | 2018-06-21T14:33:40.000Z | 2018-08-18T02:26:03.000Z | reward/space/continuous.py | lgvaz/reward | cfff8acaf70d1fec72169162b95ab5ad3547d17a | [
"MIT"
] | null | null | null | reward/space/continuous.py | lgvaz/reward | cfff8acaf70d1fec72169162b95ab5ad3547d17a | [
"MIT"
] | 2 | 2018-05-08T03:34:49.000Z | 2018-06-22T15:04:17.000Z | import torch
import numpy as np, reward.utils as U
from pathlib import Path
from .space import Space
| 35.459016 | 96 | 0.66528 | import torch
import numpy as np, reward.utils as U
from pathlib import Path
from .space import Space
class Continuous(Space):
    """An axis-aligned box of float32 values bounded by `low` and `high`."""
    def __init__(self, low=None, high=None, shape=None):
        low = np.array(low)
        high = np.array(high)
        if shape is None:
            # Bounds must agree when the shape has to be inferred from them.
            assert low.shape == high.shape
        shape = shape or low.shape
        self.shape = shape
        self.dtype = np.float32
        # Adding a zero array broadcasts scalar bounds up to `shape`.
        template = np.zeros(self.shape, dtype=self.dtype)
        self.low = low + template
        self.high = high + template
    def __repr__(self):
        return 'Continuous(shape={},low={},high={})'.format(
            self.shape, self.low, self.high)
    def __call__(self, arr):
        return ContinuousObj(arr=arr)
    def from_list(self, arrs):
        return ContinuousObj.from_list(arrs=arrs)
    def sample(self):
        """Uniformly draw one point inside the box."""
        return np.random.uniform(low=self.low, high=self.high, size=self.shape)
class ContinuousObj:
    """One sample of a `Continuous` space, held as a float ndarray.
    Supports numpy conversion, torch conversion and transform pipelines."""
    # The space type this sample belongs to.
    sig = Continuous
    def __init__(self, arr): self.arr = np.array(arr, dtype='float')
    def __repr__(self): return f'Continuous({self.arr.__repr__()})'
    @property
    def shape(self): return self.arr.shape
    # Conversion hook used by np.array(obj); avoids a copy when possible.
    def __array__(self): return np.array(self.arr, dtype='float', copy=False)
    def to_tensor(self): return U.tensor(np.array(self), dtype=torch.float)
    def apply_tfms(self, tfms, priority=True):
        """Apply each transform to a copy of the data and wrap the result.
        With `priority=True`, transforms run in descending `tfm.priority`
        order; the original sample is never mutated."""
        if priority: tfms = sorted(U.listify(tfms), key=lambda o: o.priority, reverse=True)
        x = self.arr.copy()
        for tfm in tfms: x = tfm(x)
        return self.__class__(arr=x)
    @staticmethod
    def from_list(arrs): return ContinuousList(arrs=arrs)
class ContinuousList:
    """A batch of `ContinuousObj` samples with stacked-array conversion
    and simple .npy persistence."""
    # The space type the contained samples belong to.
    sig = Continuous
    def __init__(self, arrs): self.arrs = arrs
    # Stack the individual sample arrays into one (batch, *shape) array.
    def __array__(self): return np.array([o.arr for o in self.arrs], dtype='float', copy=False)
    def to_tensor(self): return U.tensor(np.array(self), dtype=torch.float)
    def unpack(self): return self.arrs
    def save(self, savedir, postfix=''):
        """Persist the stacked batch to `<savedir>/cont_<postfix>.npy`."""
        np.save(Path(savedir)/f'cont_{postfix}.npy', np.array(self))
    @classmethod
    def load(cls, loaddir, postfix=''):
        """Load a batch previously written by `save`."""
        arr = np.load(Path(loaddir)/f'cont_{postfix}.npy')
        return cls([ContinuousObj(o) for o in arr])
| 1,395 | 454 | 202 |
5223cec5815747015e2cf337d69de35d96fdb7b8 | 2,129 | py | Python | household/migrations/0002_auto_20200303_1532.py | desafinadude/municipal-data | 1c86c55bbb59f9c8087f6920fae3585dd90d5d43 | [
"MIT"
] | 19 | 2018-01-09T10:54:15.000Z | 2022-01-25T13:10:55.000Z | household/migrations/0002_auto_20200303_1532.py | desafinadude/municipal-data | 1c86c55bbb59f9c8087f6920fae3585dd90d5d43 | [
"MIT"
] | 29 | 2018-01-12T12:12:38.000Z | 2022-01-31T15:30:36.000Z | household/migrations/0002_auto_20200303_1532.py | desafinadude/municipal-data | 1c86c55bbb59f9c8087f6920fae3585dd90d5d43 | [
"MIT"
] | 13 | 2018-02-11T02:12:57.000Z | 2021-11-22T11:03:22.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2020-03-03 13:32
from __future__ import unicode_literals
from django.db import migrations
| 41.745098 | 70 | 0.753405 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2020-03-03 13:32
from __future__ import unicode_literals
from django.db import migrations
def add_financial_year(apps, schema_editor):
    """Seed the FinancialYear table with the 2015/16 - 2020/21 budget years."""
    financial_year = apps.get_model('household', 'FinancialYear')
    for budget_year in ('2015/2016', '2016/2017', '2017/2018',
                        '2018/2019', '2019/2020', '2020/2021'):
        financial_year.objects.create(budget_year=budget_year)
def add_budget_phase(apps, schema_editor):
    """Seed the four budget phases used by the household data."""
    budget_phase = apps.get_model('household', 'BudgetPhase')
    for phase_name in ('Audited Outcome', 'Original Budget',
                       'Adjusted Budget', 'Budget Year'):
        budget_phase.objects.create(name=phase_name)
def add_class(apps, schema_editor):
    """Seed the household income classes."""
    household_class = apps.get_model('household', 'HouseholdClass')
    for class_name in ('Middle Income Range', 'Affordable Range',
                       'Indigent HH receiving FBS'):
        household_class.objects.create(name=class_name)
def add_service(apps, schema_editor):
    """Seed the municipal services that make up a household bill."""
    household_service = apps.get_model('household', 'HouseholdService')
    for service_name in ('Property Rates', 'Electricity: Basic levy',
                         'Electricity: Consumption', 'Water: Basic levy',
                         'Water: Consumption', 'Sanitation',
                         'Refuse removal', 'Other'):
        household_service.objects.create(name=service_name)
class Migration(migrations.Migration):
    """Data migration seeding the lookup tables created in 0001_initial."""
    dependencies = [
        ('household', '0001_initial'),
    ]
    operations = [
        # NOTE: no reverse_code is supplied, so this migration cannot be
        # unapplied without faking it.
        migrations.RunPython(add_financial_year),
        migrations.RunPython(add_budget_phase),
        migrations.RunPython(add_class),
        migrations.RunPython(add_service)
    ]
| 1,576 | 290 | 115 |
767933a3cbc4c4860d84dbb36a3ae605a156b0cb | 9,566 | py | Python | src/condor_tensorflow/metrics.py | GarrettJenkinson/condor_tensorflow | db715a2db6a5c0dbf610f5ad82cec16e2ab3d3d8 | [
"Apache-2.0"
] | 9 | 2021-10-31T16:39:35.000Z | 2022-02-19T17:51:07.000Z | src/condor_tensorflow/metrics.py | GarrettJenkinson/condor_tensorflow | db715a2db6a5c0dbf610f5ad82cec16e2ab3d3d8 | [
"Apache-2.0"
] | 4 | 2022-01-01T19:52:55.000Z | 2022-02-16T00:38:40.000Z | src/condor_tensorflow/metrics.py | GarrettJenkinson/condor_tensorflow | db715a2db6a5c0dbf610f5ad82cec16e2ab3d3d8 | [
"Apache-2.0"
] | 4 | 2021-10-31T17:50:29.000Z | 2022-02-11T02:54:47.000Z | import tensorflow as tf
from tensorflow.keras import backend as K
class OrdinalMeanAbsoluteError(tf.keras.metrics.Metric):
    """Mean absolute error between ordinal ranks decoded from CONDOR
    cumulative logits and the true (CONDOR-encoded) ordinal labels."""
    def __init__(self, name="mean_absolute_error_labels",
                 **kwargs):
        """Creates a `OrdinalMeanAbsoluteError` instance."""
        super().__init__(name=name, **kwargs)
        # Running totals: summed absolute error and (weighted) sample count.
        self.maes = self.add_weight(name='maes', initializer='zeros')
        self.count = self.add_weight(name='count', initializer='zeros')
    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates the absolute rank error of one batch.
        Args:
          y_true: CONDOR-encoded ordinal labels (cumulative 0/1 columns).
          y_pred: Cumulative logits from a CondorOrdinal layer.
          sample_weight (optional): per-sample weights.
        """
        # Predict the label as in Cao et al. - using cumulative probabilities
        cum_probs = tf.math.cumprod(
            tf.math.sigmoid(y_pred),
            axis=1)  # tf.map_fn(tf.math.sigmoid, y_pred)
        # Calculate the labels using the style of Cao et al.
        above_thresh = tf.map_fn(
            lambda x: tf.cast(
                x > 0.5,
                tf.float32),
            cum_probs)
        # Sum across columns to estimate how many cumulative thresholds are
        # passed.
        labels_v2 = tf.reduce_sum(above_thresh, axis=1)
        # Decode the true rank by summing the cumulative 0/1 encoding.
        y_true = tf.cast(tf.reduce_sum(y_true, axis=1), y_pred.dtype)
        # remove all dimensions of size 1 (e.g., from [[1], [2]], to [1, 2])
        y_true = tf.squeeze(y_true)
        if sample_weight is not None:
            values = tf.abs(y_true - labels_v2)
            sample_weight = tf.cast(tf.squeeze(sample_weight), y_pred.dtype)
            sample_weight = tf.broadcast_to(sample_weight, values.shape)
            values = tf.multiply(values, sample_weight)
            self.maes.assign_add(tf.reduce_sum(values))
            self.count.assign_add(tf.reduce_sum(sample_weight))
        else:
            self.maes.assign_add(tf.reduce_sum(tf.abs(y_true - labels_v2)))
            self.count.assign_add(tf.cast(tf.size(y_true), tf.float32))
    def result(self):
        """Current mean absolute error (0 when nothing accumulated yet).
        BUG FIX: `result` was missing, so evaluating the metric raised
        NotImplementedError from the tf.keras.metrics.Metric base class."""
        return tf.math.divide_no_nan(self.maes, self.count)
    def reset_state(self):
        """Resets all of the metric state variables at the start of each epoch."""
        self.maes.assign(0.0)
        self.count.assign(0.0)
    def get_config(self):
        """Returns the serializable config of the metric."""
        config = {}
        base_config = super().get_config()
        return {**base_config, **config}
class SparseOrdinalMeanAbsoluteError(OrdinalMeanAbsoluteError):
    """Computes mean absolute error for ordinal labels.
    Unlike the parent class, `y_true` is taken as a plain (sparse) integer
    rank rather than a CONDOR cumulative encoding."""
    def __init__(self, name="mean_absolute_error_labels",
                 **kwargs):
        """Creates a `OrdinalMeanAbsoluteError` instance."""
        super().__init__(name=name, **kwargs)
    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates the absolute rank error of one batch.
        Args:
          y_true: sparse ordinal labels (integer ranks).
          y_pred: cumulative logits from a CondorOrdinal layer.
          sample_weight (optional): per-sample weights.
        """
        # Predict the label as in Cao et al. - using cumulative probabilities
        cum_probs = tf.math.cumprod(
            tf.math.sigmoid(y_pred),
            axis=1)  # tf.map_fn(tf.math.sigmoid, y_pred)
        # Calculate the labels using the style of Cao et al.
        above_thresh = tf.map_fn(
            lambda x: tf.cast(
                x > 0.5,
                tf.float32),
            cum_probs)
        # Sum across columns to estimate how many cumulative thresholds are
        # passed.
        labels_v2 = tf.reduce_sum(above_thresh, axis=1)
        # Sparse labels are already ranks; only the dtype needs aligning.
        y_true = tf.cast(y_true, y_pred.dtype)
        # remove all dimensions of size 1 (e.g., from [[1], [2]], to [1, 2])
        y_true = tf.squeeze(y_true)
        if sample_weight is not None:
            values = tf.abs(y_true - labels_v2)
            sample_weight = tf.cast(tf.squeeze(sample_weight), y_pred.dtype)
            sample_weight = tf.broadcast_to(sample_weight, values.shape)
            values = tf.multiply(values, sample_weight)
            self.maes.assign_add(tf.reduce_sum(values))
            self.count.assign_add(tf.reduce_sum(sample_weight))
        else:
            self.maes.assign_add(tf.reduce_sum(tf.abs(y_true - labels_v2)))
            self.count.assign_add(tf.cast(tf.size(y_true), tf.float32))
class OrdinalAccuracy(tf.keras.metrics.Metric):
    """Computes accuracy for ordinal labels (tolerance is allowed rank
    distance to be considered 'correct' predictions)."""
    def __init__(self, name=None,
                 tolerance=0,
                 **kwargs):
        """Creates a `OrdinalAccuracy` instance."""
        if name is not None:
            super().__init__(name=name, **kwargs)
        else:
            # Default name encodes the tolerance, e.g. "ordinal_accuracy_tol1".
            super().__init__(name="ordinal_accuracy_tol"+str(tolerance),
                             **kwargs)
        # Running totals: (weighted) correct count and total count.
        self.accs = self.add_weight(name='accs', initializer='zeros')
        self.count = self.add_weight(name='count', initializer='zeros')
        self.tolerance = tolerance
    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates tolerance-accuracy for one batch.
        Args:
          y_true: CONDOR-encoded ordinal labels (cumulative 0/1 columns).
          y_pred: Cumulative logits from a CondorOrdinal layer.
          sample_weight (optional): per-sample weights.
        """
        # Predict the label as in Cao et al. - using cumulative probabilities
        cum_probs = tf.math.cumprod(
            tf.math.sigmoid(y_pred),
            axis=1)  # tf.map_fn(tf.math.sigmoid, y_pred)
        # Calculate the labels using the style of Cao et al.
        above_thresh = tf.map_fn(
            lambda x: tf.cast(
                x > 0.5,
                tf.float32),
            cum_probs)
        # Sum across columns to estimate how many cumulative thresholds are
        # passed.
        labels_v2 = tf.reduce_sum(above_thresh, axis=1)
        # Decode the true rank by summing the cumulative 0/1 encoding.
        y_true = tf.cast(tf.reduce_sum(y_true, axis=1), y_pred.dtype)
        # remove all dimensions of size 1 (e.g., from [[1], [2]], to [1, 2])
        y_true = tf.squeeze(y_true)
        if sample_weight is not None:
            values = tf.cast(tf.less_equal(
                tf.abs(y_true-labels_v2),tf.cast(self.tolerance,y_pred.dtype)),
                y_pred.dtype)
            sample_weight = tf.cast(tf.squeeze(sample_weight), y_pred.dtype)
            sample_weight = tf.broadcast_to(sample_weight, values.shape)
            values = tf.multiply(values, sample_weight)
            self.accs.assign_add(tf.reduce_sum(values))
            self.count.assign_add(tf.reduce_sum(sample_weight))
        else:
            self.accs.assign_add(tf.reduce_sum(tf.cast(tf.less_equal(
                tf.abs(y_true-labels_v2),tf.cast(self.tolerance,y_pred.dtype)),
                y_pred.dtype)))
            self.count.assign_add(tf.cast(tf.size(y_true), tf.float32))
    def result(self):
        """Current accuracy (0 when nothing accumulated yet).
        BUG FIX: `result` was missing, so evaluating the metric raised
        NotImplementedError from the tf.keras.metrics.Metric base class."""
        return tf.math.divide_no_nan(self.accs, self.count)
    def reset_state(self):
        """Resets all of the metric state variables at the start of each epoch."""
        self.accs.assign(0.0)
        self.count.assign(0.0)
    def get_config(self):
        """Returns the serializable config of the metric."""
        config = {'tolerance': self.tolerance}
        base_config = super().get_config()
        return {**base_config, **config}
class SparseOrdinalAccuracy(OrdinalAccuracy):
    """Computes accuracy for ordinal labels (tolerance is allowed rank
    distance to be considered 'correct' predictions).
    Unlike the parent class, `y_true` is taken as a plain (sparse) integer
    rank rather than a CONDOR cumulative encoding."""
    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates tolerance-accuracy for one batch.
        Args:
          y_true: sparse ordinal labels (integer ranks).
          y_pred: cumulative logits from a CondorOrdinal layer.
          sample_weight (optional): per-sample weights.
        """
        # Predict the label as in Cao et al. - using cumulative probabilities
        cum_probs = tf.math.cumprod(
            tf.math.sigmoid(y_pred),
            axis=1)  # tf.map_fn(tf.math.sigmoid, y_pred)
        # Calculate the labels using the style of Cao et al.
        above_thresh = tf.map_fn(
            lambda x: tf.cast(
                x > 0.5,
                tf.float32),
            cum_probs)
        # Sum across columns to estimate how many cumulative thresholds are
        # passed.
        labels_v2 = tf.reduce_sum(above_thresh, axis=1)
        # Sparse labels are already ranks; only the dtype needs aligning.
        y_true = tf.cast(y_true, y_pred.dtype)
        # remove all dimensions of size 1 (e.g., from [[1], [2]], to [1, 2])
        y_true = tf.squeeze(y_true)
        if sample_weight is not None:
            values = tf.cast(tf.less_equal(
                tf.abs(y_true-labels_v2),tf.cast(self.tolerance,y_pred.dtype)),
                y_pred.dtype)
            sample_weight = tf.cast(tf.squeeze(sample_weight), y_pred.dtype)
            sample_weight = tf.broadcast_to(sample_weight, values.shape)
            values = tf.multiply(values, sample_weight)
            self.accs.assign_add(tf.reduce_sum(values))
            self.count.assign_add(tf.reduce_sum(sample_weight))
        else:
            self.accs.assign_add(tf.reduce_sum(tf.cast(tf.less_equal(
                tf.abs(y_true-labels_v2),tf.cast(self.tolerance,y_pred.dtype)),
                y_pred.dtype)))
            self.count.assign_add(tf.cast(tf.size(y_true), tf.float32))
| 38.728745 | 82 | 0.614363 | import tensorflow as tf
from tensorflow.keras import backend as K
class OrdinalMeanAbsoluteError(tf.keras.metrics.Metric):
    """Mean absolute error between ordinal ranks decoded from CONDOR
    cumulative logits and the true (CONDOR-encoded) ordinal labels."""
    def __init__(self, name="mean_absolute_error_labels",
                 **kwargs):
        """Creates a `OrdinalMeanAbsoluteError` instance."""
        super().__init__(name=name, **kwargs)
        # Running totals: summed absolute error and (weighted) sample count.
        self.maes = self.add_weight(name='maes', initializer='zeros')
        self.count = self.add_weight(name='count', initializer='zeros')
    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates the absolute rank error of one batch.
        Args:
          y_true: CONDOR-encoded ordinal labels (cumulative 0/1 columns).
          y_pred: Cumulative logits from a CondorOrdinal layer.
          sample_weight (optional): per-sample weights.
        """
        # Predict the label as in Cao et al. - using cumulative probabilities
        cum_probs = tf.math.cumprod(
            tf.math.sigmoid(y_pred),
            axis=1)  # tf.map_fn(tf.math.sigmoid, y_pred)
        # Calculate the labels using the style of Cao et al.
        above_thresh = tf.map_fn(
            lambda x: tf.cast(
                x > 0.5,
                tf.float32),
            cum_probs)
        # Sum across columns to estimate how many cumulative thresholds are
        # passed.
        labels_v2 = tf.reduce_sum(above_thresh, axis=1)
        # Decode the true rank by summing the cumulative 0/1 encoding.
        y_true = tf.cast(tf.reduce_sum(y_true, axis=1), y_pred.dtype)
        # remove all dimensions of size 1 (e.g., from [[1], [2]], to [1, 2])
        y_true = tf.squeeze(y_true)
        if sample_weight is not None:
            values = tf.abs(y_true - labels_v2)
            sample_weight = tf.cast(tf.squeeze(sample_weight), y_pred.dtype)
            sample_weight = tf.broadcast_to(sample_weight, values.shape)
            values = tf.multiply(values, sample_weight)
            self.maes.assign_add(tf.reduce_sum(values))
            self.count.assign_add(tf.reduce_sum(sample_weight))
        else:
            self.maes.assign_add(tf.reduce_sum(tf.abs(y_true - labels_v2)))
            self.count.assign_add(tf.cast(tf.size(y_true), tf.float32))
    def result(self):
        """Current mean absolute error (0 when nothing accumulated yet)."""
        return tf.math.divide_no_nan(self.maes, self.count)
    def reset_state(self):
        """Resets all of the metric state variables at the start of each epoch."""
        self.maes.assign(0.0)
        self.count.assign(0.0)
    def get_config(self):
        """Returns the serializable config of the metric."""
        config = {}
        base_config = super().get_config()
        return {**base_config, **config}
class SparseOrdinalMeanAbsoluteError(OrdinalMeanAbsoluteError):
    """Computes mean absolute error for ordinal labels.
    Unlike the parent class, `y_true` is taken as a plain (sparse) integer
    rank rather than a CONDOR cumulative encoding."""
    def __init__(self, name="mean_absolute_error_labels",
                 **kwargs):
        """Creates a `OrdinalMeanAbsoluteError` instance."""
        super().__init__(name=name, **kwargs)
    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates the absolute rank error of one batch.
        Args:
          y_true: sparse ordinal labels (integer ranks).
          y_pred: cumulative logits from a CondorOrdinal layer.
          sample_weight (optional): per-sample weights.
        """
        # Predict the label as in Cao et al. - using cumulative probabilities
        cum_probs = tf.math.cumprod(
            tf.math.sigmoid(y_pred),
            axis=1)  # tf.map_fn(tf.math.sigmoid, y_pred)
        # Calculate the labels using the style of Cao et al.
        above_thresh = tf.map_fn(
            lambda x: tf.cast(
                x > 0.5,
                tf.float32),
            cum_probs)
        # Sum across columns to estimate how many cumulative thresholds are
        # passed.
        labels_v2 = tf.reduce_sum(above_thresh, axis=1)
        # Sparse labels are already ranks; only the dtype needs aligning.
        y_true = tf.cast(y_true, y_pred.dtype)
        # remove all dimensions of size 1 (e.g., from [[1], [2]], to [1, 2])
        y_true = tf.squeeze(y_true)
        if sample_weight is not None:
            values = tf.abs(y_true - labels_v2)
            sample_weight = tf.cast(tf.squeeze(sample_weight), y_pred.dtype)
            sample_weight = tf.broadcast_to(sample_weight, values.shape)
            values = tf.multiply(values, sample_weight)
            self.maes.assign_add(tf.reduce_sum(values))
            self.count.assign_add(tf.reduce_sum(sample_weight))
        else:
            self.maes.assign_add(tf.reduce_sum(tf.abs(y_true - labels_v2)))
            self.count.assign_add(tf.cast(tf.size(y_true), tf.float32))
class OrdinalAccuracy(tf.keras.metrics.Metric):
    """Computes accuracy for ordinal labels (tolerance is allowed rank
    distance to be considered 'correct' predictions)."""
    def __init__(self, name=None,
                 tolerance=0,
                 **kwargs):
        """Creates a `OrdinalAccuracy` instance."""
        if name is not None:
            super().__init__(name=name, **kwargs)
        else:
            # Default name encodes the tolerance, e.g. "ordinal_accuracy_tol1".
            super().__init__(name="ordinal_accuracy_tol"+str(tolerance),
                             **kwargs)
        # Running totals: (weighted) correct count and total count.
        self.accs = self.add_weight(name='accs', initializer='zeros')
        self.count = self.add_weight(name='count', initializer='zeros')
        self.tolerance = tolerance
    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates tolerance-accuracy for one batch.
        Args:
          y_true: CONDOR-encoded ordinal labels (cumulative 0/1 columns).
          y_pred: Cumulative logits from a CondorOrdinal layer.
          sample_weight (optional): per-sample weights.
        """
        # Predict the label as in Cao et al. - using cumulative probabilities
        cum_probs = tf.math.cumprod(
            tf.math.sigmoid(y_pred),
            axis=1)  # tf.map_fn(tf.math.sigmoid, y_pred)
        # Calculate the labels using the style of Cao et al.
        above_thresh = tf.map_fn(
            lambda x: tf.cast(
                x > 0.5,
                tf.float32),
            cum_probs)
        # Sum across columns to estimate how many cumulative thresholds are
        # passed.
        labels_v2 = tf.reduce_sum(above_thresh, axis=1)
        # Decode the true rank by summing the cumulative 0/1 encoding.
        y_true = tf.cast(tf.reduce_sum(y_true, axis=1), y_pred.dtype)
        # remove all dimensions of size 1 (e.g., from [[1], [2]], to [1, 2])
        y_true = tf.squeeze(y_true)
        if sample_weight is not None:
            values = tf.cast(tf.less_equal(
                tf.abs(y_true-labels_v2),tf.cast(self.tolerance,y_pred.dtype)),
                y_pred.dtype)
            sample_weight = tf.cast(tf.squeeze(sample_weight), y_pred.dtype)
            sample_weight = tf.broadcast_to(sample_weight, values.shape)
            values = tf.multiply(values, sample_weight)
            self.accs.assign_add(tf.reduce_sum(values))
            self.count.assign_add(tf.reduce_sum(sample_weight))
        else:
            self.accs.assign_add(tf.reduce_sum(tf.cast(tf.less_equal(
                tf.abs(y_true-labels_v2),tf.cast(self.tolerance,y_pred.dtype)),
                y_pred.dtype)))
            self.count.assign_add(tf.cast(tf.size(y_true), tf.float32))
    def result(self):
        """Current accuracy (0 when nothing accumulated yet)."""
        return tf.math.divide_no_nan(self.accs, self.count)
    def reset_state(self):
        """Resets all of the metric state variables at the start of each epoch."""
        self.accs.assign(0.0)
        self.count.assign(0.0)
    def get_config(self):
        """Returns the serializable config of the metric."""
        config = {'tolerance': self.tolerance}
        base_config = super().get_config()
        return {**base_config, **config}
class SparseOrdinalAccuracy(OrdinalAccuracy):
    """Computes accuracy for ordinal labels (tolerance is allowed rank
    distance to be considered 'correct' predictions).
    Unlike the parent class, `y_true` is taken as a plain (sparse) integer
    rank rather than a CONDOR cumulative encoding."""
    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates tolerance-accuracy for one batch.
        Args:
          y_true: sparse ordinal labels (integer ranks).
          y_pred: cumulative logits from a CondorOrdinal layer.
          sample_weight (optional): per-sample weights.
        """
        # Predict the label as in Cao et al. - using cumulative probabilities
        cum_probs = tf.math.cumprod(
            tf.math.sigmoid(y_pred),
            axis=1)  # tf.map_fn(tf.math.sigmoid, y_pred)
        # Calculate the labels using the style of Cao et al.
        above_thresh = tf.map_fn(
            lambda x: tf.cast(
                x > 0.5,
                tf.float32),
            cum_probs)
        # Sum across columns to estimate how many cumulative thresholds are
        # passed.
        labels_v2 = tf.reduce_sum(above_thresh, axis=1)
        # Sparse labels are already ranks; only the dtype needs aligning.
        y_true = tf.cast(y_true, y_pred.dtype)
        # remove all dimensions of size 1 (e.g., from [[1], [2]], to [1, 2])
        y_true = tf.squeeze(y_true)
        if sample_weight is not None:
            values = tf.cast(tf.less_equal(
                tf.abs(y_true-labels_v2),tf.cast(self.tolerance,y_pred.dtype)),
                y_pred.dtype)
            sample_weight = tf.cast(tf.squeeze(sample_weight), y_pred.dtype)
            sample_weight = tf.broadcast_to(sample_weight, values.shape)
            values = tf.multiply(values, sample_weight)
            self.accs.assign_add(tf.reduce_sum(values))
            self.count.assign_add(tf.reduce_sum(sample_weight))
        else:
            self.accs.assign_add(tf.reduce_sum(tf.cast(tf.less_equal(
                tf.abs(y_true-labels_v2),tf.cast(self.tolerance,y_pred.dtype)),
                y_pred.dtype)))
            self.count.assign_add(tf.cast(tf.size(y_true), tf.float32))
| 112 | 0 | 54 |
c28dee9c33942755f7781bf26747522afc2dd87d | 529 | py | Python | code-everyday-challenge/n62_the_king.py | ved93/deliberate-practice-challenges | 2fccdbb9d2baaa16f888055c081a8d04804c0045 | [
"MIT"
] | null | null | null | code-everyday-challenge/n62_the_king.py | ved93/deliberate-practice-challenges | 2fccdbb9d2baaa16f888055c081a8d04804c0045 | [
"MIT"
] | null | null | null | code-everyday-challenge/n62_the_king.py | ved93/deliberate-practice-challenges | 2fccdbb9d2baaa16f888055c081a8d04804c0045 | [
"MIT"
] | null | null | null |
#https://www.geeksforgeeks.org/puzzle-maximum-number-kings-chessboard-without-check/
if __name__ == '__main__':
print(main(9,3)) | 15.558824 | 84 | 0.449905 |
#https://www.geeksforgeeks.org/puzzle-maximum-number-kings-chessboard-without-check/
def main(l,w):
l, w = max(l,w), min(w,l)
result = 0
count = 0
mn=min(l // 3,w//3)
count += mn*8
bl = l%3
if bl == 2:
count += (w//3)*5
count+=(w%3)*2-1
if bl == 1:
count += (w//3)*2
bw = w%3
if bw == 2:
count += (l//3)*5
count+=(l%3)*2-1
if bw == 1:
count += (l//3)*2
print(count)
if __name__ == '__main__':
print(main(9,3)) | 366 | 0 | 22 |
a7287aeae17119d193ebe8345b7de35f2d5dd0af | 333 | py | Python | testapp/wagtail_wordpress_importer/migrations/0042_delete_customfieldsgrouplocation.py | nickmoreton/wagtail_wordpress_importer | fbe6b60ae624edac3f42a62ce30af4a0c548b4ed | [
"MIT"
] | null | null | null | testapp/wagtail_wordpress_importer/migrations/0042_delete_customfieldsgrouplocation.py | nickmoreton/wagtail_wordpress_importer | fbe6b60ae624edac3f42a62ce30af4a0c548b4ed | [
"MIT"
] | null | null | null | testapp/wagtail_wordpress_importer/migrations/0042_delete_customfieldsgrouplocation.py | nickmoreton/wagtail_wordpress_importer | fbe6b60ae624edac3f42a62ce30af4a0c548b4ed | [
"MIT"
] | null | null | null | # Generated by Django 3.1.4 on 2021-01-10 21:41
from django.db import migrations
| 19.588235 | 66 | 0.648649 | # Generated by Django 3.1.4 on 2021-01-10 21:41
from django.db import migrations
class Migration(migrations.Migration):
    """Removes the CustomFieldsGroupLocation model (and its table)."""
    dependencies = [
        ('wagtail_wordpress_importer', '0041_auto_20210110_0628'),
    ]
    operations = [
        migrations.DeleteModel(
            name='CustomFieldsGroupLocation',
        ),
    ]
| 0 | 227 | 23 |
8aff30b1325031846426585dc6e6d56c20755efe | 1,601 | py | Python | systraymgr.py | DawningW/My-Minisite-Server | 3a44656d24cf91e7d2706aea289217a903e109f3 | [
"MIT"
] | 1 | 2020-02-21T15:56:54.000Z | 2020-02-21T15:56:54.000Z | systraymgr.py | DawningW/My-Minisite-Server | 3a44656d24cf91e7d2706aea289217a903e109f3 | [
"MIT"
] | 1 | 2020-02-10T07:15:39.000Z | 2020-02-10T07:15:39.000Z | systraymgr.py | DawningW/My-Minisite-Server | 3a44656d24cf91e7d2706aea289217a903e109f3 | [
"MIT"
] | null | null | null | # coding=utf-8
import os
import threading
import logging
import utils
# SysTrayIcon is Windows-only, so only import it on Windows.
if utils.getSystem() == utils.System.WINDOWS:
    from SysTrayIcon import SysTrayIcon
trayThread = None  # daemon thread running the tray loop (see initTray)
tray = None  # SysTrayIcon instance while the tray exists
show = True  # whether the main window is currently visible
def initTray():
    "Start a daemon thread that creates and runs the system tray icon."
    logging.info("Start a new thread to manage system tray.")
    global trayThread
    trayThread = threading.Thread(target = runTray, daemon = True)
    trayThread.start()
    return
def runTray():
    "Create the system tray icon and run its event loop (Windows only)."
    global tray
    if utils.getSystem() == utils.System.WINDOWS:
        logging.info("Init system tray for windows.")
        # (label, icon, callback) menu entries; the labels are user-facing
        # Chinese strings ("show/hide", "quit") and must stay unchanged.
        menuOptions = (("显示/隐藏", None, onOptionClicked), ("退出", None, onOptionClicked))
        tray = SysTrayIcon("./icon.ico", "我的热点新闻服务器", onTrayClicked, menuOptions)
        tray.loop()  # blocks this thread until the tray is closed
    elif utils.getSystem() == utils.System.LINUX:
        logging.info("System tray doesn't support linux.")
    elif utils.getSystem() == utils.System.MACOS:
        logging.info("System tray doesn't support macOS.")
    else:
        logging.info("System tray doesn't support this system.")
    return
def removeTray():
    "Close the tray icon (if any) and clear the module-level reference."
    global tray
    if tray is not None:
        if utils.getSystem() == utils.System.WINDOWS:
            tray.close()
        tray = None
    return
def onTrayClicked():
    "Toggle the main window between shown and hidden on a tray click."
    global show
    if show:
        utils.hideWindow()
    else:
        utils.showWindow()
    show = not show
    return
def onOptionClicked(id):
    "Dispatch a tray menu selection: 0 = toggle window, 1 = quit."
    # `id` shadows the builtin, but it is part of the callback signature.
    if id == 0:
        onTrayClicked()
    elif id == 1:
        # _thread.interrupt_main() # seems ineffective while reading input
        removeTray()
        os._exit(0)  # hard exit: skips atexit handlers and finally blocks
    return
| 24.257576 | 87 | 0.632105 | # coding=utf-8
import os
import threading
import logging
import utils
# SysTrayIcon is Windows-only, so only import it on Windows.
if utils.getSystem() == utils.System.WINDOWS:
    from SysTrayIcon import SysTrayIcon
trayThread = None  # daemon thread running the tray loop (see initTray)
tray = None  # SysTrayIcon instance while the tray exists
show = True  # whether the main window is currently visible
def initTray():
    "Start a daemon thread that creates and runs the system tray icon."
    logging.info("Start a new thread to manage system tray.")
    global trayThread
    trayThread = threading.Thread(target = runTray, daemon = True)
    trayThread.start()
    return
def runTray():
    "Create the system tray icon and run its event loop (Windows only)."
    global tray
    if utils.getSystem() == utils.System.WINDOWS:
        logging.info("Init system tray for windows.")
        # (label, icon, callback) menu entries; the labels are user-facing
        # Chinese strings ("show/hide", "quit") and must stay unchanged.
        menuOptions = (("显示/隐藏", None, onOptionClicked), ("退出", None, onOptionClicked))
        tray = SysTrayIcon("./icon.ico", "我的热点新闻服务器", onTrayClicked, menuOptions)
        tray.loop()  # blocks this thread until the tray is closed
    elif utils.getSystem() == utils.System.LINUX:
        logging.info("System tray doesn't support linux.")
    elif utils.getSystem() == utils.System.MACOS:
        logging.info("System tray doesn't support macOS.")
    else:
        logging.info("System tray doesn't support this system.")
    return
def removeTray():
    "Close the tray icon (if any) and clear the module-level reference."
    global tray
    if tray is not None:
        if utils.getSystem() == utils.System.WINDOWS:
            tray.close()
        tray = None
    return
def onTrayClicked():
    "Toggle the main window between shown and hidden on a tray click."
    global show
    if show:
        utils.hideWindow()
    else:
        utils.showWindow()
    show = not show
    return
def onOptionClicked(id):
    "Dispatch a tray menu selection: 0 = toggle window, 1 = quit."
    # `id` shadows the builtin, but it is part of the callback signature.
    if id == 0:
        onTrayClicked()
    elif id == 1:
        # _thread.interrupt_main() # seems ineffective while reading input
        removeTray()
        os._exit(0)  # hard exit: skips atexit handlers and finally blocks
    return
| 0 | 0 | 0 |
07bcdf98fadb839414b17802415bfa04081f4e76 | 315 | py | Python | 0072 Invert Tree.py | ansabgillani/binarysearchcomproblems | 12fe8632f8cbb5058c91a55bae53afa813a3247e | [
"MIT"
] | 1 | 2020-12-29T21:17:26.000Z | 2020-12-29T21:17:26.000Z | 0072 Invert Tree.py | ansabgillani/binarysearchcomproblems | 12fe8632f8cbb5058c91a55bae53afa813a3247e | [
"MIT"
] | null | null | null | 0072 Invert Tree.py | ansabgillani/binarysearchcomproblems | 12fe8632f8cbb5058c91a55bae53afa813a3247e | [
"MIT"
] | 4 | 2021-09-09T17:42:43.000Z | 2022-03-18T04:54:03.000Z | # class Tree:
# def __init__(self, val, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
| 28.636364 | 81 | 0.561905 | # class Tree:
# def __init__(self, val, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    """Binary-tree inversion (mirror) done iteratively with an explicit
    stack; the tree is mutated in place and its root returned."""
    def solve(self, root):
        pending = [root]
        while pending:
            node = pending.pop()
            if node:
                node.left, node.right = node.right, node.left
                pending.append(node.left)
                pending.append(node.right)
        return root
| 124 | -6 | 48 |
e3245e7e8922075b4ab710aae52ceed5897380aa | 720 | py | Python | spkcspider/apps/spider/management/commands/update_dynamic_content.py | devkral/spkbspider | 97e448b4da412acebd66c4469c7fcdd07bf90ed2 | [
"MIT"
] | 5 | 2019-06-24T14:15:54.000Z | 2021-05-14T23:16:31.000Z | spkcspider/apps/spider/management/commands/update_dynamic_content.py | devkral/spkbspider | 97e448b4da412acebd66c4469c7fcdd07bf90ed2 | [
"MIT"
] | 2 | 2018-06-19T09:56:18.000Z | 2018-11-20T12:02:44.000Z | spkcspider/apps/spider/management/commands/update_dynamic_content.py | devkral/spkbspider | 97e448b4da412acebd66c4469c7fcdd07bf90ed2 | [
"MIT"
] | null | null | null | import logging
from django.core.management.base import BaseCommand
| 34.285714 | 68 | 0.65 | import logging
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Fires the `update_dynamic` signal so every listener can refresh
    derived spider content (permissions, content, ...)."""
    help = 'Update dynamic spider content e.g. permissions, content'
    def handle(self, *args, **options):
        """Entry point for `manage.py update_dynamic_content`."""
        # Imported lazily so the Django app registry is fully loaded first.
        from spkcspider.apps.spider.signals import update_dynamic
        self.log = logging.getLogger(__name__)
        # BUG FIX: iterate over a copy -- removing handlers from the list
        # being iterated skips every other handler.
        for handler in list(self.log.handlers):
            self.log.removeHandler(handler)
        # Route this command's log output to the command's stdout.
        self.log.addHandler(logging.StreamHandler(self.stdout))
        # send_robust() traps receiver exceptions and returns them as results.
        results = update_dynamic.send_robust(self)
        for (receiver, result) in results:
            if isinstance(result, Exception):
                self.log.error(
                    "%s failed", receiver, exc_info=result
                )
| 526 | 102 | 23 |
b41bd65de72b54228d27b15eb7cb53acfa278bd8 | 8,458 | py | Python | rflow/command.py | otaviog/rflow | 8594b9c3e9e1da61382f80b66e749cf7b8a33676 | [
"MIT"
] | 6 | 2019-08-26T11:36:58.000Z | 2020-12-15T21:01:24.000Z | rflow/command.py | otaviog/rflow | 8594b9c3e9e1da61382f80b66e749cf7b8a33676 | [
"MIT"
] | null | null | null | rflow/command.py | otaviog/rflow | 8594b9c3e9e1da61382f80b66e749cf7b8a33676 | [
"MIT"
] | 1 | 2020-04-13T08:05:27.000Z | 2020-04-13T08:05:27.000Z | """Command-line interfacing workflows"""
import argparse
import os
import sys
import imp
import inspect
import argcomplete
from . import core
from . common import WorkflowError, WORKFLOW_DEFAULT_FILENAME
from . import decorators
from . userargument import USER_ARGS_CONTEXT
from . _ui import ui
from . import _util as util
def open_graph(directory, graph_name, wf_filename=WORKFLOW_DEFAULT_FILENAME):
"""Opens an existing workflow and return the specified graph instance.
Args:
directory (str): A directory containg a `workflow.py` file, or
a file named by the `wf_filename` argument.
graph_name (str): The graph's name to open, see
:func:`rflow.decorators.graph`
wf_filename (str): The workflow python script. Default is
`"workflow.py"`.
Returns:
:obj:`rflow.core.Graph`: DAG object.
Raises:
:obj:`rflow.common.WorkflowError`: If the graph
isn't found.
`FileNotFoundError`: If the directory doesn't exists or if the
`workflow.py` or what passed to `wf_filename` does not
exists.
"""
if core.exists_graph(graph_name, directory):
return core.get_graph(graph_name, directory, existing=True)
graph_def_list = _get_all_graph_def(
os.path.abspath(directory), wf_filename)
defgraph_info_list = [graph_def for graph_def in graph_def_list
if graph_def.name == graph_name]
if not defgraph_info_list:
raise WorkflowError(
"Graph not {} found on directory {}. Available ones are: {}".format(
graph_name, directory, ', '.join(
[deco.name for _1, _2, deco in defgraph_info_list])))
else:
defgraph_info = defgraph_info_list[0]
defgraph_info.function()
return core.get_graph(graph_name, directory, existing=True)
ACTIONS = ['run', 'touch', 'print-run', 'viz-dag', 'help', 'clean']
def main(argv=None):
"""Command-line auto main generator.
Generates a command-line main for executing the graphs defined in
the current source file. See the decorator
:class:`rflow.decorators.graph` for how to define
graphs. The default behavior is quit the process when an error is
encountered.
For example::
@srwf.graph()
def workflow1(g):
g.add = Add()
g.add.args.a = 1
g.add.args.b = 2
g.sub = Sub(srwf.FSResource('sub.pkl'))
g.sub.args.a = 8
g.sub.args.b = g.add
if __name__ == '__main__':
srwf.command.main()
In a shell execute::
$ srwf workflow1 run sub
For passing custom arguments by command-line, use the class
:class:`rflow.userargument.UserArgument`.
Args:
args (str, optional): sys.args like command-line arguments.
Returns:
int: exit code.
"""
# pylint: disable=too-many-return-statements
try:
all_graphs = _get_all_graph_def(os.path.abspath(os.path.curdir),
WORKFLOW_DEFAULT_FILENAME)
except WorkflowError as err:
print(str(err))
return 1
arg_parser = argparse.ArgumentParser(
description="RFlow workflow runner",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
arg_parser.add_argument(
'graph', choices=[graph.name for graph in all_graphs])
arg_parser.add_argument('action', choices=ACTIONS)
argcomplete.autocomplete(arg_parser)
if not argv:
argv = sys.argv
args = arg_parser.parse_args(argv[1:3])
if int(os.environ.get("RFLOW_DEBUG", 0)) == 1:
ui.complete_traceback = True
abs_path = os.path.abspath('.')
graph = open_graph(abs_path, args.graph)
argv = argv[3:]
if args.action == 'print-run':
raise NotImplementedError()
elif args.action == 'run':
return _run_main(graph, argv)
elif args.action == 'touch':
return _touch_main(graph, argv)
elif args.action == 'clean':
return _clean_main(graph, argv)
elif args.action == 'help':
return _help_main(graph, argv)
elif args.action == 'viz-dag':
return _viz_main(graph, argv)
return 1
| 28.478114 | 95 | 0.657011 | """Command-line interfacing workflows"""
import argparse
import os
import sys
import imp
import inspect
import argcomplete
from . import core
from . common import WorkflowError, WORKFLOW_DEFAULT_FILENAME
from . import decorators
from . userargument import USER_ARGS_CONTEXT
from . _ui import ui
from . import _util as util
def _importdir(path, workflow_fname):
    """Load and return the workflow module found at *path*/*workflow_fname*.

    Raises:
        WorkflowError: if the workflow file does not exist or cannot be
            read.
    """
    # Dead code removed: the previous version computed a dotted
    # `module_name` from the path but never used it -- load_source()
    # below hardcodes the module name 'workflow'.
    path = os.path.abspath(path)
    fname = os.path.join(path, workflow_fname)
    if not os.path.exists(fname):
        raise WorkflowError('Workflow {} file not found'.format(fname))
    try:
        # NOTE: `imp` is deprecated on Python 3; importlib would be the
        # modern replacement, but the existing mechanism is kept.
        return imp.load_source('workflow', fname)
    except IOError:
        raise WorkflowError('Workflow {} file not found'.format(fname))
def _get_decorator(func, class_instance):
    """Return the first closure-cell value of *func* that is an instance
    of *class_instance*, or ``None`` when *func* is not a plain function,
    has no closure, or captures no such instance."""
    if not inspect.isfunction(func):
        return None
    cells = getattr(func, '__closure__', None)
    if cells is None:
        return None
    for cell in cells:
        captured = cell.cell_contents
        if isinstance(captured, class_instance):
            return captured
    return None
class _GraphDef:
    """Record of one graph-decorated function discovered in a workflow
    module: its name, the function itself, and its decorator object."""

    def __init__(self, graph_name, function, decorator_obj):
        self.name = graph_name      # graph name (the function's name)
        self.function = function    # the decorated function; calling it registers the graph
        # The original attribute name is misspelled; keep it for backward
        # compatibility and expose a correctly spelled alias.
        self.drecorator_obj = decorator_obj
        self.decorator_obj = decorator_obj
def _get_all_graph_def(abs_path, workflow_fname):
    """Import the workflow module at *abs_path* and collect a _GraphDef
    for every member decorated with :func:`decorators.graph`."""
    with util.work_directory(abs_path):
        module = _importdir(abs_path, workflow_fname)
        found = []
        for member_name, member in inspect.getmembers(module):
            deco = _get_decorator(member, decorators.graph)
            if deco is not None:
                found.append(_GraphDef(member_name, member, deco))
        return found
def open_graph(directory, graph_name, wf_filename=WORKFLOW_DEFAULT_FILENAME):
    """Opens an existing workflow and return the specified graph instance.

    Args:
        directory (str): A directory containg a `workflow.py` file, or
         a file named by the `wf_filename` argument.

        graph_name (str): The graph's name to open, see
         :func:`rflow.decorators.graph`

        wf_filename (str): The workflow python script. Default is
         `"workflow.py"`.

    Returns:
        :obj:`rflow.core.Graph`: DAG object.

    Raises:
        :obj:`rflow.common.WorkflowError`: If the graph
         isn't found.

        `FileNotFoundError`: If the directory doesn't exists or if the
         `workflow.py` or what passed to `wf_filename` does not
         exists.
    """
    if core.exists_graph(graph_name, directory):
        return core.get_graph(graph_name, directory, existing=True)

    graph_def_list = _get_all_graph_def(
        os.path.abspath(directory), wf_filename)
    defgraph_info_list = [graph_def for graph_def in graph_def_list
                          if graph_def.name == graph_name]
    if not defgraph_info_list:
        # Bug fix: the error message previously listed names from the
        # *filtered* list (always empty on this branch) and unpacked
        # 3-tuples from _GraphDef objects, which raised instead of
        # reporting the available graphs.
        raise WorkflowError(
            "Graph {} not found on directory {}. Available ones are: {}".format(
                graph_name, directory,
                ', '.join(graph_def.name for graph_def in graph_def_list)))

    # Calling the decorated function registers the graph with `core`.
    defgraph_info = defgraph_info_list[0]
    defgraph_info.function()
    return core.get_graph(graph_name, directory, existing=True)
def _run_main(graph, argv):
    """CLI entry point for the ``run`` action: parse *argv* and execute
    the selected node of *graph*."""
    arg_parser = argparse.ArgumentParser(
        description="Executes the workflow to a node.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Only nodes marked as visible (show flag) are offered on the CLI.
    node_names = graph.get_node_names(filter_show=True)
    arg_parser.add_argument(
        'node', choices=node_names,
        metavar='node', help=', '.join(node_names))
    arg_parser.add_argument(
        '--redo', '-r',
        help="Redo the last node, whatever even if it's updated",
        action='store_true')
    # Expose user-declared arguments (UserArgument) as CLI options,
    # silently skipping duplicate names.
    name_set = set()
    for name, kwargs in (
            USER_ARGS_CONTEXT.user_arguments):
        # TODO compare if they're exact the same or
        # raise an exception.
        if name in name_set:
            continue
        arg_parser.add_argument(name, **kwargs)
        name_set.add(name)
    args = arg_parser.parse_args(argv)
    # Make the parsed values visible to UserArgument instances.
    USER_ARGS_CONTEXT.register_argparse_args(args)
    goal_node = graph[args.node]
    goal_node.call(redo=args.redo)
def _clean_main(graph, argv):
    """CLI entry point for the ``clean`` action: drop a node's resources
    and its recorded execution parameters."""
    parser = argparse.ArgumentParser(
        description="Clean the node resources and last execution parameters.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    names = graph.get_node_names()
    parser.add_argument(
        'node', choices=graph.get_node_names(),
        metavar='node', help=', '.join(names))
    parsed = parser.parse_args(argv)
    graph[parsed.node].clear()
def _touch_main(graph, argv):
    """CLI entry point for the ``touch`` action: record the node's current
    parameters as its last ones, without executing the node."""
    parser = argparse.ArgumentParser(
        description="Set the node's last parameters to the current ones without executing it.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    visible = graph.get_node_names(filter_show=True)
    parser.add_argument(
        'node', choices=visible,
        metavar='node', help=', '.join(visible))
    parsed = parser.parse_args(argv)
    USER_ARGS_CONTEXT.register_argparse_args(parsed)
    graph[parsed.node].touch()
def _help_main(graph, argv):
    """CLI entry point for the ``help`` action: print the docstring of
    the selected node."""
    arg_parser = argparse.ArgumentParser()
    node_names = graph.get_node_names()
    arg_parser.add_argument(
        'node', choices=graph.get_node_names(),
        metavar='node', help=', '.join(node_names))
    args = arg_parser.parse_args(argv)
    goal_node = graph[args.node]
    # Bug fix: a node without a docstring has __doc__ == None, and
    # sys.stdout.write(None) raises TypeError -- fall back to "".
    sys.stdout.write(goal_node.__doc__ or '')
    sys.stdout.write('\n')
def _viz_main(graph, argv):
    """CLI entry point for the ``viz-dag`` action: render the DAG with
    graphviz, either to a file (--output) or in a viewer window."""
    from .viz import dag2dot
    parser = argparse.ArgumentParser()
    parser.add_argument('--output', '-o')
    opts = parser.parse_args(argv)
    dot = dag2dot(graph)
    if opts.output:
        dot.render(opts.output, cleanup=True)
    else:
        dot.view(cleanup=True)
# Sub-commands understood by main(); each (except the unimplemented
# 'print-run') dispatches to a matching _<action>_main handler.
ACTIONS = ['run', 'touch', 'print-run', 'viz-dag', 'help', 'clean']
def main(argv=None):
    """Command-line auto main generator.

    Generates a command-line main for executing the graphs defined in
    the current source file. See the decorator
    :class:`rflow.decorators.graph` for how to define
    graphs. The default behavior is quit the process when an error is
    encountered.

    For example::

        @srwf.graph()
        def workflow1(g):
            g.add = Add()
            g.add.args.a = 1
            g.add.args.b = 2

            g.sub = Sub(srwf.FSResource('sub.pkl'))
            g.sub.args.a = 8
            g.sub.args.b = g.add

        if __name__ == '__main__':
            srwf.command.main()

    In a shell execute::

        $ srwf workflow1 run sub

    For passing custom arguments by command-line, use the class
    :class:`rflow.userargument.UserArgument`.

    Args:
        args (str, optional): sys.args like command-line arguments.

    Returns:
        int: exit code.
    """
    # pylint: disable=too-many-return-statements
    # Discover the graphs defined by workflow.py in the current directory.
    try:
        all_graphs = _get_all_graph_def(os.path.abspath(os.path.curdir),
                                        WORKFLOW_DEFAULT_FILENAME)
    except WorkflowError as err:
        print(str(err))
        return 1

    arg_parser = argparse.ArgumentParser(
        description="RFlow workflow runner",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument(
        'graph', choices=[graph.name for graph in all_graphs])
    arg_parser.add_argument('action', choices=ACTIONS)
    argcomplete.autocomplete(arg_parser)

    if not argv:
        argv = sys.argv
    # Only <graph> and <action> are parsed here; the rest of argv is
    # forwarded untouched to the action-specific parser below.
    args = arg_parser.parse_args(argv[1:3])

    # RFLOW_DEBUG=1 turns on full tracebacks in the UI layer.
    if int(os.environ.get("RFLOW_DEBUG", 0)) == 1:
        ui.complete_traceback = True

    abs_path = os.path.abspath('.')
    graph = open_graph(abs_path, args.graph)

    argv = argv[3:]
    # Dispatch to the handler for the chosen action.
    if args.action == 'print-run':
        raise NotImplementedError()
    elif args.action == 'run':
        return _run_main(graph, argv)
    elif args.action == 'touch':
        return _touch_main(graph, argv)
    elif args.action == 'clean':
        return _clean_main(graph, argv)
    elif args.action == 'help':
        return _help_main(graph, argv)
    elif args.action == 'viz-dag':
        return _viz_main(graph, argv)

    return 1
| 3,983 | -5 | 233 |
d17c1ec8c4e434997012bd6fecb4cf01d7f5f931 | 1,965 | py | Python | catkin_ws/src/wheel_odom/src/read_encoder.py | AndySer37/duckietown_text | cc7ae0d48c182c991a2afa67bf40d3f0f0e5cd49 | [
"CC-BY-2.0"
] | null | null | null | catkin_ws/src/wheel_odom/src/read_encoder.py | AndySer37/duckietown_text | cc7ae0d48c182c991a2afa67bf40d3f0f0e5cd49 | [
"CC-BY-2.0"
] | null | null | null | catkin_ws/src/wheel_odom/src/read_encoder.py | AndySer37/duckietown_text | cc7ae0d48c182c991a2afa67bf40d3f0f0e5cd49 | [
"CC-BY-2.0"
] | null | null | null | #!/usr/bin/env python
import rospy
import tf
import serial
import numpy as np
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point
from std_msgs.msg import Int64
global x, y, theta, v_L, v_R, v_x, v_y, omega
x = 0
y = 0
theta = 0
v_L = 0
v_R = 0
v_x = 0
v_y = 0
omega = 0
pub_tf = False # Use estimate result as tf
if(pub_tf):
br = tf.TransformBroadcaster()
if __name__ == '__main__':
rospy.init_node('whel_odom_node', anonymous = False)
port = rospy.get_param("~port", "/dev/ttyACM0") # default port: /dev/ttyUSB0
ard = serial.Serial(port, 9600)
rospy.Timer(rospy.Duration.from_sec(0.1), read_data) # 10Hz
rospy.spin()
| 26.554054 | 77 | 0.680916 | #!/usr/bin/env python
import rospy
import tf
import serial
import numpy as np
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Point
from std_msgs.msg import Int64
# Shared state between the serial-read callback and the optional tf/odom
# publishing.  NOTE(review): a `global` statement at module level is a
# no-op; the names below are ordinary module globals either way.
global x, y, theta, v_L, v_R, v_x, v_y, omega
x = 0      # estimated position, x (used in the odom pose below)
y = 0      # estimated position, y
theta = 0  # estimated heading (yaw)
v_L = 0    # presumably left-wheel velocity -- not used in this file, verify
v_R = 0    # presumably right-wheel velocity -- not used in this file, verify
v_x = 0    # presumably body velocity, x -- not used in this file, verify
v_y = 0    # presumably body velocity, y -- not used in this file, verify
omega = 0  # presumably angular velocity -- not used in this file, verify
pub_tf = False # Use estimate result as tf
if(pub_tf):
    # The broadcaster exists only when pub_tf is True; read_data() guards
    # every use of `br` with the same flag.
    br = tf.TransformBroadcaster()
def read_data(event):
    """Timer callback (10 Hz): drain the Arduino serial buffer, publish
    raw encoder counts, and assemble an odometry message.

    NOTE(review): the Odometry message built below is filled in but never
    published, and `seq` restarts at 0 on every call -- this branch looks
    unfinished; confirm intent before relying on odometry output.
    """
    # NOTE(review): publishers are re-created on every timer tick; they
    # would normally be created once at module scope.
    pub_L = rospy.Publisher("/encoder_L", Int64, queue_size = 1)
    pub_R = rospy.Publisher("/encoder_R", Int64, queue_size = 1)
    global str_
    str_ = str('')
    seq = 0
    # Consume every line currently pending on the serial port.
    while ard.inWaiting():
        str_ = ard.readline()
        split_str = str_.split(' ')
        if len(split_str) != 2:
            # Line does not split into exactly two fields: take the
            # pose/odometry branch instead of parsing encoder counts.
            global x, y, theta
            if(pub_tf):
                # Broadcast the current pose estimate as a map->odom tf.
                br.sendTransform((x, y, 0),
                    tf.transformations.quaternion_from_euler(0, 0, theta),
                    rospy.Time.now(),
                    'odom',
                    'map')
            # Build an Odometry message from the pose estimate.
            odom = Odometry()
            odom.header.seq = seq
            odom.header.stamp = rospy.Time.now()
            odom.header.frame_id = "odom"
            odom.child_frame_id = "base_link"
            odom.pose.pose.position = Point(x, y, 0.0)
            odom_quat = tf.transformations.quaternion_from_euler(0, 0, theta)
            odom.pose.pose.orientation.x = odom_quat[0]
            odom.pose.pose.orientation.y = odom_quat[1]
            odom.pose.pose.orientation.z = odom_quat[2]
            odom.pose.pose.orientation.w = odom_quat[3]
            odom.pose.covariance[0] = 0.2 # X
            odom.pose.covariance[7] = 0.2 # Y
            odom.pose.covariance[35] = 0.05 # Theta
            seq = seq + 1
        else:
            # Well-formed "<R> <L>" line: the first field is assigned to
            # the right encoder, the second to the left.
            try:
                print split_str[0],split_str[1]
                encoder_R = np.int64(split_str[0])
                encoder_L = np.int64(split_str[1])
                pub_L.publish(encoder_L)
                pub_R.publish(encoder_R)
                print("R: ", encoder_R,", L: " , encoder_L)
                seq = seq + 1
            except ValueError:
                # Ignore lines whose fields do not parse as integers.
                pass
if __name__ == '__main__':
    # NOTE(review): "whel" typo in the node name is runtime-visible on the
    # ROS graph, so it is left as-is here.
    rospy.init_node('whel_odom_node', anonymous = False)
    port = rospy.get_param("~port", "/dev/ttyACM0") # default port: /dev/ttyACM0
    # Module-level handle; read_data() relies on this global `ard`.
    ard = serial.Serial(port, 9600)
    rospy.Timer(rospy.Duration.from_sec(0.1), read_data) # 10Hz
    rospy.spin()
| 1,297 | 0 | 23 |
e2b0eb16ff59352823b96b85f477cffe67241cc6 | 309 | py | Python | umich-notebook/global_nbgrader_config.py | IllumiDesk/umich-stacks | 92161237f9031ee7e7689fd7b1177c2b5271560a | [
"MIT"
] | 1 | 2021-11-09T20:59:00.000Z | 2021-11-09T20:59:00.000Z | umich-notebook/global_nbgrader_config.py | IllumiDesk/umich-stacks | 92161237f9031ee7e7689fd7b1177c2b5271560a | [
"MIT"
] | 2 | 2021-08-07T04:01:16.000Z | 2021-08-08T00:18:25.000Z | umich-notebook/global_nbgrader_config.py | IllumiDesk/umich-stacks | 92161237f9031ee7e7689fd7b1177c2b5271560a | [
"MIT"
] | 1 | 2020-11-09T02:06:41.000Z | 2020-11-09T02:06:41.000Z | from nbgrader.auth import JupyterHubAuthPlugin
c = get_config()
c.Application.log_level = 30
c.Authenticator.plugin_class = JupyterHubAuthPlugin
c.Exchange.path_includes_course = True
c.Exchange.root = "/srv/nbgrader/exchange"
c.ExecutePreprocessor.iopub_timeout=1800
c.ExecutePreprocessor.timeout=3600
| 20.6 | 51 | 0.825243 | from nbgrader.auth import JupyterHubAuthPlugin
# Global nbgrader configuration (Traitlets).  `get_config()` is injected
# by the configuration loader at runtime.
c = get_config()
c.Application.log_level = 30  # 30 == logging.WARNING
# Use JupyterHub as the nbgrader authentication plugin.
c.Authenticator.plugin_class = JupyterHubAuthPlugin
# Exchange layout: include the course name in exchange paths, rooted at
# a shared system directory.
c.Exchange.path_includes_course = True
c.Exchange.root = "/srv/nbgrader/exchange"
# Autograding execution timeouts (seconds) -- generous for long notebooks.
c.ExecutePreprocessor.iopub_timeout=1800
c.ExecutePreprocessor.timeout=3600
| 0 | 0 | 0 |
ee3d0f2a05ffe897c50b9010e85f734270d74d14 | 5,826 | py | Python | CommitReveal.py | AleBuser/IOTA-Commit-Reveal | f9d00dfd56070b5a9a71addfc876bd628859731e | [
"MIT"
] | null | null | null | CommitReveal.py | AleBuser/IOTA-Commit-Reveal | f9d00dfd56070b5a9a71addfc876bd628859731e | [
"MIT"
] | null | null | null | CommitReveal.py | AleBuser/IOTA-Commit-Reveal | f9d00dfd56070b5a9a71addfc876bd628859731e | [
"MIT"
] | null | null | null | from iota import Iota
from iota import Address, ProposedTransaction, Tag, Transaction
from iota import TryteString
from iota import ProposedBundle
from iota.commands.extended import utils
from datetime import datetime
from pprint import pprint
import hashlib
import time
import random
import string
| 30.030928 | 146 | 0.651562 | from iota import Iota
from iota import Address, ProposedTransaction, Tag, Transaction
from iota import TryteString
from iota import ProposedBundle
from iota.commands.extended import utils
from datetime import datetime
from pprint import pprint
import hashlib
import time
import random
import string
class CommitRevealCheck(object):
    """Commit/reveal scheme on the IOTA Tangle.

    A statement is first published as a SHA-256 digest ("commit"); later
    the plain statement plus its one-time salt is published ("reveal"),
    and CheckReveal() verifies that the reveal re-hashes to the earlier
    commit.

    NOTE(review): Python 2 code (print statements).  The salt is drawn
    from `random`, which is not cryptographically secure -- `random.
    SystemRandom` (or `secrets` on Python 3) would be the safer choice.
    """
    # shared state (class-level defaults, overwritten per instance)
    NodeURL = ""
    Seed = ""
    TargetAddress = ""
    API = None
    ToReveal = "EMPTY"  # plain reveal string of the most recent commit

    # init class and IOTA API client
    def __init__(self, _url, _seed, _targetAddress):
        self.NodeURL = _url
        self.Seed = _seed
        self.TargetAddress = _targetAddress
        self.API = Iota(_url)

    # Build the reveal string and return its SHA-256 digest as trytes.
    # Reveal layout: <4-tryte length><9><statement trytes><9><salt>.
    # (SHA-256 is a hash, not encryption, despite the original wording.)
    def generateCommitHash(self, _statement, _salt):
        TrytesStatement = TryteString.from_string(_statement)
        StatementLen = len(TrytesStatement)
        # the format requires that the first 4 chars have the char length of the statement
        if StatementLen <= 9:
            # single digit: its tryte form is only 2 chars, pad with "99"
            # NOTE(review): on Python 2, bytes(StatementLen) is
            # str(StatementLen); on Python 3 it would be a zero-filled
            # buffer -- this code is Python 2 only.
            SignalLenInTrytes = TryteString.from_bytes(bytes(StatementLen)) + "99"
        elif StatementLen <= 99 :
            # two digits: the tryte form already has 4 chars
            SignalLenInTrytes = TryteString.from_bytes(bytes(StatementLen))
        # not more than 99 Trytes: longer statements don't fit the length field
        else:
            raise ValueError('Statement String needs to be less than 99 Trytes!')
        # plain reveal string
        reveal = str(SignalLenInTrytes + "9" + TrytesStatement + "9" + _salt)
        # keep it so RevealSignal() can publish it later
        self.ToReveal = reveal
        # hash the reveal string
        commit = hashlib.sha256(reveal).hexdigest()
        # publishable tryte form of the hex digest
        commitInTryts = TryteString.from_bytes(commit)
        return commitInTryts

    # Publish the commit (digest) of *_signal*; returns the bundle hash.
    def commitSignal(self, _signal):
        print "\nPreparing for new commit: "
        # generate random single-use salt (9 uppercase letters)
        salt = ''.join(random.choice(string.ascii_uppercase) for _ in range(9))
        # use salt to generate the digest (also stores the reveal string)
        TrytesToCommit = self.generateCommitHash(_signal, salt)
        # make IOTA transaction to store the commit on the Tangle
        revealBundle = str( self.Transact(TrytesToCommit, self.TargetAddress , "DNT9COMMIT") )
        return revealBundle

    # Publish the stored plain reveal string; returns the bundle hash.
    def RevealSignal(self):
        print "Preparing reveal: "
        # get plain reveal string and store it on the Tangle
        revealBundle = str(self.Transact(self.ToReveal, self.TargetAddress, "DNT9REVEAL"))
        print "Reveal Bundle: " + revealBundle
        return revealBundle

    # Send a zero-value transaction carrying *_message*; returns the
    # bundle hash that identifies it on the Tangle.
    def Transact(self, _message , _addr, _tag):
        # preparing transactions
        transfers = ProposedTransaction(address = Address(_addr), # 81 trytes long address
                                        message = _message,
                                        tag = _tag, # Up to 27 trytes
                                        value = 0)
        # a bundle needs at least a list of prepared transactions
        bundle = ProposedBundle(transactions=[transfers])
        # finalize: compute/normalize the bundle hash and copy it into each transaction
        bundle.finalize()
        # get two tips to be approved by this bundle
        gta = self.API.get_transactions_to_approve(depth=3)
        # bundle as trytes
        Trytes = bundle.as_tryte_strings()
        print "SENDING...."
        # proof of work / attach to the Tangle
        tip = self.API.attach_to_tangle(trunk_transaction=gta['trunkTransaction'], # first tip selected
                                        branch_transaction=gta['branchTransaction'], # second tip selected
                                        trytes=Trytes, # our finalized bundle in Trytes
                                        min_weight_magnitude=14) # minimum weight magnitude (PoW difficulty)
        # broadcast to the network (return value unused)
        res = self.API.broadcast_and_store(tip['trytes'])
        # return bundle hash
        return bundle.hash

    # Fetch both bundles from the Tangle and print whether the revealed
    # (statement, salt) re-hashes to the committed digest.
    def CheckReveal(self, _bundleCommit, _bundleReveal):
        # get reveal transaction object from the Tangle
        bundleHash = self.API.find_transactions(bundles=[_bundleReveal])
        lastTrytes = self.API.get_trytes(hashes = bundleHash["hashes"])
        transaction = Transaction.from_tryte_string(trytes = lastTrytes["trytes"][0])
        # message payload of the reveal
        message = transaction.signature_message_fragment
        # first 4 trytes encode the statement length
        statementLength = TryteString.decode(message[ : 4])
        # statement follows the "9" separator
        signal = str(TryteString.decode(message[5 : 5 + int(statementLength)]))
        # 9-char salt after the second "9" separator
        salt = str(message[6 + int(statementLength) : 6 + int(statementLength) + 9])
        # print results
        print "Revealed Data: "
        print "    Signal: " + signal
        print "    Salt: " + salt
        # recompute the commit digest from the revealed values
        ResultHash = self.generateCommitHash(signal, salt)
        print "Resulting Hash: " + str(ResultHash)
        # get commit transaction from the Tangle
        commited = self.API.find_transactions(bundles = [_bundleCommit])
        commitedTrytes = self.API.get_trytes(hashes = commited["hashes"])
        commitedTransaction = Transaction.from_tryte_string(trytes = commitedTrytes["trytes"][0])
        # committed digest occupies the first 128 trytes of the payload
        # (64 hex chars encoded at 2 trytes per byte)
        commitedMessage = str(commitedTransaction.signature_message_fragment[ :128])
        print "Commited Hash: " + str( commitedMessage )
        print "Commited on: " + str(datetime.fromtimestamp( commitedTransaction.timestamp))
        # the reveal is valid iff the digests match
        print "Is Equal to Commit: " + str( commitedMessage == ResultHash )
| 5,080 | 415 | 23 |
8647ff7e2ccc891de38e05613dc057b290d1ed18 | 32,540 | py | Python | tests/admin_changelist/tests.py | devops2014/djangosite | db77915c9fd35a203edd8206f702ee4082f04d4a | [
"BSD-3-Clause"
] | null | null | null | tests/admin_changelist/tests.py | devops2014/djangosite | db77915c9fd35a203edd8206f702ee4082f04d4a | [
"BSD-3-Clause"
] | null | null | null | tests/admin_changelist/tests.py | devops2014/djangosite | db77915c9fd35a203edd8206f702ee4082f04d4a | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.templatetags.admin_list import pagination
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.admin.views.main import ALL_VAR, SEARCH_VAR, ChangeList
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.utils import formats, six
from .admin import (
BandAdmin, ChildAdmin, ChordsBandAdmin, CustomPaginationAdmin,
CustomPaginator, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, DynamicListFilterChildAdmin,
DynamicSearchFieldsChildAdmin, FilteredChildAdmin, GroupAdmin,
InvitationAdmin, NoListDisplayLinksParentAdmin, ParentAdmin, QuartetAdmin,
SwallowAdmin, site as custom_site,
)
from .models import (
Band, Child, ChordsBand, ChordsMusician, CustomIdUser, Event, Genre, Group,
Invitation, Membership, Musician, OrderedObject, Parent, Quartet, Swallow,
UnorderedObject,
)
@override_settings(ROOT_URLCONF="admin_changelist.urls")
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
ROOT_URLCONF="admin_changelist.urls")
| 45.766526 | 164 | 0.653934 | from __future__ import unicode_literals
import datetime
from django.contrib import admin
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.templatetags.admin_list import pagination
from django.contrib.admin.tests import AdminSeleniumWebDriverTestCase
from django.contrib.admin.views.main import ALL_VAR, SEARCH_VAR, ChangeList
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.template import Context, Template
from django.test import TestCase, override_settings
from django.test.client import RequestFactory
from django.utils import formats, six
from .admin import (
BandAdmin, ChildAdmin, ChordsBandAdmin, CustomPaginationAdmin,
CustomPaginator, DynamicListDisplayChildAdmin,
DynamicListDisplayLinksChildAdmin, DynamicListFilterChildAdmin,
DynamicSearchFieldsChildAdmin, FilteredChildAdmin, GroupAdmin,
InvitationAdmin, NoListDisplayLinksParentAdmin, ParentAdmin, QuartetAdmin,
SwallowAdmin, site as custom_site,
)
from .models import (
Band, Child, ChordsBand, ChordsMusician, CustomIdUser, Event, Genre, Group,
Invitation, Membership, Musician, OrderedObject, Parent, Quartet, Swallow,
UnorderedObject,
)
@override_settings(ROOT_URLCONF="admin_changelist.urls")
class ChangeListTests(TestCase):
    def setUp(self):
        # Fresh RequestFactory per test for building admin view requests.
        self.factory = RequestFactory()
    def _create_superuser(self, username):
        """Create and return a superuser with the given username."""
        return User.objects.create(username=username, is_superuser=True)
    def _mocked_authenticated_request(self, url, user):
        """Build a GET request for *url* with *user* attached, mimicking
        what the authentication middleware would do."""
        request = self.factory.get(url)
        request.user = user
        return request
def test_select_related_preserved(self):
"""
Regression test for #10348: ChangeList.get_queryset() shouldn't
overwrite a custom select_related provided by ModelAdmin.get_queryset().
"""
m = ChildAdmin(Child, admin.site)
request = self.factory.get('/child/')
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
self.assertEqual(cl.queryset.query.select_related, {
'parent': {'name': {}}
})
    def test_select_related_as_tuple(self):
        """list_select_related given as a tuple of field names should end
        up as the matching select_related dict on the queryset."""
        ia = InvitationAdmin(Invitation, admin.site)
        request = self.factory.get('/invitation/')
        # NOTE(review): the model passed here is Child although the admin
        # is InvitationAdmin -- presumably it should be Invitation; verify.
        cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
                ia.list_filter, ia.date_hierarchy, ia.search_fields,
                ia.list_select_related, ia.list_per_page,
                ia.list_max_show_all, ia.list_editable, ia)
        self.assertEqual(cl.queryset.query.select_related, {'player': {}})
    def test_select_related_as_empty_tuple(self):
        """An empty list_select_related tuple disables select_related
        entirely (the queryset attribute stays False)."""
        ia = InvitationAdmin(Invitation, admin.site)
        ia.list_select_related = ()
        request = self.factory.get('/invitation/')
        # NOTE(review): Child vs. Invitation -- same apparent model
        # mismatch as in test_select_related_as_tuple; verify.
        cl = ChangeList(request, Child, ia.list_display, ia.list_display_links,
                ia.list_filter, ia.date_hierarchy, ia.search_fields,
                ia.list_select_related, ia.list_per_page,
                ia.list_max_show_all, ia.list_editable, ia)
        self.assertEqual(cl.queryset.query.select_related, False)
def test_result_list_empty_changelist_value(self):
"""
Regression test for #14982: EMPTY_CHANGELIST_VALUE should be honored
for relationship fields
"""
new_child = Child.objects.create(name='name', parent=None)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th><td class="field-parent nowrap">(None)</td></tr></tbody>' % link
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_html(self):
"""
Verifies that inclusion tag result_list generates a table when with
default ModelAdmin settings.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
list_display = m.get_list_display(request)
list_display_links = m.get_list_display_links(request, list_display)
cl = ChangeList(request, Child, list_display, list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
cl.formset = None
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
link = reverse('admin:admin_changelist_child_change', args=(new_child.id,))
row_html = '<tbody><tr class="row1"><th class="field-name"><a href="%s">name</a></th><td class="field-parent nowrap">Parent object</td></tr></tbody>' % link
self.assertNotEqual(table_output.find(row_html), -1,
'Failed to find expected row element: %s' % table_output)
def test_result_list_editable_html(self):
"""
Regression tests for #11791: Inclusion tag result_list generates a
table and this checks that the items are nested within the table
element tags.
Also a regression test for #13599, verifies that hidden fields
when list_editable is enabled are rendered in a div outside the
table.
"""
new_parent = Parent.objects.create(name='parent')
new_child = Child.objects.create(name='name', parent=new_parent)
request = self.factory.get('/child/')
m = ChildAdmin(Child, admin.site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
cl = ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)
FormSet = m.get_changelist_formset(request)
cl.formset = FormSet(queryset=cl.result_list)
template = Template('{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}')
context = Context({'cl': cl})
table_output = template.render(context)
# make sure that hidden fields are in the correct place
hiddenfields_div = '<div class="hiddenfields"><input type="hidden" name="form-0-id" value="%d" id="id_form-0-id" /></div>' % new_child.id
self.assertInHTML(hiddenfields_div, table_output, msg_prefix='Failed to find hidden fields')
# make sure that list editable fields are rendered in divs correctly
editable_name_field = '<input name="form-0-name" value="name" class="vTextField" maxlength="30" type="text" id="id_form-0-name" />'
self.assertInHTML('<td class="field-name">%s</td>' % editable_name_field, table_output, msg_prefix='Failed to find "name" list_editable field')
def test_result_list_editable(self):
"""
Regression test for #14312: list_editable with pagination
"""
new_parent = Parent.objects.create(name='parent')
for i in range(200):
Child.objects.create(name='name %s' % i, parent=new_parent)
request = self.factory.get('/child/', data={'p': -1}) # Anything outside range
m = ChildAdmin(Child, admin.site)
# Test with list_editable fields
m.list_display = ['id', 'name', 'parent']
m.list_display_links = ['id']
m.list_editable = ['name']
self.assertRaises(IncorrectLookupParameters, lambda:
ChangeList(request, Child, m.list_display, m.list_display_links,
m.list_filter, m.date_hierarchy, m.search_fields,
m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m))
    def test_custom_paginator(self):
        """The change list should use the paginator class supplied by the
        ModelAdmin (CustomPaginationAdmin -> CustomPaginator)."""
        new_parent = Parent.objects.create(name='parent')
        # Enough rows to force pagination.
        for i in range(200):
            Child.objects.create(name='name %s' % i, parent=new_parent)

        request = self.factory.get('/child/')
        m = CustomPaginationAdmin(Child, admin.site)

        cl = ChangeList(request, Child, m.list_display, m.list_display_links,
                m.list_filter, m.date_hierarchy, m.search_fields,
                m.list_select_related, m.list_per_page, m.list_max_show_all, m.list_editable, m)

        cl.get_results(request)
        self.assertIsInstance(cl.paginator, CustomPaginator)
def test_distinct_for_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. Basic ManyToMany.
"""
blues = Genre.objects.create(name='Blues')
band = Band.objects.create(name='B.B. King Review', nr_of_members=11)
band.genres.add(blues)
band.genres.add(blues)
m = BandAdmin(Band, admin.site)
request = self.factory.get('/band/', data={'genres': blues.pk})
cl = ChangeList(request, Band, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_through_m2m_in_list_filter(self):
"""
Regression test for #13902: When using a ManyToMany in list_filter,
results shouldn't appear more than once. With an intermediate model.
"""
lead = Musician.objects.create(name='Vox')
band = Group.objects.create(name='The Hype')
Membership.objects.create(group=band, music=lead, role='lead voice')
Membership.objects.create(group=band, music=lead, role='bass player')
m = GroupAdmin(Group, admin.site)
request = self.factory.get('/group/', data={'members': lead.pk})
cl = ChangeList(request, Group, m.list_display,
m.list_display_links, m.list_filter, m.date_hierarchy,
m.search_fields, m.list_select_related, m.list_per_page,
m.list_max_show_all, m.list_editable, m)
cl.get_results(request)
# There's only one Group instance
self.assertEqual(cl.result_count, 1)
def test_distinct_for_inherited_m2m_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. Model managed in the
    admin inherits from the one that defines the relationship.
    """
    lead = Musician.objects.create(name='John')
    four = Quartet.objects.create(name='The Beatles')
    # Two memberships for the same musician; filtering on the inherited
    # m2m must still return the quartet only once.
    Membership.objects.create(group=four, music=lead, role='lead voice')
    Membership.objects.create(group=four, music=lead, role='guitar player')
    m = QuartetAdmin(Quartet, admin.site)
    request = self.factory.get('/quartet/', data={'members': lead.pk})
    cl = ChangeList(request, Quartet, m.list_display,
        m.list_display_links, m.list_filter, m.date_hierarchy,
        m.search_fields, m.list_select_related, m.list_per_page,
        m.list_max_show_all, m.list_editable, m)
    cl.get_results(request)
    # There's only one Quartet instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_m2m_to_inherited_in_list_filter(self):
    """
    Regression test for #13902: When using a ManyToMany in list_filter,
    results shouldn't appear more than once. Target of the relationship
    inherits from another.
    """
    lead = ChordsMusician.objects.create(name='Player A')
    three = ChordsBand.objects.create(name='The Chords Trio')
    # Two invitations for the same player; the band must still only be
    # listed once when filtering on members.
    Invitation.objects.create(band=three, player=lead, instrument='guitar')
    Invitation.objects.create(band=three, player=lead, instrument='bass')
    m = ChordsBandAdmin(ChordsBand, admin.site)
    request = self.factory.get('/chordsband/', data={'members': lead.pk})
    cl = ChangeList(request, ChordsBand, m.list_display,
        m.list_display_links, m.list_filter, m.date_hierarchy,
        m.search_fields, m.list_select_related, m.list_per_page,
        m.list_max_show_all, m.list_editable, m)
    cl.get_results(request)
    # There's only one ChordsBand instance
    self.assertEqual(cl.result_count, 1)
def test_distinct_for_non_unique_related_object_in_list_filter(self):
    """
    Regression test for #15819: If a field listed in list_filters
    is a non-unique related object, distinct() must be called.
    """
    parent = Parent.objects.create(name='Mary')
    # Two children with the same name
    Child.objects.create(parent=parent, name='Daniel')
    Child.objects.create(parent=parent, name='Daniel')
    m = ParentAdmin(Parent, admin.site)
    request = self.factory.get('/parent/', data={'child__name': 'Daniel'})
    cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
        m.list_filter, m.date_hierarchy, m.search_fields,
        m.list_select_related, m.list_per_page,
        m.list_max_show_all, m.list_editable, m)
    # Make sure distinct() was called: only one Parent row despite two
    # matching children.
    self.assertEqual(cl.queryset.count(), 1)
def test_distinct_for_non_unique_related_object_in_search_fields(self):
    """
    Regression test for #15819: If a field listed in search_fields
    is a non-unique related object, distinct() must be called.
    """
    parent = Parent.objects.create(name='Mary')
    # Both children match a case-insensitive search for 'daniel'.
    Child.objects.create(parent=parent, name='Danielle')
    Child.objects.create(parent=parent, name='Daniel')
    m = ParentAdmin(Parent, admin.site)
    request = self.factory.get('/parent/', data={SEARCH_VAR: 'daniel'})
    cl = ChangeList(request, Parent, m.list_display, m.list_display_links,
        m.list_filter, m.date_hierarchy, m.search_fields,
        m.list_select_related, m.list_per_page,
        m.list_max_show_all, m.list_editable, m)
    # Make sure distinct() was called
    self.assertEqual(cl.queryset.count(), 1)
def test_pagination(self):
    """
    Regression tests for #12893: Pagination in admins changelist doesn't
    use queryset set by modeladmin.
    """
    parent = Parent.objects.create(name='anything')
    # 60 children total: 30 named 'name N' and 30 named 'filtered N'.
    for i in range(30):
        Child.objects.create(name='name %s' % i, parent=parent)
        Child.objects.create(name='filtered %s' % i, parent=parent)
    request = self.factory.get('/child/')
    # Test default queryset: all 60 objects, 10 per page -> 6 pages.
    m = ChildAdmin(Child, admin.site)
    cl = ChangeList(request, Child, m.list_display, m.list_display_links,
        m.list_filter, m.date_hierarchy, m.search_fields,
        m.list_select_related, m.list_per_page, m.list_max_show_all,
        m.list_editable, m)
    self.assertEqual(cl.queryset.count(), 60)
    self.assertEqual(cl.paginator.count, 60)
    self.assertEqual(list(cl.paginator.page_range), [1, 2, 3, 4, 5, 6])
    # Test custom queryset: FilteredChildAdmin narrows to 30 objects, so
    # the paginator must reflect the modeladmin's queryset, not the default.
    m = FilteredChildAdmin(Child, admin.site)
    cl = ChangeList(request, Child, m.list_display, m.list_display_links,
        m.list_filter, m.date_hierarchy, m.search_fields,
        m.list_select_related, m.list_per_page, m.list_max_show_all,
        m.list_editable, m)
    self.assertEqual(cl.queryset.count(), 30)
    self.assertEqual(cl.paginator.count, 30)
    self.assertEqual(list(cl.paginator.page_range), [1, 2, 3])
def test_computed_list_display_localization(self):
    """
    Regression test for #13196: output of functions should be localized
    in the changelist.
    """
    User.objects.create_superuser(
        username='super', email='super@localhost', password='secret')
    self.client.login(username='super', password='secret')
    event = Event.objects.create(date=datetime.date.today())
    response = self.client.get('/admin/admin_changelist/event/')
    # The localized rendering of the date must appear; the raw str() of
    # the date (unlocalized ISO form) must not.
    self.assertContains(response, formats.localize(event.date))
    self.assertNotContains(response, six.text_type(event.date))
def test_dynamic_list_display(self):
    """
    Regression tests for #14206: dynamic list_display support.
    """
    parent = Parent.objects.create(name='parent')
    for i in range(10):
        Child.objects.create(name='child %s' % i, parent=parent)
    user_noparents = self._create_superuser('noparents')
    user_parents = self._create_superuser('parents')
    # Test with user 'noparents': the admin's get_list_display() hides the
    # parent column for this user.
    m = custom_site._registry[Child]
    request = self._mocked_authenticated_request('/child/', user_noparents)
    response = m.changelist_view(request)
    self.assertNotContains(response, 'Parent object')
    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    self.assertEqual(list_display, ['name', 'age'])
    self.assertEqual(list_display_links, ['name'])
    # Test with user 'parents': the parent column is shown.
    m = DynamicListDisplayChildAdmin(Child, admin.site)
    request = self._mocked_authenticated_request('/child/', user_parents)
    response = m.changelist_view(request)
    self.assertContains(response, 'Parent object')
    # Unregister here so the default ChildAdmin can be registered below.
    custom_site.unregister(Child)
    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    self.assertEqual(list_display, ('parent', 'name', 'age'))
    self.assertEqual(list_display_links, ['parent'])
    # Test default implementation
    custom_site.register(Child, ChildAdmin)
    m = custom_site._registry[Child]
    request = self._mocked_authenticated_request('/child/', user_noparents)
    response = m.changelist_view(request)
    self.assertContains(response, 'Parent object')
def test_show_all(self):
    """
    The ALL_VAR query parameter ("show all") is honored only when the total
    object count is at or below list_max_show_all; otherwise the changelist
    falls back to normal pagination.
    """
    parent = Parent.objects.create(name='anything')
    for i in range(30):
        Child.objects.create(name='name %s' % i, parent=parent)
        Child.objects.create(name='filtered %s' % i, parent=parent)
    # Add "show all" parameter to request
    request = self.factory.get('/child/', data={ALL_VAR: ''})
    # Test valid "show all" request (number of total objects is under max)
    m = ChildAdmin(Child, admin.site)
    # 200 is the max we'll pass to ChangeList
    cl = ChangeList(request, Child, m.list_display, m.list_display_links,
        m.list_filter, m.date_hierarchy, m.search_fields,
        m.list_select_related, m.list_per_page, 200, m.list_editable, m)
    cl.get_results(request)
    self.assertEqual(len(cl.result_list), 60)
    # Test invalid "show all" request (number of total objects over max)
    # falls back to paginated pages
    m = ChildAdmin(Child, admin.site)
    # 30 is the max we'll pass to ChangeList for this test
    cl = ChangeList(request, Child, m.list_display, m.list_display_links,
        m.list_filter, m.date_hierarchy, m.search_fields,
        m.list_select_related, m.list_per_page, 30, m.list_editable, m)
    cl.get_results(request)
    self.assertEqual(len(cl.result_list), 10)
def test_dynamic_list_display_links(self):
    """
    Regression tests for #16257: dynamic list_display_links support.
    """
    parent = Parent.objects.create(name='parent')
    for i in range(1, 10):
        Child.objects.create(id=i, name='child %s' % i, parent=parent, age=i)
    m = DynamicListDisplayLinksChildAdmin(Child, admin.site)
    superuser = self._create_superuser('superuser')
    request = self._mocked_authenticated_request('/child/', superuser)
    response = m.changelist_view(request)
    # Every row should link via its 'age' column (age == id here).
    for i in range(1, 10):
        link = reverse('admin:admin_changelist_child_change', args=(i,))
        self.assertContains(response, '<a href="%s">%s</a>' % (link, i))
    list_display = m.get_list_display(request)
    list_display_links = m.get_list_display_links(request, list_display)
    self.assertEqual(list_display, ('parent', 'name', 'age'))
    self.assertEqual(list_display_links, ['age'])
def test_no_list_display_links(self):
    """#15185 -- Allow no links from the 'change list' view grid."""
    parent = Parent.objects.create(name='parent')
    superuser = self._create_superuser('superuser')
    model_admin = NoListDisplayLinksParentAdmin(Parent, admin.site)
    request = self._mocked_authenticated_request('/parent/', superuser)
    response = model_admin.changelist_view(request)
    change_url = reverse('admin:admin_changelist_parent_change', args=(parent.pk,))
    # No cell in the grid should link to the change view.
    self.assertNotContains(response, '<a href="%s">' % change_url)
def test_tuple_list_display(self):
    """
    Regression test for #17128
    (ChangeList failing under Python 2.5 after r16319)
    """
    swallow = Swallow.objects.create(
        origin='Africa', load='12.34', speed='22.2')
    model_admin = SwallowAdmin(Swallow, admin.site)
    superuser = self._create_superuser('superuser')
    request = self._mocked_authenticated_request('/swallow/', superuser)
    response = model_admin.changelist_view(request)
    # just want to ensure it doesn't blow up during rendering
    self.assertContains(response, six.text_type(swallow.origin))
    self.assertContains(response, six.text_type(swallow.load))
    self.assertContains(response, six.text_type(swallow.speed))
def test_deterministic_order_for_unordered_model(self):
    """
    Ensure that the primary key is systematically used in the ordering of
    the changelist's results to guarantee a deterministic order, even
    when the Model doesn't have any default ordering defined.
    Refs #17198.
    """
    superuser = self._create_superuser('superuser')
    for counter in range(1, 51):
        UnorderedObject.objects.create(id=counter, bool=True)

    class UnorderedObjectAdmin(admin.ModelAdmin):
        # 50 objects / 10 per page = 5 pages, walked below.
        list_per_page = 10

    def check_results_order(ascending=False):
        # Walks all 5 changelist pages and checks ids appear strictly
        # ascending (1..50) or strictly descending (50..1).
        admin.site.register(UnorderedObject, UnorderedObjectAdmin)
        model_admin = UnorderedObjectAdmin(UnorderedObject, admin.site)
        counter = 0 if ascending else 51
        for page in range(0, 5):
            request = self._mocked_authenticated_request('/unorderedobject/?p=%s' % page, superuser)
            response = model_admin.changelist_view(request)
            for result in response.context_data['cl'].result_list:
                counter += 1 if ascending else -1
                self.assertEqual(result.id, counter)
        admin.site.unregister(UnorderedObject)

    # When no order is defined at all, everything is ordered by '-pk'.
    check_results_order()
    # When an order field is defined but multiple records have the same
    # value for that field, make sure everything gets ordered by -pk as well.
    UnorderedObjectAdmin.ordering = ['bool']
    check_results_order()
    # When order fields are defined, including the pk itself, use them.
    UnorderedObjectAdmin.ordering = ['bool', '-pk']
    check_results_order()
    UnorderedObjectAdmin.ordering = ['bool', 'pk']
    check_results_order(ascending=True)
    UnorderedObjectAdmin.ordering = ['-id', 'bool']
    check_results_order()
    UnorderedObjectAdmin.ordering = ['id', 'bool']
    check_results_order(ascending=True)
def test_deterministic_order_for_model_ordered_by_its_manager(self):
    """
    Ensure that the primary key is systematically used in the ordering of
    the changelist's results to guarantee a deterministic order, even
    when the Model has a manager that defines a default ordering.
    Refs #17198.
    """
    superuser = self._create_superuser('superuser')
    for counter in range(1, 51):
        OrderedObject.objects.create(id=counter, bool=True, number=counter)

    class OrderedObjectAdmin(admin.ModelAdmin):
        # 50 objects / 10 per page = 5 pages, walked below.
        list_per_page = 10

    def check_results_order(ascending=False):
        # Walks all 5 changelist pages and checks ids appear strictly
        # ascending (1..50) or strictly descending (50..1).
        admin.site.register(OrderedObject, OrderedObjectAdmin)
        model_admin = OrderedObjectAdmin(OrderedObject, admin.site)
        counter = 0 if ascending else 51
        for page in range(0, 5):
            request = self._mocked_authenticated_request('/orderedobject/?p=%s' % page, superuser)
            response = model_admin.changelist_view(request)
            for result in response.context_data['cl'].result_list:
                counter += 1 if ascending else -1
                self.assertEqual(result.id, counter)
        admin.site.unregister(OrderedObject)

    # When no order is defined at all, use the model's default ordering (i.e. 'number')
    check_results_order(ascending=True)
    # When an order field is defined but multiple records have the same
    # value for that field, make sure everything gets ordered by -pk as well.
    OrderedObjectAdmin.ordering = ['bool']
    check_results_order()
    # When order fields are defined, including the pk itself, use them.
    OrderedObjectAdmin.ordering = ['bool', '-pk']
    check_results_order()
    OrderedObjectAdmin.ordering = ['bool', 'pk']
    check_results_order(ascending=True)
    OrderedObjectAdmin.ordering = ['-id', 'bool']
    check_results_order()
    OrderedObjectAdmin.ordering = ['id', 'bool']
    check_results_order(ascending=True)
def test_dynamic_list_filter(self):
    """
    Regression tests for ticket #17646: dynamic list_filter support.
    """
    parent = Parent.objects.create(name='parent')
    for i in range(10):
        Child.objects.create(name='child %s' % i, parent=parent)
    user_noparents = self._create_superuser('noparents')
    user_parents = self._create_superuser('parents')
    # Test with user 'noparents': the 'parent' filter is hidden.
    m = DynamicListFilterChildAdmin(Child, admin.site)
    request = self._mocked_authenticated_request('/child/', user_noparents)
    response = m.changelist_view(request)
    self.assertEqual(response.context_data['cl'].list_filter, ['name', 'age'])
    # Test with user 'parents': the 'parent' filter is included.
    m = DynamicListFilterChildAdmin(Child, admin.site)
    request = self._mocked_authenticated_request('/child/', user_parents)
    response = m.changelist_view(request)
    self.assertEqual(response.context_data['cl'].list_filter, ('parent', 'name', 'age'))
def test_dynamic_search_fields(self):
    """search_fields can be computed per request via get_search_fields()."""
    user = self._create_superuser('child')
    model_admin = DynamicSearchFieldsChildAdmin(Child, admin.site)
    request = self._mocked_authenticated_request('/child/', user)
    response = model_admin.changelist_view(request)
    self.assertEqual(response.context_data['cl'].search_fields, ('name', 'age'))
def test_pagination_page_range(self):
    """
    Regression tests for ticket #15653: ensure the number of pages
    generated for changelist views are correct.
    """
    # instantiating and setting up ChangeList object
    m = GroupAdmin(Group, admin.site)
    request = self.factory.get('/group/')
    cl = ChangeList(request, Group, m.list_display,
        m.list_display_links, m.list_filter, m.date_hierarchy,
        m.search_fields, m.list_select_related, m.list_per_page,
        m.list_max_show_all, m.list_editable, m)
    per_page = cl.list_per_page = 10
    # Each tuple is (current page number, total objects, expected page
    # range), where '.' marks an ellipsis gap in the page list.
    for page_num, objects_count, expected_page_range in [
        (0, per_page, []),
        (0, per_page * 2, list(range(2))),
        (5, per_page * 11, list(range(11))),
        (5, per_page * 12, [0, 1, 2, 3, 4, 5, 6, 7, 8, '.', 10, 11]),
        (6, per_page * 12, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, 10, 11]),
        (6, per_page * 13, [0, 1, '.', 3, 4, 5, 6, 7, 8, 9, '.', 11, 12]),
    ]:
        # assuming we have exactly `objects_count` objects
        Group.objects.all().delete()
        for i in range(objects_count):
            Group.objects.create(name='test band')
        # setting page number and calculating page range
        cl.page_num = page_num
        cl.get_results(request)
        real_page_range = pagination(cl)['page_range']
        self.assertListEqual(
            expected_page_range,
            list(real_page_range),
        )
class AdminLogNodeTestCase(TestCase):
    """Tests for the {% get_admin_log %} template tag."""

    def test_get_admin_log_templatetag_custom_user(self):
        """
        Regression test for ticket #20088: admin log depends on User model
        having id field as primary key.
        The old implementation raised an AttributeError when trying to use
        the id field.
        """
        context = Context({'user': CustomIdUser()})
        template_string = '{% load log %}{% get_admin_log 10 as admin_log for_user user %}'
        template = Template(template_string)
        # Rendering should be u'' since this templatetag just logs,
        # it doesn't render any string.
        self.assertEqual(template.render(context), '')
@override_settings(PASSWORD_HASHERS=['django.contrib.auth.hashers.SHA1PasswordHasher'],
                   ROOT_URLCONF="admin_changelist.urls")
class SeleniumFirefoxTests(AdminSeleniumWebDriverTestCase):
    """In-browser changelist tests, run against Firefox by default.

    Subclasses below reuse these tests with other webdrivers.
    """
    available_apps = ['admin_changelist'] + AdminSeleniumWebDriverTestCase.available_apps
    fixtures = ['users.json']
    webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'

    def test_add_row_selection(self):
        """
        Ensure that the status line for selected rows gets updated correctly (#22038)
        """
        self.admin_login(username='super', password='secret')
        self.selenium.get('%s%s' % (self.live_server_url,
                                    '/admin/auth/user/'))
        form_id = '#changelist-form'
        # Test amount of rows in the Changelist
        rows = self.selenium.find_elements_by_css_selector(
            '%s #result_list tbody tr' % form_id)
        self.assertEqual(len(rows), 1)
        # Test current selection
        selection_indicator = self.selenium.find_element_by_css_selector(
            '%s .action-counter' % form_id)
        self.assertEqual(selection_indicator.text, "0 of 1 selected")
        # Select a row and check again
        row_selector = self.selenium.find_element_by_css_selector(
            '%s #result_list tbody tr:first-child .action-select' % form_id)
        row_selector.click()
        self.assertEqual(selection_indicator.text, "1 of 1 selected")
class SeleniumChromeTests(SeleniumFirefoxTests):
    """Re-run the Firefox Selenium tests against Chrome."""
    webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class SeleniumIETests(SeleniumFirefoxTests):
    """Re-run the Firefox Selenium tests against Internet Explorer."""
    webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
| 4,826 | 26,150 | 113 |
db0f8ab101b84a60021a859ef789132b6c6130ab | 4,723 | py | Python | emukit/examples/multi_fidelity_dgp/baseline_model_wrappers.py | ndalchau/emukit | eb6754ea016a7cd82b275bb4075676b5ed662634 | [
"Apache-2.0"
] | 152 | 2020-10-24T13:12:57.000Z | 2022-03-25T11:35:41.000Z | emukit/examples/multi_fidelity_dgp/baseline_model_wrappers.py | Tony-Chiong/emukit | a068c8d5e06b2ae8b038f67bf2e4f66c4d91651a | [
"Apache-2.0"
] | 87 | 2020-10-26T10:29:25.000Z | 2022-03-04T11:17:59.000Z | emukit/examples/multi_fidelity_dgp/baseline_model_wrappers.py | Tony-Chiong/emukit | a068c8d5e06b2ae8b038f67bf2e4f66c4d91651a | [
"Apache-2.0"
] | 41 | 2020-10-24T11:59:21.000Z | 2022-03-22T17:08:30.000Z | """
These are emukit model wrappers that contain the specific optimization procedures we found worked well for each model.
The constructor for each model takes X and Y as lists, with each entry of the list corresponding to data for a fidelity
"""
import logging
import GPy
import numpy as np
from ...core.interfaces import IModel
from ...model_wrappers import GPyMultiOutputWrapper
from ...multi_fidelity.convert_lists_to_array import convert_xy_lists_to_arrays
from ...multi_fidelity.kernels import LinearMultiFidelityKernel
from ...multi_fidelity.models import GPyLinearMultiFidelityModel
from ...multi_fidelity.models.non_linear_multi_fidelity_model import (
NonLinearMultiFidelityModel, make_non_linear_kernels)
_log = logging.getLogger(__name__)
class HighFidelityGp(IModel):
"""
GP at high fidelity only.
The optimization is restarted from random initial points 10 times.
The noise parameter is initialized at 1e-6 for the first optimization round.
"""
def predict(self, X):
"""
Predict from high fidelity
"""
return self.model.predict(X[:, :-1])
@property
@property
class LinearAutoRegressiveModel(IModel):
"""
Linear model, AR1 in paper. Optimized with noise fixed at 1e-6 until convergence then the noise parameter is freed
and the model is optimized again
"""
def __init__(self, X, Y, n_restarts=10):
"""
:param X: List of training data at each fidelity
:param Y: List of training targets at each fidelity
:param n_restarts: Number of restarts during optimization of hyper-parameters
"""
x_train, y_train = convert_xy_lists_to_arrays(X, Y)
n_dims = X[0].shape[1]
kernels = [GPy.kern.RBF(n_dims, ARD=True) for _ in range(len(X))]
lin_mf_kernel = LinearMultiFidelityKernel(kernels)
gpy_lin_mf_model = GPyLinearMultiFidelityModel(x_train, y_train, lin_mf_kernel, n_fidelities=len(X))
gpy_lin_mf_model.mixed_noise.Gaussian_noise.fix(1e-6)
gpy_lin_mf_model.mixed_noise.Gaussian_noise_1.fix(1e-6)
if len(Y) == 3:
gpy_lin_mf_model.mixed_noise.Gaussian_noise_2.fix(1e-6)
self.model = GPyMultiOutputWrapper(gpy_lin_mf_model, len(X), n_optimization_restarts=n_restarts)
self.name = 'ar1'
self.n_fidelities = len(X)
def predict(self, X):
"""
Predict from high fidelity
"""
return self.model.predict(X)
@property
@property
class NonLinearAutoRegressiveModel(IModel):
"""
Non-linear model, NARGP in paper
"""
def predict(self, X):
"""
Predict from high fidelity
"""
return self.model.predict(X)
@property
@property
| 33.027972 | 119 | 0.665043 | """
These are emukit model wrappers that contain the specific optimization procedures we found worked well for each model.
The constructor for each model takes X and Y as lists, with each entry of the list corresponding to data for a fidelity
"""
import logging
import GPy
import numpy as np
from ...core.interfaces import IModel
from ...model_wrappers import GPyMultiOutputWrapper
from ...multi_fidelity.convert_lists_to_array import convert_xy_lists_to_arrays
from ...multi_fidelity.kernels import LinearMultiFidelityKernel
from ...multi_fidelity.models import GPyLinearMultiFidelityModel
from ...multi_fidelity.models.non_linear_multi_fidelity_model import (
NonLinearMultiFidelityModel, make_non_linear_kernels)
_log = logging.getLogger(__name__)
class HighFidelityGp(IModel):
    """
    GP at high fidelity only.
    The optimization is restarted from random initial points 10 times.
    The noise parameter is initialized at 1e-6 for the first optimization round.
    """
    def __init__(self, X, Y):
        """
        :param X: List of training data at each fidelity; only X[1]
                  (high fidelity) is used.
        :param Y: List of training targets at each fidelity; only Y[1] is used.
        """
        kern = GPy.kern.RBF(X[1].shape[1], ARD=True)
        self.model = GPy.models.GPRegression(X[1], Y[1], kernel=kern)
        self.model.Gaussian_noise.variance = 1e-6
        self.name = 'hf_gp'

    def optimize(self):
        """Optimize hyper-parameters, restarting from 10 random initial points."""
        # Bug fix: the original used '...'.format(self.name) with no
        # placeholder, so the model name never appeared in the log.
        _log.info('\n--- Optimization: %s ---\n', self.name)
        self.model.optimize_restarts(10, robust=True)

    def predict(self, X):
        """
        Predict from high fidelity. The last column of X (the fidelity
        index appended by emukit) is stripped before prediction.
        """
        return self.model.predict(X[:, :-1])

    def set_data(self, X: np.ndarray, Y: np.ndarray) -> None:
        # Not supported for this baseline wrapper.
        raise NotImplementedError()

    @property
    def X(self):
        raise NotImplementedError()

    @property
    def Y(self):
        raise NotImplementedError()
class LinearAutoRegressiveModel(IModel):
    """
    Linear model, AR1 in paper. Optimized with noise fixed at 1e-6 until convergence then the noise parameter is freed
    and the model is optimized again
    """
    def __init__(self, X, Y, n_restarts=10):
        """
        :param X: List of training data at each fidelity
        :param Y: List of training targets at each fidelity
        :param n_restarts: Number of restarts during optimization of hyper-parameters
        """
        x_train, y_train = convert_xy_lists_to_arrays(X, Y)
        n_dims = X[0].shape[1]
        kernels = [GPy.kern.RBF(n_dims, ARD=True) for _ in range(len(X))]
        lin_mf_kernel = LinearMultiFidelityKernel(kernels)
        gpy_lin_mf_model = GPyLinearMultiFidelityModel(x_train, y_train, lin_mf_kernel, n_fidelities=len(X))
        # Fix the noise at a tiny value for the first optimization round;
        # it is freed again in optimize().
        gpy_lin_mf_model.mixed_noise.Gaussian_noise.fix(1e-6)
        gpy_lin_mf_model.mixed_noise.Gaussian_noise_1.fix(1e-6)
        if len(Y) == 3:
            gpy_lin_mf_model.mixed_noise.Gaussian_noise_2.fix(1e-6)
        self.model = GPyMultiOutputWrapper(gpy_lin_mf_model, len(X), n_optimization_restarts=n_restarts)
        self.name = 'ar1'
        self.n_fidelities = len(X)

    def predict(self, X):
        """
        Predict from high fidelity
        """
        return self.model.predict(X)

    def optimize(self):
        """Two-stage optimization: noise fixed first, then freed and re-optimized."""
        # Bug fix: the original used '...'.format(self.name) with no
        # placeholder, so the model name never appeared in the log.
        _log.info('\n--- Optimization: %s ---\n', self.name)
        self.model.optimize()
        self.model.gpy_model.mixed_noise.Gaussian_noise.unfix()
        self.model.gpy_model.mixed_noise.Gaussian_noise_1.unfix()
        if self.n_fidelities == 3:
            self.model.gpy_model.mixed_noise.Gaussian_noise_2.unfix()
        self.model.optimize()

    def set_data(self, X: np.ndarray, Y: np.ndarray) -> None:
        # Not supported for this baseline wrapper.
        raise NotImplementedError()

    @property
    def X(self):
        raise NotImplementedError()

    @property
    def Y(self):
        raise NotImplementedError()
class NonLinearAutoRegressiveModel(IModel):
    """
    Non-linear model, NARGP in paper
    """
    def __init__(self, X, Y, n_restarts=10):
        """
        :param X: List of training data at each fidelity
        :param Y: List of training targets at each fidelity
        :param n_restarts: Number of restarts during optimization of hyper-parameters
        """
        x_train, y_train = convert_xy_lists_to_arrays(X, Y)
        base_kernel = GPy.kern.RBF
        # x_train's last column is the fidelity index, hence shape[1] - 1 input dims.
        kernels = make_non_linear_kernels(base_kernel, len(X), x_train.shape[1] - 1, ARD=True)
        self.model = NonLinearMultiFidelityModel(x_train, y_train, n_fidelities=len(X), kernels=kernels,
                                                 verbose=True, optimization_restarts=n_restarts)
        self.name = 'nargp'

    def predict(self, X):
        """
        Predict from high fidelity
        """
        return self.model.predict(X)

    def optimize(self):
        """Optimize hyper-parameters of the non-linear multi-fidelity model."""
        # Bug fix: the original used '...'.format(self.name) with no
        # placeholder, so the model name never appeared in the log.
        _log.info('\n--- Optimization: %s ---\n', self.name)
        self.model.optimize()

    def set_data(self, X: np.ndarray, Y: np.ndarray) -> None:
        # Not supported for this baseline wrapper.
        raise NotImplementedError()

    @property
    def X(self):
        raise NotImplementedError()

    @property
    def Y(self):
        raise NotImplementedError()
| 1,590 | 0 | 371 |
43156f04320249b2999a07df3fb80e4552695395 | 1,278 | py | Python | aztk_cli/spark/endpoints/job/delete.py | lachiemurray/aztk | 8d00a2c444313e77b6b0662f8287fcd9fd67898c | [
"MIT"
] | null | null | null | aztk_cli/spark/endpoints/job/delete.py | lachiemurray/aztk | 8d00a2c444313e77b6b0662f8287fcd9fd67898c | [
"MIT"
] | null | null | null | aztk_cli/spark/endpoints/job/delete.py | lachiemurray/aztk | 8d00a2c444313e77b6b0662f8287fcd9fd67898c | [
"MIT"
] | null | null | null | import argparse
import typing
import aztk.spark
from aztk_cli import log, config
| 33.631579 | 100 | 0.611111 | import argparse
import typing
import aztk.spark
from aztk_cli import log, config
def setup_parser(parser: argparse.ArgumentParser):
    """Register the job-delete command's arguments on *parser*."""
    parser.add_argument(
        '--id',
        dest='job_id',
        required=True,
        help='The unique id of your AZTK Job')
    parser.add_argument(
        '--force',
        dest='force',
        required=False,
        action='store_true',
        help='Do not prompt for confirmation, force deletion of cluster.')
    parser.set_defaults(force=False)
def execute(args: typing.NamedTuple):
    """Delete the AZTK Spark job identified by ``args.job_id``.

    Unless ``--force`` was given, verifies the job exists and asks the user
    to re-type the job id before deleting.
    """
    spark_client = aztk.spark.Client(config.load_aztk_secrets())
    job_id = args.job_id
    if not args.force:
        # check if job exists before prompting for confirmation
        spark_client.get_job(job_id)
        # Bug fix: the prompt and error previously said "cluster" even
        # though this command deletes a job.
        confirmation_job_id = input("Please confirm the id of the job you wish to delete: ")
        if confirmation_job_id != job_id:
            log.error("Confirmation job id does not match. Please try again.")
            return
    if spark_client.delete_job(job_id):
        log.info("Deleting Job %s", job_id)
    else:
        log.error("Job with id '%s' doesn't exist or was already deleted.", job_id)
| 1,149 | 0 | 46 |
dea20d08c65d75f519b5060b937d68920ccbea77 | 8,174 | py | Python | masp/spherical_array_processing/evaluate_sht_filters.py | andresperezlopez/masp | c6385b6635b5e86233152ccfea2df15caee6acc7 | [
"BSD-3-Clause"
] | 19 | 2020-06-07T10:58:11.000Z | 2022-02-10T08:48:15.000Z | masp/spherical_array_processing/evaluate_sht_filters.py | andresperezlopez/masp | c6385b6635b5e86233152ccfea2df15caee6acc7 | [
"BSD-3-Clause"
] | null | null | null | masp/spherical_array_processing/evaluate_sht_filters.py | andresperezlopez/masp | c6385b6635b5e86233152ccfea2df15caee6acc7 | [
"BSD-3-Clause"
] | 5 | 2020-06-29T07:12:03.000Z | 2021-11-06T12:25:47.000Z | # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Copyright (c) 2019, Eurecat / UPF
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# @file evaluate_sht_filters.py
# @author Andrés Pérez-López
# @date 01/10/2019
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import numpy as np
import matplotlib.pyplot as plt
from masp.validate_data_types import _validate_int, _validate_ndarray_1D, \
_validate_ndarray_2D, _validate_ndarray_3D, _validate_boolean, _validate_number
def evaluate_sht_filters(M_mic2sh, H_array, fs, Y_grid, w_grid=None, plot=False):
    """
    Evaluate frequency-dependent performance of SHT filters.
    Parameters
    ----------
    M_mic2sh : ndarray
        SHT filtering matrix produced by one of the methods included in the library.
        Dimension = ( (order+1)^2, nMics, nBins ).
    H_array : ndarray, dtype = 'complex'
        Modeled or measured spherical array responses in a dense grid of `nGrid` directions.
        Dimension = ( nBins, nMics, nGrid ).
    fs : int
        Target sampling rate.
    Y_grid : ndarray
        Spherical harmonics matrix for the `nGrid` directions of the evaluation grid.
        Dimension = ( nGrid, (order+1)^2 ).
    w_grid : ndarray, optional
        Vector of integration weights for the grid points.
        Dimension = ( nGrid ).
    plot : bool, optional
        Plot responses. Default to false.
    Returns
    -------
    cSH : ndarray, dtype = 'complex'
        Spatial correlation coefficient, for each SHT order and frequency bin.
        Dimension = ( nBins, order+1 ).
    lSH : ndarray
        Level difference, for each SHT order, for each SHT order and frequency bin.
        Dimension = ( nBins, order+1 ).
    WNG : ndarray
        Maximum amplification of all output SH components.
        Dimension = ( nBins ).
    Raises
    -----
    TypeError, ValueError: if method arguments mismatch in type, dimension or value.
    Notes
    -----
    The SHT filters can be evaluated in terms of how ideal are the SH
    components that they generate. The evaluation here follows the metrics
    introduced in
    Moreau, S., Daniel, J., Bertet, S., 2006,
    `3D sound field recording with higher order ambisonics-objectiv
    measurements and validation of spherical microphone.`
    In Audio Engineering Society Convention 120.
    These are a) the spatial correlation coefficient between each ideal
    spherical harmonic and the reconstructed pattern, evaluated at a dense
    grid of directions, b) level difference between the mean spatial power
    of the reconstructed pattern (diffuse power) over the one from an ideal
    SH component. Ideally, correlaiton should be close to one, and the
    level difference should be close to 0dB.
    Additionally, the maximum amplification of all output SH components is
    evaluated, through the maximum eigenvalue of the filtering matrix.
    Due to the matrix nature of computations,
    the minimum valid value for `nMics` and `nGrid` is 2.
    """
    _validate_ndarray_3D('M_mic2sh', M_mic2sh)
    # The number of SH channels fixes the transform order: n_sh = (order+1)^2.
    n_sh = M_mic2sh.shape[0]
    order_sht = int(np.sqrt(n_sh) - 1)
    nMics = M_mic2sh.shape[1]
    _validate_number('nMics', nMics, limit=[2, np.inf])
    nBins = M_mic2sh.shape[2]
    _validate_ndarray_3D('H_array', H_array, shape0=nBins, shape1=nMics)
    nGrid = H_array.shape[2]
    _validate_number('nGrid', nGrid, limit=[2, np.inf])
    _validate_ndarray_2D('Y_grid', Y_grid, shape0=nGrid, shape1=n_sh)
    if w_grid is None:
        # Default to uniform quadrature weights summing to 1 over the grid.
        w_grid = 1/nGrid*np.ones(nGrid)
    _validate_ndarray_1D('w_grid', w_grid, size=nGrid)
    _validate_int('fs', fs, positive=True)
    # NOTE(review): `plot` defaults to False, so this guard is always taken
    # unless the caller passes plot=None explicitly - confirm that is the intent.
    if plot is not None:
        _validate_boolean('plot', plot)
    # Bin center frequencies, assuming nBins is the one-sided spectrum of an
    # nFFT-point transform (nBins = nFFT/2 + 1).
    nFFT = 2 * (nBins - 1)
    f = np.arange(nFFT // 2 + 1) * fs / nFFT
    # Compute spatial correlations and integrated level difference between
    # ideal and reconstructed harmonics
    cSH = np.empty((nBins, order_sht+1), dtype='complex')
    lSH = np.empty((nBins, order_sht+1))
    # rSH = np.empty((nBins, order_sht+1))
    for kk in range(nBins):
        H_kk = H_array[kk,:,:]
        # Reconstructed SH patterns at this bin: filter matrix applied to the
        # array responses, one column per grid direction.
        y_recon_kk = np.matmul(M_mic2sh[:,:, kk], H_kk)
        for n in range(order_sht+1):
            cSH_n = 0 # spatial correlation (mean per order)
            lSH_n = 0 # diffuse level difference (mean per order)
            # rSH_n = 0 # mean level difference (mean per order)
            for m in range(-n, n+1):
                # ACN channel index for degree n, order m: q = n^2 + n + m.
                q = np.power(n, 2) + n + m
                y_recon_nm = y_recon_kk[q,:].T
                y_ideal_nm = Y_grid[:, q]
                # Weighted inner product against the ideal SH, normalized by the
                # reconstructed pattern's energy only.
                # NOTE(review): the ideal pattern's own energy is assumed to be
                # unity under w_grid - confirm against the reference implementation.
                cSH_nm = np.matmul((y_recon_nm * w_grid).conj(), y_ideal_nm) / np.sqrt( np.matmul((y_recon_nm*w_grid).conj(), y_recon_nm ))
                cSH_n = cSH_n + cSH_nm
                lSH_nm = np.real(np.matmul((y_recon_nm * w_grid).conj(), y_recon_nm ))
                lSH_n = lSH_n + lSH_nm
                # rSH_nm = np.sum(np.power(np.abs(y_recon_nm - y_ideal_nm), 2) * w_grid)
                # rSH_n = rSH_n + rSH_nm;
            # Average over the 2n+1 degrees of each order.
            cSH[kk, n] = cSH_n / (2 * n + 1)
            lSH[kk, n] = lSH_n / (2 * n + 1)
            # rSH[kk, n] = rSH_n / (2 * n + 1)
    # Maximum noise amplification of all filters in matrix
    WNG = np.empty(nBins)
    for kk in range(nBins):
        # TODO: Matlab implementation warns when M matrix is complex, e.g. TEST_SCRIPTS l. 191-199
        # Avoid ComplexWarning: imaginary parts appearing due to numerical precission
        # Largest eigenvalue of M^H M, i.e. the squared maximum singular value
        # of the filter matrix: the worst-case gain over all SH outputs.
        eigM = np.real(np.linalg.eigvals(np.matmul(M_mic2sh[:,:,kk].T.conj(), M_mic2sh[:,:,kk])))
        WNG[kk] = np.max(eigM)
    # Plots
    if plot:
        str_legend = [None]*(order_sht+1)
        for n in range(order_sht+1):
            str_legend[n] = str(n)
        plt.figure()
        plt.subplot(311)
        plt.semilogx(f, np.abs(cSH))
        plt.grid()
        plt.legend(str_legend)
        plt.axis([50, 20000, 0, 1])
        plt.title('Spatial correlation')
        plt.subplot(312)
        plt.semilogx(f, 10 * np.log10(lSH))
        plt.grid()
        plt.legend(str_legend)
        plt.axis([50, 20000, -30, 10])
        plt.title('Level correlation')
        plt.subplot(313)
        plt.semilogx(f, 10 * np.log10(WNG))
        plt.grid()
        plt.xlim([50, 20000])
        plt.title('Maximum amplification')
        plt.xlabel('Frequency (Hz)')
        # plt.subplot(414)
        # plt.semilogx(f, 10 * np.log10(rSH))
        # plt.grid()
        # plt.xlim([50, 20000])
        # plt.title('MSE')
        # plt.xlabel('Frequency (Hz)')
        plt.show()
    return cSH, lSH, WNG
| 40.465347 | 139 | 0.630536 | # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# Copyright (c) 2019, Eurecat / UPF
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
#
# @file evaluate_sht_filters.py
# @author Andrés Pérez-López
# @date 01/10/2019
#
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
import numpy as np
import matplotlib.pyplot as plt
from masp.validate_data_types import _validate_int, _validate_ndarray_1D, \
_validate_ndarray_2D, _validate_ndarray_3D, _validate_boolean, _validate_number
def evaluate_sht_filters(M_mic2sh, H_array, fs, Y_grid, w_grid=None, plot=False):
"""
Evaluate frequency-dependent performance of SHT filters.
Parameters
----------
M_mic2sh : ndarray
SHT filtering matrix produced by one of the methods included in the library.
Dimension = ( (order+1)^2, nMics, nBins ).
H_array : ndarray, dtype = 'complex'
Modeled or measured spherical array responses in a dense grid of `nGrid` directions.
Dimension = ( nBins, nMics, nGrid ).
fs : int
Target sampling rate.
Y_grid : ndarray
Spherical harmonics matrix for the `nGrid` directions of the evaluation grid.
Dimension = ( nGrid, (order+1)^2 ).
w_grid : ndarray, optional
Vector of integration weights for the grid points.
Dimension = ( nGrid ).
plot : bool, optional
Plot responses. Default to false.
Returns
-------
cSH : ndarray, dtype = 'complex'
Spatial correlation coefficient, for each SHT order and frequency bin.
Dimension = ( nBins, order+1 ).
lSH : ndarray
Level difference, for each SHT order, for each SHT order and frequency bin.
Dimension = ( nBins, order+1 ).
WNG : ndarray
Maximum amplification of all output SH components.
Dimension = ( nBins ).
Raises
-----
TypeError, ValueError: if method arguments mismatch in type, dimension or value.
Notes
-----
The SHT filters can be evaluated in terms of how ideal are the SH
components that they generate. The evaluation here follows the metrics
introduced in
Moreau, S., Daniel, J., Bertet, S., 2006,
`3D sound field recording with higher order ambisonics-objectiv
measurements and validation of spherical microphone.`
In Audio Engineering Society Convention 120.
These are a) the spatial correlation coefficient between each ideal
spherical harmonic and the reconstructed pattern, evaluated at a dense
grid of directions, b) level difference between the mean spatial power
of the reconstructed pattern (diffuse power) over the one from an ideal
SH component. Ideally, correlaiton should be close to one, and the
level difference should be close to 0dB.
Additionally, the maximum amplification of all output SH components is
evaluated, through the maximum eigenvalue of the filtering matrix.
Due to the matrix nature of computations,
the minimum valid value for `nMics` and `nGrid` is 2.
"""
_validate_ndarray_3D('M_mic2sh', M_mic2sh)
n_sh = M_mic2sh.shape[0]
order_sht = int(np.sqrt(n_sh) - 1)
nMics = M_mic2sh.shape[1]
_validate_number('nMics', nMics, limit=[2, np.inf])
nBins = M_mic2sh.shape[2]
_validate_ndarray_3D('H_array', H_array, shape0=nBins, shape1=nMics)
nGrid = H_array.shape[2]
_validate_number('nGrid', nGrid, limit=[2, np.inf])
_validate_ndarray_2D('Y_grid', Y_grid, shape0=nGrid, shape1=n_sh)
if w_grid is None:
w_grid = 1/nGrid*np.ones(nGrid)
_validate_ndarray_1D('w_grid', w_grid, size=nGrid)
_validate_int('fs', fs, positive=True)
if plot is not None:
_validate_boolean('plot', plot)
nFFT = 2 * (nBins - 1)
f = np.arange(nFFT // 2 + 1) * fs / nFFT
# Compute spatial correlations and integrated level difference between
# ideal and reconstructed harmonics
cSH = np.empty((nBins, order_sht+1), dtype='complex')
lSH = np.empty((nBins, order_sht+1))
# rSH = np.empty((nBins, order_sht+1))
for kk in range(nBins):
H_kk = H_array[kk,:,:]
y_recon_kk = np.matmul(M_mic2sh[:,:, kk], H_kk)
for n in range(order_sht+1):
cSH_n = 0 # spatial correlation (mean per order)
lSH_n = 0 # diffuse level difference (mean per order)
# rSH_n = 0 # mean level difference (mean per order)
for m in range(-n, n+1):
q = np.power(n, 2) + n + m
y_recon_nm = y_recon_kk[q,:].T
y_ideal_nm = Y_grid[:, q]
cSH_nm = np.matmul((y_recon_nm * w_grid).conj(), y_ideal_nm) / np.sqrt( np.matmul((y_recon_nm*w_grid).conj(), y_recon_nm ))
cSH_n = cSH_n + cSH_nm
lSH_nm = np.real(np.matmul((y_recon_nm * w_grid).conj(), y_recon_nm ))
lSH_n = lSH_n + lSH_nm
# rSH_nm = np.sum(np.power(np.abs(y_recon_nm - y_ideal_nm), 2) * w_grid)
# rSH_n = rSH_n + rSH_nm;
cSH[kk, n] = cSH_n / (2 * n + 1)
lSH[kk, n] = lSH_n / (2 * n + 1)
# rSH[kk, n] = rSH_n / (2 * n + 1)
# Maximum noise amplification of all filters in matrix
WNG = np.empty(nBins)
for kk in range(nBins):
# TODO: Matlab implementation warns when M matrix is complex, e.g. TEST_SCRIPTS l. 191-199
# Avoid ComplexWarning: imaginary parts appearing due to numerical precission
eigM = np.real(np.linalg.eigvals(np.matmul(M_mic2sh[:,:,kk].T.conj(), M_mic2sh[:,:,kk])))
WNG[kk] = np.max(eigM)
# Plots
if plot:
str_legend = [None]*(order_sht+1)
for n in range(order_sht+1):
str_legend[n] = str(n)
plt.figure()
plt.subplot(311)
plt.semilogx(f, np.abs(cSH))
plt.grid()
plt.legend(str_legend)
plt.axis([50, 20000, 0, 1])
plt.title('Spatial correlation')
plt.subplot(312)
plt.semilogx(f, 10 * np.log10(lSH))
plt.grid()
plt.legend(str_legend)
plt.axis([50, 20000, -30, 10])
plt.title('Level correlation')
plt.subplot(313)
plt.semilogx(f, 10 * np.log10(WNG))
plt.grid()
plt.xlim([50, 20000])
plt.title('Maximum amplification')
plt.xlabel('Frequency (Hz)')
# plt.subplot(414)
# plt.semilogx(f, 10 * np.log10(rSH))
# plt.grid()
# plt.xlim([50, 20000])
# plt.title('MSE')
# plt.xlabel('Frequency (Hz)')
plt.show()
return cSH, lSH, WNG
| 0 | 0 | 0 |
ec129eab7264ce4762df078524cdc7e14a10fc29 | 483 | py | Python | backend/flask_app/getData.py | avtansh-code/traffic-congestion | 5bdab6e64fd45ba17eabf18c846cc51e4b3e45fc | [
"MIT"
] | 3 | 2018-04-12T21:10:56.000Z | 2021-01-14T07:14:43.000Z | backend/flask_app/getData.py | avtansh-code/traffic-congestion | 5bdab6e64fd45ba17eabf18c846cc51e4b3e45fc | [
"MIT"
] | null | null | null | backend/flask_app/getData.py | avtansh-code/traffic-congestion | 5bdab6e64fd45ba17eabf18c846cc51e4b3e45fc | [
"MIT"
] | 2 | 2019-10-06T06:38:26.000Z | 2020-12-29T05:06:33.000Z | import pandas as pd
import sys
import math
import requests | 40.25 | 161 | 0.714286 | import pandas as pd
import sys
import math
import requests
def getData():
    """Fetch the latest traffic CSV from Firebase storage, cache it locally
    as output.csv, and return the columns the predictor consumes."""
    my_url = "https://firebasestorage.googleapis.com/v0/b/traffic-predictor-233145.appspot.com/o/output.csv?alt=media&token=9b79b904-17ff-4fd0-9637-55844ef9cdf2"
    r = requests.get(my_url, allow_redirects=True)
    # Persist the payload to disk, then let pandas parse the cached copy.
    open('output.csv', 'wb').write(r.content)
    data = pd.read_csv("output.csv")
    data = data[['Location', 'CurrSpeed', 'NormSpeed', 'Date', 'Hour', 'Congestion', 'Weekday']]
    # NOTE(review): the trailing "| 402 | 0 | 23" below looks like dataset/paste
    # residue (it would bitwise-OR the DataFrame) - verify against the upstream file.
    return data | 402 | 0 | 23
07c09b4f3e610e9e9acb328dbcd7cb6d5ad95305 | 707 | py | Python | linkedlist/first_cyclic_node.py | AnshulPatni/Algorithms | c8bcfb86d50f68041921e5140f01821ac12d9521 | [
"MIT"
] | 2 | 2018-04-30T19:31:04.000Z | 2018-05-05T14:29:45.000Z | linkedlist/first_cyclic_node.py | AnshulPatni/Algorithms | c8bcfb86d50f68041921e5140f01821ac12d9521 | [
"MIT"
] | null | null | null | linkedlist/first_cyclic_node.py | AnshulPatni/Algorithms | c8bcfb86d50f68041921e5140f01821ac12d9521 | [
"MIT"
] | 1 | 2018-04-16T12:45:49.000Z | 2018-04-16T12:45:49.000Z | """
Given a linked list, find the first node of a cycle in it.
1 -> 2 -> 3 -> 4 -> 5 -> 1 => 1
A -> B -> C -> D -> E -> C => C
Note: The solution is a direct implementation
Floyd's cycle-finding algorithm (Floyd's Tortoise and Hare).
"""
def firstCyclicNode(head):
    """
    Locate the node where a cycle begins, via Floyd's tortoise-and-hare.

    :type head: Node
    :rtype: Node (or None when the list is acyclic or empty)
    """
    slow = fast = head
    met = False
    # Phase 1: advance the fast pointer two steps per one slow step;
    # they can only meet inside a cycle.
    while fast is not None and fast.next is not None:
        fast = fast.next.next
        slow = slow.next
        if fast is slow:
            met = True
            break
    if not met:
        return None
    # Phase 2: restart one pointer from the head; stepping both by one,
    # they meet exactly at the cycle's entry node.
    slow = head
    while slow is not fast:
        slow = slow.next
        fast = fast.next
    return slow
| 23.566667 | 70 | 0.5686 | """
Given a linked list, find the first node of a cycle in it.
1 -> 2 -> 3 -> 4 -> 5 -> 1 => 1
A -> B -> C -> D -> E -> C => C
Note: The solution is a direct implementation
Floyd's cycle-finding algorithm (Floyd's Tortoise and Hare).
"""
def firstCyclicNode(head):
"""
:type head: Node
:rtype: Node
"""
runner = walker = head
while runner and runner.next:
runner = runner.next.next
walker = walker.next
if runner is walker:
break
if runner is None or runner.next is None:
return None
walker = head
while runner is not walker:
runner, walker = runner.next, walker.next
return runner
| 0 | 0 | 0 |
569b8568f568c9cc9c98f203a6144f0b659dd00e | 2,453 | py | Python | Arduino Robot/PC_Clients/PythonRobotController/RESTPublishClient/RESTClient.py | wso2-incubator/device-cloud-appliances | c91229cede8f0a302446a4ad0aaba7cfd86898cc | [
"Apache-2.0"
] | null | null | null | Arduino Robot/PC_Clients/PythonRobotController/RESTPublishClient/RESTClient.py | wso2-incubator/device-cloud-appliances | c91229cede8f0a302446a4ad0aaba7cfd86898cc | [
"Apache-2.0"
] | null | null | null | Arduino Robot/PC_Clients/PythonRobotController/RESTPublishClient/RESTClient.py | wso2-incubator/device-cloud-appliances | c91229cede8f0a302446a4ad0aaba7cfd86898cc | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python
import serial
import time
import requests
import datetime
import thread
import time
bluetoothSerial = serial.Serial( "/dev/tty.HC-06-DevB", baudrate=9600 )
serverIP = "localhost"
serverPort = "8080"
publisherEndpoint = "/ConnectedDevices/pushdata"
#"/pushdata/{ip}/{owner}/{type}/{mac}/{time}/{pin}/{value}")
deviceIP = "/192.168.1.999"
deviceOwner = "/SMEAN"
deviceType = "/ArduinoUNO"
deviceMAC = "/98:D3:31:80:38:D3"
publisherEndpoint = "http://" + serverIP + ":" + serverPort + publisherEndpoint + deviceIP + deviceOwner + deviceType + deviceMAC + "/"
import termios, fcntl, sys, os
if __name__=='__main__':
main()
| 22.504587 | 135 | 0.614757 | #! /usr/bin/python
import serial
import time
import requests
import datetime
import thread
import time
bluetoothSerial = serial.Serial( "/dev/tty.HC-06-DevB", baudrate=9600 )
serverIP = "localhost"
serverPort = "8080"
publisherEndpoint = "/ConnectedDevices/pushdata"
#"/pushdata/{ip}/{owner}/{type}/{mac}/{time}/{pin}/{value}")
deviceIP = "/192.168.1.999"
deviceOwner = "/SMEAN"
deviceType = "/ArduinoUNO"
deviceMAC = "/98:D3:31:80:38:D3"
publisherEndpoint = "http://" + serverIP + ":" + serverPort + publisherEndpoint + deviceIP + deviceOwner + deviceType + deviceMAC + "/"
import termios, fcntl, sys, os
class _Getch:
    """Callable that reads one keypress from stdin without waiting for Enter.

    Arrow keys arrive as three-byte escape sequences and are returned whole;
    any other key is returned as a single character.
    """
    def __call__(self):
        fd = sys.stdin.fileno()
        oldterm = termios.tcgetattr(fd)
        newattr = termios.tcgetattr(fd)
        # Index 3 holds the local-mode flags: drop canonical (line-buffered)
        # mode and echo so reads return per keystroke.
        newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO
        termios.tcsetattr(fd, termios.TCSANOW, newattr)
        try:
            ch1 = sys.stdin.read(1)
            # ESC introduces a 3-byte arrow-key sequence; read the rest.
            if(ch1=='\x1b'):
                ch2 = sys.stdin.read(1)
                ch3 = sys.stdin.read(1)
                ch=ch1+ch2+ch3
            else: ch =ch1
        finally:
            # Always restore the terminal's original settings.
            termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)
        return ch
def get():
    """Block until a key arrives and translate it to a motion code.

    Arrow keys map to 1 (up), 2 (down), 3 (left), 4 (right); anything
    else - including a None read - yields 5.
    """
    reader = _Getch()
    while True:
        key = reader()
        if key is None or key != '':
            break
    codes = {'\x1b[A': 1, '\x1b[B': 2, '\x1b[D': 3, '\x1b[C': 4}
    return codes.get(key, 5)
def getControl( threadName, delay):
    """Worker-thread loop: forward every keypress as a motion code over
    the Bluetooth serial link. Runs forever.

    `threadName` and `delay` are unused; they only satisfy the argument
    tuple passed by thread.start_new_thread.
    """
    while(1):
        motion=get()
        print motion
        bluetoothSerial.write("{0}".format( motion))
def main():
    """Handshake a rough time sync with the robot, spawn the keyboard
    control thread, then relay incoming sensor lines to the REST endpoint."""
    bluetoothSerial.write("6"); # simple approximate time sync-- assumed latency is negligible considering sensor information
    global time
    ts = time.time()
    lines = bluetoothSerial.readline()
    print lines
    try:
        print "Started waiting for time sync"
        thread.start_new_thread( getControl, ("Thread", 2, ) )
    except:
        print "Error: unable to start thread"
    while True:
        lines = bluetoothSerial.readline()
        #print lines+"\n"
        # Each serial line is a comma-separated list of "sensor:value:offset"
        # triples; the offset is seconds since the sync handshake above.
        sensorData=lines.split(',')
        for line in sensorData:
            line = line.split(':')
            sensor = line[0]
            value = line[1]
            # NOTE(review): because of `global time` above, this rebinds the
            # module name `time` (the time module) to a float; any later
            # time.time() call in this process would fail - confirm intended.
            time = ts+float(line[2])
            # URL layout: .../{epoch-millis}/{sensor}/{value}
            currentResource = publisherEndpoint + str(long(round(time*1000)))+"/"+sensor + "/" + value
            #print currentResource
            r = requests.post(currentResource)
            #print(r.text)
#print currentResource
r = requests.post(currentResource)
#print(r.text)
if __name__=='__main__':
main()
| 1,680 | -8 | 117 |
556b37bde6ed7dc092a133d1fa97a7da1e6d3eaf | 1,626 | py | Python | redis/concurrency/threadpool_imgur.py | fengpf/py | 21f18573d97036d2b3796a16436de1895064def0 | [
"MIT"
] | 148 | 2015-03-20T08:50:52.000Z | 2022-02-20T21:18:53.000Z | threadpool_imgur.py | volker48/python-concurrency | 184db1527bbf48c1483cb0657f4696dc953867cb | [
"MIT"
] | 9 | 2015-10-16T09:01:36.000Z | 2022-03-11T23:20:57.000Z | threadpool_imgur.py | gwsu2008/python-concurrency | 252ddb1d7196b8a386dc3dc3564d8da8f30eff28 | [
"MIT"
] | 73 | 2015-03-20T09:31:22.000Z | 2022-01-17T13:10:05.000Z | ####
# This sample is published as part of the blog article at www.toptal.com/blog
# Visit www.toptal.com/blog and subscribe to our newsletter to read great posts
####
import logging
import os
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from time import time
from download import setup_download_dir, get_links, download_link
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
if __name__ == '__main__':
main()
| 36.954545 | 122 | 0.730627 | ####
# This sample is published as part of the blog article at www.toptal.com/blog
# Visit www.toptal.com/blog and subscribe to our newsletter to read great posts
####
import logging
import os
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from time import time
from download import setup_download_dir, get_links, download_link
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def main():
    """Download all Imgur links concurrently and log the elapsed time.

    Raises:
        Exception: if the IMGUR_CLIENT_ID environment variable is unset,
            or if any download fails / the batch exceeds the timeout.
    """
    ts = time()
    client_id = os.getenv('IMGUR_CLIENT_ID')
    if not client_id:
        raise Exception("Couldn't find IMGUR_CLIENT_ID environment variable!")
    download_dir = setup_download_dir()
    links = get_links(client_id)
    # The with block guarantees the executor is shut down (threads joined)
    # on exit. By default the executor sizes its pool from the CPU count.
    with ThreadPoolExecutor() as executor:
        # Partially apply the directory argument so the two-argument
        # download_link fits map()'s single-argument callable.
        fn = partial(download_link, download_dir)
        # map() returns a lazy iterator; it must be consumed for the 30 s
        # timeout to be enforced and for worker exceptions to surface here
        # instead of being silently discarded. The timeout covers the whole
        # batch, not a single download.
        list(executor.map(fn, links, timeout=30))
    # Use the module logger (not the root logger) for consistency with the
    # rest of the file; %-style args keep formatting lazy.
    logger.info('Took %s', time() - ts)
logging.info('Took %s', time() - ts)
if __name__ == '__main__':
main()
| 1,057 | 0 | 23 |
4a6c182e6f0778cf4a38745e6fe085b2dadf7e3f | 1,228 | py | Python | userena/middleware.py | mortenwh/django-userena | 6919ffa7764c6a4a493b0be4e624855c22398bfb | [
"BSD-3-Clause"
] | 501 | 2015-01-05T19:45:27.000Z | 2022-03-16T02:56:24.000Z | userena/middleware.py | mortenwh/django-userena | 6919ffa7764c6a4a493b0be4e624855c22398bfb | [
"BSD-3-Clause"
] | 119 | 2015-01-09T08:43:39.000Z | 2018-11-13T16:59:38.000Z | userena/middleware.py | mortenwh/django-userena | 6919ffa7764c6a4a493b0be4e624855c22398bfb | [
"BSD-3-Clause"
] | 202 | 2015-01-06T11:54:56.000Z | 2021-09-03T03:31:33.000Z | from django.utils import translation
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from userena import settings as userena_settings
from userena.compat import SiteProfileNotAvailable
from userena.utils import get_user_profile
class UserenaLocaleMiddleware(object):
"""
Set the language by looking at the language setting in the profile.
It doesn't override the cookie that is set by Django so a user can still
switch languages depending if the cookie is set.
"""
| 37.212121 | 88 | 0.658795 | from django.utils import translation
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from userena import settings as userena_settings
from userena.compat import SiteProfileNotAvailable
from userena.utils import get_user_profile
class UserenaLocaleMiddleware(object):
    """
    Activate the language stored on the authenticated user's profile.

    Django's language cookie always wins when present, so users can still
    switch languages manually; the profile value only applies when no
    cookie has been set.
    """
    def process_request(self, request):
        # An explicit language cookie takes precedence over the profile.
        if request.session.get(settings.LANGUAGE_COOKIE_NAME):
            return
        if not request.user.is_authenticated():
            return
        try:
            profile = get_user_profile(user=request.user)
        except (ObjectDoesNotExist, SiteProfileNotAvailable):
            return
        if not profile:
            return
        try:
            # Profiles lacking the configured language field are ignored.
            lang = getattr(profile, userena_settings.USERENA_LANGUAGE_FIELD)
            translation.activate(lang)
            request.LANGUAGE_CODE = translation.get_language()
        except AttributeError:
            pass
| 673 | 0 | 26 |
d8454e302db0e3b89273f2a1027420e601f162e4 | 278 | py | Python | 1_beginner/chapter7/solutions/first_three_words.py | code4tomorrow/Python | 035b6f5d8fd635a16caaff78bcd3f582663dadc3 | [
"MIT"
] | 4 | 2021-03-01T00:32:45.000Z | 2021-05-21T22:01:52.000Z | 1_beginner/chapter7/solutions/first_three_words.py | code4tomorrow/Python | 035b6f5d8fd635a16caaff78bcd3f582663dadc3 | [
"MIT"
] | 29 | 2020-09-12T22:56:04.000Z | 2021-09-25T17:08:42.000Z | 1_beginner/chapter7/solutions/first_three_words.py | code4tomorrow/Python | 035b6f5d8fd635a16caaff78bcd3f582663dadc3 | [
"MIT"
] | 7 | 2021-02-25T01:50:55.000Z | 2022-02-28T00:00:42.000Z | """
First Three Words
Write a program which asks
the user to enter a sentence.
Print the first three words in the sentence.
(Assume the user enters at least 3 words.)
"""
# Prompt for a sentence and echo its first three words, one per line.
sentence = input("Enter a sentence: ")
for token in sentence.split()[:3]:
    print(token)
| 17.375 | 44 | 0.708633 | """
First Three Words
Write a program which asks
the user to enter a sentence.
Print the first three words in the sentence.
(Assume the user enters at least 3 words.)
"""
sentence = input("Enter a sentence: ")
words = sentence.split()
for word in words[:3]:
print(word)
| 0 | 0 | 0 |
b6b6f58927be4d96e344bc459e393288445d1051 | 404 | py | Python | Employment_System/apps/users/migrations/0002_user_imgurl.py | rui1106/Graduation_Project | 77457588f82cfa8c35b74fc60ec3c1ffd5271600 | [
"CC0-1.0"
] | null | null | null | Employment_System/apps/users/migrations/0002_user_imgurl.py | rui1106/Graduation_Project | 77457588f82cfa8c35b74fc60ec3c1ffd5271600 | [
"CC0-1.0"
] | null | null | null | Employment_System/apps/users/migrations/0002_user_imgurl.py | rui1106/Graduation_Project | 77457588f82cfa8c35b74fc60ec3c1ffd5271600 | [
"CC0-1.0"
] | null | null | null | # Generated by Django 2.2.5 on 2022-03-02 12:32
from django.db import migrations, models
| 21.263158 | 85 | 0.591584 | # Generated by Django 2.2.5 on 2022-03-02 12:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='ImgUrl',
field=models.CharField(default='', max_length=500, verbose_name='图片url'),
),
]
| 0 | 294 | 23 |
81d2cae57ab8e0815f198becb724e531a46d96fe | 3,183 | py | Python | gtkssw.py | dmitriiweb/Stopwatch | f8b1921da6b6a823e6d874cc5fe3fe5c366d2f06 | [
"MIT"
] | 1 | 2020-11-09T10:44:16.000Z | 2020-11-09T10:44:16.000Z | gtkssw.py | dmitriiweb/Stopwatch | f8b1921da6b6a823e6d874cc5fe3fe5c366d2f06 | [
"MIT"
] | null | null | null | gtkssw.py | dmitriiweb/Stopwatch | f8b1921da6b6a823e6d874cc5fe3fe5c366d2f06 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import datetime
import os
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, GObject
BASEDIR = os.path.dirname(os.path.abspath(__file__))
if __name__ == '__main__':
win = StopWatch()
icon_path = os.path.join(BASEDIR, 'stopwatch.png')
win.set_icon_from_file(icon_path)
win.connect('destroy', Gtk.main_quit)
win.show_all()
Gtk.main()
| 32.151515 | 98 | 0.650016 | #!/usr/bin/env python3
import datetime
import os
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, GObject
BASEDIR = os.path.dirname(os.path.abspath(__file__))
class StopWatch(Gtk.Window):
    """Minimal GTK3 stopwatch window: a large time readout plus start/pause
    and reset buttons, driven by a one-second GLib timeout."""
    def __init__(self):
        """Build the fixed-size window, its widgets and layout."""
        Gtk.Window.__init__(self, title='Stopwatch')
        self.create_variables()
        self.set_default_size(400, 100)
        self.set_resizable(False)
        self.set_time_label()
        self.set_title('Stopwatch {}'.format(self.time_label_val))
        self.create_widgets()
        self.create_layouts()
    def create_variables(self):
        """Initialise the run flag, elapsed-seconds counter and label text."""
        self.is_running = False
        self.time_in_seconds = 0
        self.time_label_val = None
    def set_time_label(self):
        """Format the elapsed seconds as H:MM:SS for display."""
        self.time_label_val = str(datetime.timedelta(seconds=self.time_in_seconds))
    def create_widgets(self):
        """Create the time label and the start/pause and reset buttons."""
        self.time_label = Gtk.Label()
        self.time_label.set_markup('<span font="48"><b>{}</b></span>'.format(self.time_label_val))
        self.image_start = Gtk.Image().new_from_icon_name('media-playback-start', 1)
        self.image_pause = Gtk.Image().new_from_icon_name('media-playback-pause', 1)
        self.start_pause_btn = Gtk.Button(image=self.image_start)
        self.image_update = Gtk.Image().new_from_icon_name('system-software-update', 1)
        self.update_btn = Gtk.Button(image=self.image_update)
        self.start_pause_btn.connect('clicked', self.start_pause)
        self.update_btn.connect('clicked', self.reset_label)
    def start_pause(self, button):
        """Toggle between running and paused, swapping the button icon."""
        if not self.is_running:
            self.is_running = True
            self.start_pause_btn.set_image(self.image_pause)
            # Tick once per second while running.
            GObject.timeout_add(1000, self.update_label)
        else:
            self.is_running = False
            self.start_pause_btn.set_image(self.image_start)
    def update_label(self):
        """Timeout callback: advance one second while running.

        Returning True keeps the GLib timeout scheduled; the implicit None
        when paused cancels it (start_pause arms a fresh one on resume).
        """
        if self.is_running:
            self.main_def(1)
            return True
    def reset_label(self, button):
        """Reset the counter to zero - only honoured while paused."""
        if not self.is_running:
            self.main_def(0)
    def main_def(self, counter):
        """Apply a counter update (0 = reset, otherwise increment) and
        refresh both the label markup and the window title."""
        if counter == 0:
            self.time_in_seconds = 0
        else:
            self.time_in_seconds += counter
        self.set_time_label()
        self.time_label.set_markup('<span font="48"><b>{}</b></span>'.format(self.time_label_val))
        self.set_title('Stopwatch {}'.format(self.time_label_val))
    def create_layouts(self):
        """Pack the label row above the button row in a vertical box."""
        self.main_box = Gtk.Box(spacing=6,
                                orientation=Gtk.Orientation.VERTICAL)
        self.label_box = Gtk.Box()
        self.btn_box = Gtk.Box(spacing=6)
        self.label_box.pack_start(self.time_label, True, True, 0)
        self.btn_box.pack_start(self.start_pause_btn, True, True, 0)
        self.btn_box.pack_start(self.update_btn, True, True, 0)
        self.main_box.pack_start(self.label_box, True, True, 0)
        self.main_box.pack_start(self.btn_box, True, True, 0)
        self.add(self.main_box)
if __name__ == '__main__':
win = StopWatch()
icon_path = os.path.join(BASEDIR, 'stopwatch.png')
win.set_icon_from_file(icon_path)
win.connect('destroy', Gtk.main_quit)
win.show_all()
Gtk.main()
| 2,503 | 7 | 265 |
d325bb8b5bd131bccf2d5094bfd89865d38cb9f5 | 417 | py | Python | blog/migrations/0004_blogpost_image.py | skynette/CSMD | 8b3f90adad2d18569ef44b235a1213c547f94f22 | [
"CC-BY-3.0"
] | null | null | null | blog/migrations/0004_blogpost_image.py | skynette/CSMD | 8b3f90adad2d18569ef44b235a1213c547f94f22 | [
"CC-BY-3.0"
] | null | null | null | blog/migrations/0004_blogpost_image.py | skynette/CSMD | 8b3f90adad2d18569ef44b235a1213c547f94f22 | [
"CC-BY-3.0"
] | null | null | null | # Generated by Django 3.0.6 on 2021-01-10 17:55
from django.db import migrations, models
| 21.947368 | 89 | 0.592326 | # Generated by Django 3.0.6 on 2021-01-10 17:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema migration: add an ``ImgUrl`` text field to the User model."""
    # Applies on top of the app's initial migration.
    dependencies = [
        ('users', '0001_initial'),
    ]
    # Stores the image URL as a plain CharField (empty by default); the
    # verbose_name is the admin-facing label and is left untouched.
    operations = [
        migrations.AddField(
            model_name='user',
            name='ImgUrl',
            field=models.CharField(default='', max_length=500, verbose_name='图片url'),
        ),
    ]
| 0 | 303 | 23 |
6771a7c9409ba16d21ba2ac2dc43bc32f72bf439 | 2,565 | py | Python | apps/student_mgmt/app.py | PrasadHonrao/python-samples | faa48aa3eaf2d67b8cef0114e1f6ef08e2c1300a | [
"MIT"
] | 3 | 2018-08-20T13:00:01.000Z | 2021-09-18T04:19:46.000Z | apps/student_mgmt/app.py | PrasadHonrao/python-samples | faa48aa3eaf2d67b8cef0114e1f6ef08e2c1300a | [
"MIT"
] | 1 | 2021-06-25T20:25:02.000Z | 2021-08-19T22:44:31.000Z | apps/student_mgmt/app.py | PrasadHonrao/python-samples | faa48aa3eaf2d67b8cef0114e1f6ef08e2c1300a | [
"MIT"
] | 1 | 2021-09-18T23:51:20.000Z | 2021-09-18T23:51:20.000Z | """
Student management module
"""
students = []
def get_student_title_case() -> list:
    """
    Function to return every registered student's name in title case
    :return: list of title-cased student names
    """
    # The previous annotation claimed `-> str`, but the function has always
    # returned a list of names; the annotation is corrected and the manual
    # append loop replaced with a comprehension.
    return [student["name"].title() for student in students]
def print_students_title_case() -> None:
    """
    Function to print every student name in title case
    """
    print(get_student_title_case())
def add_student(name, stud_id=999):
    """
    Function to register a student in the module-level list
    :param name: student name
    :param stud_id: student id (defaults to 999 when omitted)
    """
    students.append({"name": name, "id": stud_id})
    print("Student count is {0}".format(len(students)))
def save_file(student):
    """
    Function to append one student record to students.txt
    :param student: student info (one line of text)
    """
    try:
        # `with` guarantees the handle is closed even if the write raises,
        # unlike the previous open/write/close sequence.
        with open("students.txt", "a") as student_file:
            student_file.write(student + "\n")
    except IOError:
        print("Could not save")
def read_file():
    """
    Load previously saved students from ``students.txt``.

    Each line is passed verbatim (trailing newline included, matching the
    original behaviour) to ``add_student``.
    """
    try:
        # ``with`` closes the handle even if add_student raises; iterating
        # the handle streams one line at a time instead of readlines().
        with open("students.txt", "r") as student_file:
            for student in student_file:
                add_student(student)
    except IOError:
        print("Could not read")
# ADD NEW STUDENT BLOCK
# Snapshot the (still empty) name list, then seed the roster.
student_list = get_student_title_case()
add_student("Prasad", "101")
# ADD NEW STUDENT VIA USER INPUT AND DISPLAY THE LIST
student_name = input("Enter student name : ")
student_id = input("Enter student id : ")
add_student(student_name, student_id)
# PRINT STUDENT DETAILS
print_students_title_case()
# USE BELOW CODE BLOCK IF YOU WANT TO ADD NEW STUDENT IN A LOOP
ADD_NEW_STUDENT_FLAG: str = ""
MESSAGE = "Do you want to add new student record?? Press [Y] / [y] to continue."
ADD_NEW_STUDENT_FLAG = input(MESSAGE)
# Keep prompting for new records until anything other than Y/y is entered.
while ADD_NEW_STUDENT_FLAG in ("Y", "y"):
    student_name = input("enter student name : ")
    student_id = input("enter student id : ")
    add_student(student_name, student_id)
    ADD_NEW_STUDENT_FLAG = input(MESSAGE)
print_students_title_case()
# READ FROM File
# Loads previously saved records from students.txt (if present).
read_file()
print_students_title_case()
# WRITE TO FILE
# Collect one more record and append its name to students.txt.
print("writing to file...")
student_name = input("enter student name : ")
student_id = input("enter student id : ")
add_student(student_name, student_id)
save_file(student_name)
| 26.173469 | 81 | 0.65575 | """
Student management module
"""
students = []  # module-level roster of {"name": ..., "id": ...} dicts
def get_student_title_case(source=None) -> list:
    """
    Return the names of the given students in title case.

    :param source: iterable of student dicts (each with a "name" key);
        defaults to the module-level ``students`` roster.
    :return: list of title-cased student names
        (the old ``-> str`` annotation was wrong: a list is returned)
    """
    # Only fall back to the module-level roster when no explicit source is
    # supplied, so the function is also usable on its own.
    if source is None:
        source = students
    return [student["name"].title() for student in source]
def print_students_title_case() -> None:
    """Print the title-cased names of every registered student."""
    print(get_student_title_case())
def add_student(name, stud_id=999):
    """
    Append a student record to the module-level roster and report its size.

    :param name: student name
    :param stud_id: student id (999 when not supplied)
    """
    students.append({"name": name, "id": stud_id})
    print("Student count is {0}".format(len(students)))
def save_file(student):
    """
    Append a single student entry to ``students.txt``.

    :param student: student info (a string; written on its own line)
    """
    try:
        # ``with`` guarantees the handle is closed even if the write raises,
        # fixing the leak in the old open()/write()/close() sequence.
        with open("students.txt", "a") as student_file:
            student_file.write(student + "\n")
    except IOError:
        print("Could not save")
def read_file():
    """
    Load previously saved students from ``students.txt``.

    Each line is passed verbatim (trailing newline included, matching the
    original behaviour) to ``add_student``.
    """
    try:
        # ``with`` closes the handle even if add_student raises; iterating
        # the handle streams one line at a time instead of readlines().
        with open("students.txt", "r") as student_file:
            for student in student_file:
                add_student(student)
    except IOError:
        print("Could not read")
# ADD NEW STUDENT BLOCK
# Snapshot the (still empty) name list, then seed the roster.
student_list = get_student_title_case()
add_student("Prasad", "101")
# ADD NEW STUDENT VIA USER INPUT AND DISPLAY THE LIST
student_name = input("Enter student name : ")
student_id = input("Enter student id : ")
add_student(student_name, student_id)
# PRINT STUDENT DETAILS
print_students_title_case()
# USE BELOW CODE BLOCK IF YOU WANT TO ADD NEW STUDENT IN A LOOP
ADD_NEW_STUDENT_FLAG: str = ""
MESSAGE = "Do you want to add new student record?? Press [Y] / [y] to continue."
ADD_NEW_STUDENT_FLAG = input(MESSAGE)
# Keep prompting for new records until anything other than Y/y is entered.
while ADD_NEW_STUDENT_FLAG in ("Y", "y"):
    student_name = input("enter student name : ")
    student_id = input("enter student id : ")
    add_student(student_name, student_id)
    ADD_NEW_STUDENT_FLAG = input(MESSAGE)
print_students_title_case()
# READ FROM File
# Loads previously saved records from students.txt (if present).
read_file()
print_students_title_case()
# WRITE TO FILE
# Collect one more record and append its name to students.txt.
print("writing to file...")
student_name = input("enter student name : ")
student_id = input("enter student id : ")
add_student(student_name, student_id)
save_file(student_name)
| 0 | 0 | 0 |
f0ad7e1d335e7dbc804e10acdfb62405c8e28311 | 1,096 | py | Python | rllib/utils/spaces/repeated.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 22 | 2018-05-08T05:52:34.000Z | 2020-04-01T10:09:55.000Z | rllib/utils/spaces/repeated.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 73 | 2021-09-25T07:11:39.000Z | 2022-03-26T07:10:59.000Z | rllib/utils/spaces/repeated.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 10 | 2018-04-27T10:50:59.000Z | 2020-02-24T02:41:43.000Z | import gym
import numpy as np
from ray.rllib.utils.annotations import PublicAPI
@PublicAPI
class Repeated(gym.Space):
    """Represents a variable-length list of child spaces.

    A sample from this space is a list of between 1 and ``max_len`` samples
    of ``child_space``; shorter lists (and numpy arrays) whose elements all
    belong to the child space are also treated as members.

    Example:
        self.observation_space = spaces.Repeated(spaces.Box(4,), max_len=10)
            --> from 0 to 10 boxes of shape (4,)

    See also: documentation for rllib.models.RepeatedValues, which shows how
    the lists are represented as batched input for ModelV2 classes.
    """
| 28.102564 | 76 | 0.623175 | import gym
import numpy as np
from ray.rllib.utils.annotations import PublicAPI
@PublicAPI
class Repeated(gym.Space):
    """A space whose members are variable-length lists of a child space.

    ``sample`` draws a list of 1..``max_len`` samples of ``child_space``;
    ``contains`` accepts lists/ndarrays up to ``max_len`` long whose
    elements all belong to the child space.

    Example:
        self.observation_space = spaces.Repeated(spaces.Box(4,), max_len=10)
            --> from 0 to 10 boxes of shape (4,)

    See also: documentation for rllib.models.RepeatedValues, which shows how
    the lists are represented as batched input for ModelV2 classes.
    """

    def __init__(self, child_space: gym.Space, max_len: int):
        super().__init__()
        # The space each list element is drawn from, and the length cap.
        self.child_space = child_space
        self.max_len = max_len

    def sample(self):
        # Pick a random length in [1, max_len], then draw that many samples.
        num_items = self.np_random.randint(1, self.max_len + 1)
        drawn = []
        for _ in range(num_items):
            drawn.append(self.child_space.sample())
        return drawn

    def contains(self, x):
        # Reject anything that is not a list/ndarray or is too long, then
        # require every element to be valid in the child space.
        if not isinstance(x, (list, np.ndarray)):
            return False
        if len(x) > self.max_len:
            return False
        return all(self.child_space.contains(c) for c in x)

    def __repr__(self):
        return "Repeated({}, {})".format(self.child_space, self.max_len)
| 511 | 0 | 108 |
f188b0989d76a3e9d827b499042fb58f173865f0 | 1,334 | py | Python | scripts/common_features.py | kdelwat/LangEvolve | 6e400f9e3d7ff7bc0dce0d1db0af3682b2ad0e01 | [
"MIT"
] | 29 | 2016-12-18T08:44:26.000Z | 2022-03-20T09:39:22.000Z | scripts/common_features.py | kdelwat/LangEvolve | 6e400f9e3d7ff7bc0dce0d1db0af3682b2ad0e01 | [
"MIT"
] | 11 | 2016-11-22T01:13:11.000Z | 2022-03-04T21:21:15.000Z | scripts/common_features.py | kdelwat/LangEvolve | 6e400f9e3d7ff7bc0dce0d1db0af3682b2ad0e01 | [
"MIT"
] | 5 | 2017-02-07T08:54:49.000Z | 2022-01-13T15:23:45.000Z | # common_features.py
# Invoke on the command line like: python common_features.py pbtd
# Outputs all features common to all of the given segments, to help
# in rule writing.
from tabulate import tabulate
import csv
import sys
import os.path as path
# Make the project root importable so sibling packages resolve when this
# script is run directly from the scripts/ directory.
base_directory = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(base_directory)
def load_segments(filename):
    '''Load a segment feature matrix from a CSV file, returning a list of
    dictionaries (one per row, keyed by the header row) with information
    about each segment.
    '''
    # newline='' is what the csv module requires so that quoted fields
    # containing line breaks are parsed correctly.
    with open(filename, 'r', newline='') as f:
        return list(csv.DictReader(f))
# Entry point: the IPA segment string (e.g. "pbtd") comes from the first
# CLI argument.
# NOTE(review): `main` is not defined in this copy of the file as shown —
# confirm it is provided before running.
if __name__ == '__main__':
    main(sys.argv[1])
| 31.023256 | 79 | 0.66042 | # common_features.py
# Invoke on the command line like: python common_features.py pbtd
# Outputs all features common to all of the given segments, to help
# in rule writing.
from tabulate import tabulate
import csv
import sys
import os.path as path
# Make the project root importable so sibling packages resolve when this
# script is run directly from the scripts/ directory.
base_directory = path.dirname(path.dirname(path.abspath(__file__)))
sys.path.append(base_directory)
def load_segments(filename):
    '''Load a segment feature matrix from a CSV file, returning a list of
    dictionaries (one per row, keyed by the header row) with information
    about each segment.
    '''
    # newline='' is what the csv module requires so that quoted fields
    # containing line breaks are parsed correctly.
    with open(filename, 'r', newline='') as f:
        return list(csv.DictReader(f))
def main(segment_string):
    '''Print a table of every feature value shared by all of the segments
    whose IPA symbol appears in segment_string.
    '''
    all_segments = load_segments(path.join(base_directory, 'engine', 'data',
                                           'features.csv'))
    target_segments = [segment for segment in all_segments
                       if segment['IPA'] in segment_string]
    if not target_segments:
        # Guard: an unknown segment string previously crashed with an
        # IndexError on target_segments[0]; print an empty table instead.
        print('Common features')
        print('===============')
        print(tabulate([], headers=['Feature', 'Value']))
        return
    common_features = []
    # A feature is "common" when every target segment has the same non-zero
    # value for it; the IPA symbol column itself is not a feature.
    for feature, value in target_segments[0].items():
        if feature != 'IPA' and value != '0':
            if all(segment[feature] == value for segment in target_segments):
                common_features.append([feature, value])
    print('Common features')
    print('===============')
    print(tabulate(common_features, headers=['Feature', 'Value']))
# Entry point: the IPA segment string (e.g. "pbtd") comes from the first
# CLI argument.
if __name__ == '__main__':
    main(sys.argv[1])
| 647 | 0 | 23 |
eb7f5a8a820668422a863c8028972cebdb13707d | 8,150 | py | Python | simplemud.py | alexandershuping/MuddySwamp | c9fea7f9b5b0c372afdffdbc24f29eb90fd5881d | [
"MIT"
] | null | null | null | simplemud.py | alexandershuping/MuddySwamp | c9fea7f9b5b0c372afdffdbc24f29eb90fd5881d | [
"MIT"
] | null | null | null | simplemud.py | alexandershuping/MuddySwamp | c9fea7f9b5b0c372afdffdbc24f29eb90fd5881d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import time
import sys
# import the MUD server class
from mudserver import MudServer, Event, EventType
#prints to stderr
# NOTE(review): err_print/v_print are referenced by the game loop below but
# are not defined in this copy of the file — confirm they are provided.
VERBOSE_PRINT = False
# structure defining the rooms in the game. Try adding more rooms to the game!
rooms = {
    "Tavern": {
        "description": "You're in a cozy tavern warmed by an open fire.",
        "exits": {"outside": "Outside"},
    },
    "Outside": {
        "description": "You're standing outside a tavern. It's raining.",
        "exits": {"inside": "Tavern"},
    }
}
# stores the players in the game, keyed by the server-assigned player id
players = {}
# start the server
mud = MudServer()
# main game loop. We loop forever (i.e. until the program is terminated)
while True:
# pause for 1/5 of a second on each loop, so that we don't constantly
# use 100% CPU time
time.sleep(0.2)
# 'update' must be called in the loop to keep the game running and give
# us up-to-date information
mud.update()
# handle events on the server_queue
while (len(mud.server_queue) > 0):
event = mud.server_queue.popleft()
err_print(event)
id = event.id
if event.type is EventType.PLAYER_JOIN:
# add the new player to the dictionary, noting that they've not been
# named yet.
# The dictionary key is the player's id number. We set their room to
# None initially until they have entered a name
err_print("Player %s joined." % event.id)
players[id] = {
"name": None,
"room": None,
}
#prompt the user for their name
mud.send_message(id, "What is your name?")
elif event.type is EventType.MESSAGE_RECEIVED:
# splitting into command + params to make porting the code easier
command, params = (event.message.split(" ", 1) + ["", ""])[:2]
err_print(event.message)
# all these elifs will be replaced with "character.parse([input])"
if players[id]["name"] is None:
players[id]["name"] = event.message.split(" ")[0]
players[id]["room"] = "Tavern"
for pid, pl in players.items():
# send each player a message to tell them about the new player
mud.send_message(pid, "%s entered the game" %
players[id]["name"])
mud.send_message(id, "Welcome to the game, %s. " %
players[id]["name"]
+ "Type 'help' for a list of commands. Have fun!")
# 'help' command
elif command == "help":
# send the player back the list of possible commands
mud.send_message(id, "Commands:")
mud.send_message(id, " say <message> - Says something out loud, "
+ "e.g. 'say Hello'")
mud.send_message(id, " look - Examines the "
+ "surroundings, e.g. 'look'")
mud.send_message(id, " go <exit> - Moves through the exit "
+ "specified, e.g. 'go outside'")
# 'say' command
elif command == "say":
# go through every player in the game
for pid, pl in players.items():
# if they're in the same room as the player
if players[pid]["room"] == players[id]["room"]:
# send them a message telling them what the player said
mud.send_message(pid, "{} says: {}".format(
players[id]["name"], params))
# 'look' command
elif command == "look":
# store the player's current room
rm = rooms[players[id]["room"]]
# send the player back the description of their current room
mud.send_message(id, rm["description"])
playershere = []
# go through every player in the game
for pid, pl in players.items():
# if they're in the same room as the player
if players[pid]["room"] == players[id]["room"]:
# ... and they have a name to be shown
if players[pid]["name"] is not None:
# add their name to the list
playershere.append(players[pid]["name"])
# send player a message containing the list of players in the room
mud.send_message(id, "Players here: {}".format(
", ".join(playershere)))
# send player a message containing the list of exits from this room
mud.send_message(id, "Exits are: {}".format(
", ".join(rm["exits"])))
# 'go' command
elif command == "go":
# store the exit name
ex = params.lower()
# store the player's current room
rm = rooms[players[id]["room"]]
# if the specified exit is found in the room's exits list
if ex in rm["exits"]:
# go through all the players in the game
for pid, pl in players.items():
# if player is in the same room and isn't the player
# sending the command
if players[pid]["room"] == players[id]["room"] \
and pid != id:
# send them a message telling them that the player
# left the room
mud.send_message(pid, "{} left via exit '{}'".format(
players[id]["name"], ex))
# update the player's current room to the one the exit leads to
players[id]["room"] = rm["exits"][ex]
rm = rooms[players[id]["room"]]
# go through all the players in the game
for pid, pl in players.items():
# if player is in the same (new) room and isn't the player
# sending the command
if players[pid]["room"] == players[id]["room"] \
and pid != id:
# send them a message telling them that the player
# entered the room
mud.send_message(pid,
"{} arrived via exit '{}'".format(
players[id]["name"], ex))
# send the player a message telling them where they are now
mud.send_message(id, "You arrive at '{}'".format(
players[id]["room"]))
# the specified exit wasn't found in the current room
else:
# send back an 'unknown exit' message
mud.send_message(id, "Unknown exit '{}'".format(ex))
# some other, unrecognised command
else:
# send back an 'unknown command' message
mud.send_message(id, "Unknown command '{}'".format(command))
elif event.type is EventType.PLAYER_DISCONNECT:
err_print("Player %s left" % event.id)
#if the player has been added to the list, they must be removed
if event.id in players:
for pid in players:
mud.send_message(pid, "%s quit the game" % players[event.id]["name"])
del(players[id]) | 43.351064 | 89 | 0.480613 | #!/usr/bin/env python
import time
import sys
# import the MUD server class
from mudserver import MudServer, Event, EventType
#prints to stderr
def err_print(*args, **kwargs):
    """Print to stderr instead of stdout; same signature as built-in print()."""
    print(*args, file=sys.stderr, **kwargs)
# Global switch for verbose diagnostic output (off by default).
VERBOSE_PRINT = False
def v_print(*args, **kwargs):
    """Print to stderr only when VERBOSE_PRINT is enabled."""
    if VERBOSE_PRINT:
        err_print(*args, **kwargs)
# structure defining the rooms in the game. Try adding more rooms to the game!
# Each room has a player-facing description and an exits mapping of
# exit name -> destination room key.
rooms = {
    "Tavern": {
        "description": "You're in a cozy tavern warmed by an open fire.",
        "exits": {"outside": "Outside"},
    },
    "Outside": {
        "description": "You're standing outside a tavern. It's raining.",
        "exits": {"inside": "Tavern"},
    }
}
# stores the players in the game, keyed by the server-assigned player id
players = {}
# start the server
mud = MudServer()
# main game loop. We loop forever (i.e. until the program is terminated)
while True:
# pause for 1/5 of a second on each loop, so that we don't constantly
# use 100% CPU time
time.sleep(0.2)
# 'update' must be called in the loop to keep the game running and give
# us up-to-date information
mud.update()
# handle events on the server_queue
while (len(mud.server_queue) > 0):
event = mud.server_queue.popleft()
err_print(event)
id = event.id
if event.type is EventType.PLAYER_JOIN:
# add the new player to the dictionary, noting that they've not been
# named yet.
# The dictionary key is the player's id number. We set their room to
# None initially until they have entered a name
err_print("Player %s joined." % event.id)
players[id] = {
"name": None,
"room": None,
}
#prompt the user for their name
mud.send_message(id, "What is your name?")
elif event.type is EventType.MESSAGE_RECEIVED:
# splitting into command + params to make porting the code easier
command, params = (event.message.split(" ", 1) + ["", ""])[:2]
err_print(event.message)
# all these elifs will be replaced with "character.parse([input])"
if players[id]["name"] is None:
players[id]["name"] = event.message.split(" ")[0]
players[id]["room"] = "Tavern"
for pid, pl in players.items():
# send each player a message to tell them about the new player
mud.send_message(pid, "%s entered the game" %
players[id]["name"])
mud.send_message(id, "Welcome to the game, %s. " %
players[id]["name"]
+ "Type 'help' for a list of commands. Have fun!")
# 'help' command
elif command == "help":
# send the player back the list of possible commands
mud.send_message(id, "Commands:")
mud.send_message(id, " say <message> - Says something out loud, "
+ "e.g. 'say Hello'")
mud.send_message(id, " look - Examines the "
+ "surroundings, e.g. 'look'")
mud.send_message(id, " go <exit> - Moves through the exit "
+ "specified, e.g. 'go outside'")
# 'say' command
elif command == "say":
# go through every player in the game
for pid, pl in players.items():
# if they're in the same room as the player
if players[pid]["room"] == players[id]["room"]:
# send them a message telling them what the player said
mud.send_message(pid, "{} says: {}".format(
players[id]["name"], params))
# 'look' command
elif command == "look":
# store the player's current room
rm = rooms[players[id]["room"]]
# send the player back the description of their current room
mud.send_message(id, rm["description"])
playershere = []
# go through every player in the game
for pid, pl in players.items():
# if they're in the same room as the player
if players[pid]["room"] == players[id]["room"]:
# ... and they have a name to be shown
if players[pid]["name"] is not None:
# add their name to the list
playershere.append(players[pid]["name"])
# send player a message containing the list of players in the room
mud.send_message(id, "Players here: {}".format(
", ".join(playershere)))
# send player a message containing the list of exits from this room
mud.send_message(id, "Exits are: {}".format(
", ".join(rm["exits"])))
# 'go' command
elif command == "go":
# store the exit name
ex = params.lower()
# store the player's current room
rm = rooms[players[id]["room"]]
# if the specified exit is found in the room's exits list
if ex in rm["exits"]:
# go through all the players in the game
for pid, pl in players.items():
# if player is in the same room and isn't the player
# sending the command
if players[pid]["room"] == players[id]["room"] \
and pid != id:
# send them a message telling them that the player
# left the room
mud.send_message(pid, "{} left via exit '{}'".format(
players[id]["name"], ex))
# update the player's current room to the one the exit leads to
players[id]["room"] = rm["exits"][ex]
rm = rooms[players[id]["room"]]
# go through all the players in the game
for pid, pl in players.items():
# if player is in the same (new) room and isn't the player
# sending the command
if players[pid]["room"] == players[id]["room"] \
and pid != id:
# send them a message telling them that the player
# entered the room
mud.send_message(pid,
"{} arrived via exit '{}'".format(
players[id]["name"], ex))
# send the player a message telling them where they are now
mud.send_message(id, "You arrive at '{}'".format(
players[id]["room"]))
# the specified exit wasn't found in the current room
else:
# send back an 'unknown exit' message
mud.send_message(id, "Unknown exit '{}'".format(ex))
# some other, unrecognised command
else:
# send back an 'unknown command' message
mud.send_message(id, "Unknown command '{}'".format(command))
elif event.type is EventType.PLAYER_DISCONNECT:
err_print("Player %s left" % event.id)
#if the player has been added to the list, they must be removed
if event.id in players:
for pid in players:
mud.send_message(pid, "%s quit the game" % players[event.id]["name"])
del(players[id]) | 107 | 0 | 44 |
67e72487e0c252d181d62a1ff9eaf9df986e0154 | 322 | py | Python | shopit/forms/flag.py | dinoperovic/djangoshop-shopit | b42a2bf0ec319817eb37ef939608b04498fc4ff2 | [
"BSD-3-Clause"
] | 14 | 2016-11-25T16:06:20.000Z | 2018-08-30T19:20:41.000Z | shopit/forms/flag.py | dinoperovic/djangoshop-shopit | b42a2bf0ec319817eb37ef939608b04498fc4ff2 | [
"BSD-3-Clause"
] | 3 | 2017-04-14T13:18:22.000Z | 2018-07-18T11:34:53.000Z | shopit/forms/flag.py | dinoperovic/django-shop | b42a2bf0ec319817eb37ef939608b04498fc4ff2 | [
"BSD-3-Clause"
] | 6 | 2019-04-07T23:52:54.000Z | 2020-09-20T05:30:07.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from mptt.forms import MPTTAdminForm
from parler.forms import TranslatableModelForm
from shopit.models.flag import Flag
| 23 | 58 | 0.751553 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from mptt.forms import MPTTAdminForm
from parler.forms import TranslatableModelForm
from shopit.models.flag import Flag
class FlagModelForm(MPTTAdminForm, TranslatableModelForm):
    """Form for Flag combining MPTT tree handling with parler translations."""
    class Meta:
        # Expose every model field; nothing is excluded.
        model = Flag
        exclude = []
| 0 | 95 | 23 |
4a45ce3726c6ff7902572a389f2c5f6afdff316e | 4,066 | py | Python | Nesting software/sheet.py | prasadbhatane/Nesting_Software_and_Automated_Marker | 4d75bb799c17376845d2a5a60c046d9ef5c27011 | [
"Apache-2.0"
] | 1 | 2020-12-12T01:06:15.000Z | 2020-12-12T01:06:15.000Z | Nesting software/sheet.py | prasadbhatane/Nesting_Software_and_Automated_Marker | 4d75bb799c17376845d2a5a60c046d9ef5c27011 | [
"Apache-2.0"
] | 1 | 2020-12-14T00:36:12.000Z | 2021-01-17T05:35:39.000Z | Nesting software/sheet.py | prasadbhatane/Nesting_Software_and_Automated_Marker | 4d75bb799c17376845d2a5a60c046d9ef5c27011 | [
"Apache-2.0"
] | 2 | 2020-10-27T08:19:50.000Z | 2021-08-30T03:31:34.000Z | from point import Point
from rectangle import Rectangle
from utils import areOverlapping
| 36.630631 | 116 | 0.511559 | from point import Point
from rectangle import Rectangle
from utils import areOverlapping
class Sheet:
    """A rectangular sheet that packs axis-aligned rectangles greedily.

    Tracks the rectangles placed so far, the candidate corner points where a
    new rectangle may be anchored, and the distinct x/y edge coordinates used
    to measure how far the current packing extends.
    """
    def __init__(self, length, breadth):
        """Create an empty sheet.

        :param length: sheet extent along the x axis
        :param breadth: sheet extent along the y axis
        """
        self.length = length
        self.breadth = breadth
        self.area = length * breadth
        self.cornerPoints = set()  # candidate anchor points, as (x, y) tuples
        self.cornerPoints.add((0, 0))
        self.rectangleSet = set()  # placed rectangles as (top-left, bottom-right) Points
        self.x_set = set()  # x coordinates of placed rectangle edges
        self.y_set = set()  # y coordinates of placed rectangle edges
    def getInfo(self):
        """Print the sheet dimensions and the current packing state."""
        print("sheet dimensions are :", self.length, self.breadth)
        print("corner points : ", self.cornerPoints)
        print("rectangles_tl_br : ", self.rectangleSet)
    def isSheetEmpty(self):
        """Return True while nothing has been placed (only (0, 0) is known)."""
        return len(self.cornerPoints) == 1
    def addRectangle(self, rectangle):
        """Place ``rectangle`` at the position chosen by getGreedyPosition.

        Updates the rectangle set, the candidate corner points and the edge
        coordinate sets. The rectangle's sides are swapped first when the
        reversed orientation scored better.
        """
        # get greedy position (and orientation) for given rectangle in sheet
        xl, yl, reverse = self.getGreedyPosition(rectangle)
        rl = rectangle.length
        rb = rectangle.breadth
        if reverse:
            rl, rb = rb, rl
        l1 = Point(xl, yl)
        r1 = Point(xl + rl, yl + rb)
        # add the rectangle with coordinates in rectangleSet
        self.rectangleSet.add((l1, r1))
        # add all 4 corner points of rectangle in cornerPoints
        self.cornerPoints.add((xl, yl))
        self.cornerPoints.add((xl, yl + rb))
        self.cornerPoints.add((xl + rl, yl))
        self.cornerPoints.add((xl + rl, yl + rb))
        # add x_set points
        self.x_set.add(xl)
        self.x_set.add(xl + rl)
        # add y_set points
        self.y_set.add(yl)
        self.y_set.add(yl + rb)
    def getGreedyPosition(self, rectangle):
        """Choose the anchor point and orientation for ``rectangle``.

        Every known corner point is tried in both orientations; candidates
        that overlap an existing rectangle or stick out of the sheet are
        discarded. Each survivor is scored by the area of the bounding box
        the packing would then span, and the smallest score wins.

        :return: (x, y, reversed) where ``reversed`` is True when the
            rectangle should be placed with length/breadth swapped
        :raises IndexError: when no candidate position fits at all
        """
        if self.isSheetEmpty():
            return 0, 0, False
        else:
            xyInscribedArea = []  # candidates: (x, y, bounding area, reversed)
            # without reversing rectangle
            for p in self.cornerPoints:
                tl = Point(p[0], p[1])
                br = Point(p[0] + rectangle.length, p[1] + rectangle.breadth)
                overlapFlag = False
                # checking overlap with every rectangle ...
                for old_tl_br in self.rectangleSet:
                    old_tl = old_tl_br[0]
                    old_br = old_tl_br[1]
                    if areOverlapping(old_tl, old_br, tl, br):
                        overlapFlag = True
                        break
                if not overlapFlag:
                    # keep the candidate only if it stays inside the sheet
                    if ((p[0] + rectangle.length) <= self.length) and ((p[1] + rectangle.breadth) <= self.breadth):
                        m_x = max(p[0] + rectangle.length, max(self.x_set))
                        m_y = max(p[1] + rectangle.breadth, max(self.y_set))
                        m_area_inscribed = m_x * m_y
                        xyInscribedArea.append((p[0], p[1], m_area_inscribed, False))
                    else:
                        pass
            # after reversing rectangle
            for p in self.cornerPoints:
                tl = Point(p[0], p[1])
                br = Point(p[0] + rectangle.breadth, p[1] + rectangle.length)
                overlapFlag = False
                # checking overlap with every rectangle ...
                for old_tl_br in self.rectangleSet:
                    old_tl = old_tl_br[0]
                    old_br = old_tl_br[1]
                    if areOverlapping(old_tl, old_br, tl, br):
                        overlapFlag = True
                        break
                if not overlapFlag:
                    if ((p[0] + rectangle.breadth) <= self.length) and ((p[1] + rectangle.length) <= self.breadth):
                        m_x = max(p[0] + rectangle.breadth, max(self.x_set))
                        m_y = max(p[1] + rectangle.length, max(self.y_set))
                        m_area_inscribed = m_x * m_y
                        xyInscribedArea.append((p[0], p[1], m_area_inscribed, True))
            # smallest bounding-box area first; its anchor and orientation win
            xyInscribedArea = sorted(xyInscribedArea, key=lambda x: x[2])
            return xyInscribedArea[0][0], xyInscribedArea[0][1], xyInscribedArea[0][3]
e42b7212e9b3fc61daf5b1358fef5f37df1dac80 | 1,623 | py | Python | re_findall.py | akiselev1/hackerrank-solutions | 53c2a76c71c9b3553c077ccfde5178b27594ae72 | [
"MIT"
] | null | null | null | re_findall.py | akiselev1/hackerrank-solutions | 53c2a76c71c9b3553c077ccfde5178b27594ae72 | [
"MIT"
] | null | null | null | re_findall.py | akiselev1/hackerrank-solutions | 53c2a76c71c9b3553c077ccfde5178b27594ae72 | [
"MIT"
] | null | null | null | """
Created by akiselev on 2019-06-13
re.findall()
The expression re.findall() returns all the non-overlapping matches of patterns in a string as a list of strings.
Code
>>> import re
>>> re.findall(r'\w','http://www.hackerrank.com/')
['h', 't', 't', 'p', 'w', 'w', 'w', 'h', 'a', 'c', 'k', 'e', 'r', 'r', 'a', 'n', 'k', 'c', 'o', 'm']
re.finditer()
The expression re.finditer() returns an iterator yielding MatchObject instances over all non-overlapping matches for the re pattern in the string.
Code
>>> import re
>>> re.finditer(r'\w','http://www.hackerrank.com/')
<callable-iterator object at 0x0266C790>
>>> map(lambda x: x.group(),re.finditer(r'\w','http://www.hackerrank.com/'))
['h', 't', 't', 'p', 'w', 'w', 'w', 'h', 'a', 'c', 'k', 'e', 'r', 'r', 'a', 'n', 'k', 'c', 'o', 'm']
Task
You are given a string S. It consists of alphanumeric characters, spaces and
symbols(+,-).
Your task is to find all the substrings of S that contain 2 or more vowels.
Also, these substrings must lie in between 2 consonants and should contain
vowels only.
Note :
Vowels are defined as: AEIOU and aeiou.
Consonants are defined as: QWRTYPSDFGHJKLZXCVBNM and qwrtypsdfghjklzxcvbnm.
Input Format
A single line of input containing string
.
Constraints
Output Format
Print the matched substrings in their order of occurrence on separate lines.
If no match is found, print -1.
Sample Input
rabcdeefgyYhFjkIoomnpOeorteeeeet
Sample Output
ee
Ioo
Oeo
eeeee
"""
import re
# A consonant, then 2+ vowels (captured), then a lookahead consonant; the
# lookahead lets that consonant also begin the next match. re.I makes both
# character classes case-insensitive.
x = re.compile(r'[qwrtypsdfghjklzxcvbnm]([aeiou]{2,})(?=[qwrtypsdfghjklzxcvbnm])', re.I)
m = re.findall(x, input().strip())
print('\n'.join(m or ['-1'])) | 24.969231 | 146 | 0.676525 | """
Created by akiselev on 2019-06-13
re.findall()
The expression re.findall() returns all the non-overlapping matches of patterns in a string as a list of strings.
Code
>>> import re
>>> re.findall(r'\w','http://www.hackerrank.com/')
['h', 't', 't', 'p', 'w', 'w', 'w', 'h', 'a', 'c', 'k', 'e', 'r', 'r', 'a', 'n', 'k', 'c', 'o', 'm']
re.finditer()
The expression re.finditer() returns an iterator yielding MatchObject instances over all non-overlapping matches for the re pattern in the string.
Code
>>> import re
>>> re.finditer(r'\w','http://www.hackerrank.com/')
<callable-iterator object at 0x0266C790>
>>> map(lambda x: x.group(),re.finditer(r'\w','http://www.hackerrank.com/'))
['h', 't', 't', 'p', 'w', 'w', 'w', 'h', 'a', 'c', 'k', 'e', 'r', 'r', 'a', 'n', 'k', 'c', 'o', 'm']
Task
You are given a string S. It consists of alphanumeric characters, spaces and
symbols(+,-).
Your task is to find all the substrings of S that contain 2 or more vowels.
Also, these substrings must lie in between 2 consonants and should contain
vowels only.
Note :
Vowels are defined as: AEIOU and aeiou.
Consonants are defined as: QWRTYPSDFGHJKLZXCVBNM and qwrtypsdfghjklzxcvbnm.
Input Format
A single line of input containing string
.
Constraints
Output Format
Print the matched substrings in their order of occurrence on separate lines.
If no match is found, print -1.
Sample Input
rabcdeefgyYhFjkIoomnpOeorteeeeet
Sample Output
ee
Ioo
Oeo
eeeee
"""
import re
# A consonant, then 2+ vowels (captured), then a lookahead consonant; the
# lookahead lets that consonant also begin the next match. re.I makes both
# character classes case-insensitive.
x = re.compile(r'[qwrtypsdfghjklzxcvbnm]([aeiou]{2,})(?=[qwrtypsdfghjklzxcvbnm])', re.I)
m = re.findall(x, input().strip())
print('\n'.join(m or ['-1'])) | 0 | 0 | 0 |
ddc3ae83023e4553b939bfb770711ae4531a84d8 | 3,120 | py | Python | launcher.py | nomanbaig98/syntax-analyzer-python | 426d4890603c6075d126217718ed413a065aad42 | [
"MIT"
] | null | null | null | launcher.py | nomanbaig98/syntax-analyzer-python | 426d4890603c6075d126217718ed413a065aad42 | [
"MIT"
] | null | null | null | launcher.py | nomanbaig98/syntax-analyzer-python | 426d4890603c6075d126217718ed413a065aad42 | [
"MIT"
] | null | null | null | from antlr4 import FileStream, CommonTokenStream
from src.Python3Lexer import Python3Lexer
from src.Python3Parser import Python3Parser
from antlr4.tree.Tree import TerminalNodeImpl
from antlr4.error.ErrorListener import ErrorListener
import json
class FileErrorListener(ErrorListener):
    """Class for storing errors which occurred during the syntax analysis"""
def walk(subtree, rule_names):
    """Recursively convert an ANTLR parse (sub)tree into a dict.

    Terminal nodes become {'Type': <token name>, 'Value': <token text>};
    rule nodes become {<rule name>: <child dict>} for a single child or
    {<rule name>: [<child dicts>]} for several.

    Args:
        @subtree - root of the subtree to be walked
        @rule_names - rule names indexed by the parser's rule indices
    Returns: dict representation of the tree
    """
    if isinstance(subtree, TerminalNodeImpl):
        # Leaf: report the token's symbolic name and its raw text.
        token = subtree.getSymbol()
        return {'Type': Python3Parser.symbolicNames[token.type],
                'Value': token.text}
    rule_name = rule_names[subtree.getRuleIndex()]
    children = [walk(subtree.getChild(i), rule_names)
                for i in range(subtree.getChildCount())]
    # A single child is inlined instead of being wrapped in a list.
    if len(children) == 1:
        return {rule_name: children[0]}
    return {rule_name: children}
def lex(i_stream):
    """Run lexical analysis over an input stream.

    Returns: a filled CommonTokenStream ready to be parsed
    """
    token_stream = CommonTokenStream(Python3Lexer(i_stream))
    # fill() eagerly pulls every token from the lexer.
    token_stream.fill()
    return token_stream
def parse(t_stream):
    """Parse a token stream with the Python3 grammar.

    Params:
        t_stream: stream of tokens to parse
    Returns:
        tuple of (parse tree, FileErrorListener holding any syntax errors)
    """
    parser = Python3Parser(t_stream)
    # Swap the default console listener for one that records the errors.
    parser.removeErrorListeners()
    collector = FileErrorListener()
    parser.addErrorListener(collector)
    tree = parser.file_input()
    return tree, collector
def tree_to_json(built_tree, error_listener):
    """Render the parse result as pretty-printed JSON or an error report.

    Params:
        built_tree - tree to be converted
        error_listener - listener that collected syntax errors (if any)
    Returns:
        JSON string when parsing succeeded, otherwise a newline-joined
        report starting with "Syntax errors were found"
    """
    if error_listener.errors:
        return '\n'.join(["Syntax errors were found"] + error_listener.errors)
    as_dict = walk(built_tree, Python3Parser.ruleNames)
    return json.dumps(as_dict, indent=2, ensure_ascii=False)
if __name__ == '__main__':
    # Run the project's test suite first, then process in.txt -> out.txt.
    from tests.run_tests import run_tests
    run_tests()
    # NOTE(review): `launch` is not defined in this copy of the file as
    # shown — confirm it is provided before running.
    launch()
| 27.610619 | 79 | 0.674038 | from antlr4 import FileStream, CommonTokenStream
from src.Python3Lexer import Python3Lexer
from src.Python3Parser import Python3Parser
from antlr4.tree.Tree import TerminalNodeImpl
from antlr4.error.ErrorListener import ErrorListener
import json
class FileErrorListener(ErrorListener):
    """Collects syntax errors that occurred during the syntax analysis."""

    def __init__(self):
        # Each entry is a formatted "line L:C <message>" string.
        self.errors = []

    def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
        # Record the error instead of printing it, so callers can inspect it.
        position = "line " + str(line) + ":" + str(column) + " "
        self.errors.append(position + msg)
def walk(subtree, rule_names):
    """Recursively convert an ANTLR parse (sub)tree into a dict.

    Terminal nodes become {'Type': <token name>, 'Value': <token text>};
    rule nodes become {<rule name>: <child dict>} for a single child or
    {<rule name>: [<child dicts>]} for several.

    Args:
        @subtree - root of the subtree to be walked
        @rule_names - rule names indexed by the parser's rule indices
    Returns: dict representation of the tree
    """
    if isinstance(subtree, TerminalNodeImpl):
        # Leaf: report the token's symbolic name and its raw text.
        token = subtree.getSymbol()
        return {'Type': Python3Parser.symbolicNames[token.type],
                'Value': token.text}
    rule_name = rule_names[subtree.getRuleIndex()]
    children = [walk(subtree.getChild(i), rule_names)
                for i in range(subtree.getChildCount())]
    # A single child is inlined instead of being wrapped in a list.
    if len(children) == 1:
        return {rule_name: children[0]}
    return {rule_name: children}
def lex(i_stream):
    """Run lexical analysis over an input stream.

    Returns: a filled CommonTokenStream ready to be parsed
    """
    token_stream = CommonTokenStream(Python3Lexer(i_stream))
    # fill() eagerly pulls every token from the lexer.
    token_stream.fill()
    return token_stream
def parse(t_stream):
    """Handles parsing
    Params:
        t_stream: stream of tokens to parse
    Returns:
        resulting tree
        error handler (with possible errors stored inside)
    """
    py_parser = Python3Parser(t_stream)
    # Replace the default console listener with our collecting listener
    # so syntax errors can be inspected programmatically afterwards.
    py_parser.removeErrorListeners()
    error_listener = FileErrorListener()
    py_parser.addErrorListener(error_listener)
    # 'file_input' is the grammar's start rule for a whole source file.
    built_tree = py_parser.file_input()
    return built_tree, error_listener
def tree_to_json(built_tree, error_listener):
    """Convert a parse tree to a JSON string.

    Params:
        built_tree - tree to be converted
        error_listener - error handling object filled in during parsing
    Returns:
        JSON string, if the tree was constructed without errors;
        otherwise a newline-joined report of the syntax errors.
    """
    # Guard clause: report errors instead of serialising a broken tree.
    # (Truthiness test replaces the non-idiomatic `len(...) > 0`.)
    if error_listener.errors:
        return '\n'.join(["Syntax errors were found"] + error_listener.errors)
    result = walk(built_tree, Python3Parser.ruleNames)
    return json.dumps(result, indent=2, ensure_ascii=False)
def launch():
    """Parse 'in.txt' and write the JSON tree (or error report) to 'out.txt'."""
    i_stream = FileStream('in.txt', encoding='utf-8')
    t_stream = lex(i_stream)
    built_tree, error_listener = parse(t_stream)
    result = tree_to_json(built_tree, error_listener)
    with open('out.txt', 'w') as f_out:
        f_out.write(result)
if __name__ == '__main__':
    # Run the project's self-tests first, then process in.txt -> out.txt.
    from tests.run_tests import run_tests
    run_tests()
    launch()
| 402 | 0 | 77 |
5319ef793827510e97e9ddc010233d47d84e71ec | 334 | py | Python | tridentstream/dbs/memory/handler.py | tridentstream/mediaserver | 5d47d766df2e8dca076e41348062567a569019fd | [
"MIT"
] | 6 | 2020-01-03T14:50:09.000Z | 2021-09-13T01:44:31.000Z | tridentstream/dbs/memory/handler.py | tidalstream/mediaserver | 5d47d766df2e8dca076e41348062567a569019fd | [
"MIT"
] | null | null | null | tridentstream/dbs/memory/handler.py | tidalstream/mediaserver | 5d47d766df2e8dca076e41348062567a569019fd | [
"MIT"
] | null | null | null | from unplugged import Schema
from ...plugins import DatabasePlugin
| 15.181818 | 49 | 0.637725 | from unplugged import Schema
from ...plugins import DatabasePlugin
class MemoryDatabasePlugin(dict, DatabasePlugin):
    """In-memory key/value database plugin backed directly by ``dict``.

    Data is volatile: nothing is persisted, so ``sync``/``close`` are
    no-ops and ``unload`` simply drops all entries.
    """
    plugin_name = "memory"
    config_schema = Schema
    def __init__(self, config):
        # No configuration is used; the dict base starts out empty.
        pass
    def unload(self):
        """Drop all stored entries when the plugin is unloaded."""
        self.clear()
    def sync(self):
        """No-op: an in-memory store has nothing to flush."""
        pass
    def close(self):
        """No-op: no external resources to release."""
        pass
| 51 | 191 | 23 |
1c9de91a508d69159b4450a7cb1f1f58bb9f5d59 | 1,885 | py | Python | StateMainMenu.py | Exs1de/TicTacToe | 0119ab798b1c04cd1d003c9c95591415d1576156 | [
"MIT"
] | 1 | 2019-04-29T19:41:12.000Z | 2019-04-29T19:41:12.000Z | StateMainMenu.py | Exs1de/TicTacToe | 0119ab798b1c04cd1d003c9c95591415d1576156 | [
"MIT"
] | null | null | null | StateMainMenu.py | Exs1de/TicTacToe | 0119ab798b1c04cd1d003c9c95591415d1576156 | [
"MIT"
] | null | null | null | import GUI
from GUI import root, tk, MyButton
import ButtonClickHandler as BH
from ButtonClickHandler import MainMenuButtons as M
| 38.469388 | 83 | 0.46313 | import GUI
from GUI import root, tk, MyButton
import ButtonClickHandler as BH
from ButtonClickHandler import MainMenuButtons as M
class StateMainMenu(object):
    """Main-menu screen: builds the START/EXIT buttons on the game window."""
    def __init__(self):
        GUI.root.title("TIC TAC TOE")
        self.GAME_WIDTH = GUI.GAME_WIDTH
        self.GAME_HEIGHT = GUI.GAME_HEIGHT
        # Button sizes as fractions of the window size.
        self.BTN_WIDTH = 0.45 * self.GAME_WIDTH
        self.BTN_HEIGHT = 0.14 * self.GAME_HEIGHT
        print('STATE_MAIN_MENU')
        # Centered container holding the menu buttons.
        self.container = tk.Label(GUI.SCREEN)
        self.container.place(x=self.GAME_WIDTH / 2 - self.BTN_WIDTH / 2,
                             y=self.GAME_HEIGHT / 2 - 0.54 * self.GAME_HEIGHT / 2,
                             width=self.BTN_WIDTH,
                             height=0.54 * self.GAME_HEIGHT
                             )
        self.btn_play = MyButton(self.container,
                                 height=self.BTN_HEIGHT,
                                 text='START',
                                 command=lambda: M.btn_play_click()
                                 )
        self.btn_play.pack(fill=tk.X, pady=0.08 * self.GAME_HEIGHT)
        # self.btn_settings = MyButton(self.container,
        #                              height=self.BTN_HEIGHT,
        #                              text='SETTINGS'
        #                              )
        # self.btn_settings.pack(fill=tk.X,
        #                        pady=0.06 * self.GAME_HEIGHT
        #                        )
        self.btn_exit = MyButton(self.container,
                                 height=self.BTN_HEIGHT,
                                 text='EXIT',
                                 command=lambda: root.destroy()
                                 )
        self.btn_exit.pack(fill=tk.X)
        # Route the window-close button through the app's closing handler.
        GUI.root.protocol('WM_DELETE_WINDOW', lambda: BH.on_closing())
| 1,690 | 7 | 52 |
ea5999e87cc8e8afb85868ea9a64bdbb2d0d5632 | 3,824 | py | Python | project/optimizer.py | AnyByte/ErgoKB | 4aa192b0e23872681fa3a3bab4706408aeac6619 | [
"MIT"
] | null | null | null | project/optimizer.py | AnyByte/ErgoKB | 4aa192b0e23872681fa3a3bab4706408aeac6619 | [
"MIT"
] | 3 | 2020-03-14T14:54:25.000Z | 2021-12-13T20:31:53.000Z | project/optimizer.py | AnyByte/ErgoKB | 4aa192b0e23872681fa3a3bab4706408aeac6619 | [
"MIT"
] | null | null | null | import random
from copy import deepcopy
| 37.490196 | 119 | 0.653766 | import random
from copy import deepcopy
class Optimizer:
    """Stateful search heuristic for keyboard-layout scoring.

    Each ``default``/``old`` call consumes one iteration's score-sorted
    variants, tracks minima/stall counters across calls, and returns
    either the next layout to test plus ``False``, or a final variant
    plus ``True`` when the search should stop.
    Lower ``avg + delta`` is treated as a better score.
    """
    def __init__(self):
        self.results = []
        self.best_index = 0
        self.min_score = 0
        self.last_score = 0
        self.consequent_bad_score_count = 0
        self.consequent_score_count = 0
        self.last_min_score = 0
        self.last_min_score_count = 0
        self.last_min_score_idx = 0
        self.overall_min_score = 0
        self.overall_min_score_fail_count = 0
    def default(self, iteration_index, sorted_variants):
        """Advance the search one step; returns (next_layout_or_variant, done)."""
        self.results.append(sorted_variants)
        best_variant = sorted_variants[0]
        score = best_variant['avg'] + best_variant['delta']
        test_layout = deepcopy(best_variant["layout"])
        if iteration_index == 0:
            self.min_score = score
        if score < self.min_score:
            self.best_index = iteration_index
            self.min_score = score
            self.consequent_bad_score_count = 0
        else:
            self.consequent_bad_score_count += 1
        # After 10 misses in a row, restart from a random stored variant.
        if self.consequent_bad_score_count >= 10:
            rand_int = random.randint(0, len(self.results) - 1)
            test_layout = deepcopy(random.choice(self.results[rand_int])["layout"])
        # After 100 misses, restart from the best variant of a random iteration.
        if self.consequent_bad_score_count >= 100:
            rand_int = random.randint(0, len(self.results) - 1)
            test_layout = deepcopy(self.results[rand_int][0]["layout"])
        # After 300 misses at (or equal to) the minimum, stop the search.
        # NOTE(review): this returns the variant dict, not its "layout" -- the
        # other branches return a layout; confirm callers expect this.
        if self.consequent_bad_score_count >= 300 and score <= self.min_score:
            return best_variant, True
        # Remember the last score
        self.last_score = score
        return test_layout, False
    def old(self, iteration_index, sorted_variants):
        """Legacy variant of the search step; returns (layout_or_variant, done)."""
        self.results.append(sorted_variants)
        best_variant = sorted_variants[0]
        score = best_variant['avg'] + best_variant['delta']
        test_layout = deepcopy(best_variant["layout"])
        # If the score equals the previous one
        if score == self.last_score:
            self.consequent_score_count += 1
        # If the score is below the minimum (or none recorded yet), update the
        # minimum and remember its iteration index
        if score < self.min_score or self.min_score == 0:
            self.min_score = score
            self.last_min_score_idx = iteration_index
        # If the score is above the minimum, count the miss
        if score > self.min_score:
            self.last_min_score_count += 1
        # If the minimum was seen long ago and we are heading the wrong way,
        # restart from the layout stored at the last-minimum iteration
        if self.last_min_score_count > 10:
            test_layout = deepcopy(self.results[self.last_min_score_idx][-1]["layout"])
            self.last_min_score = 0
        # If the score is "stuck" (unchanged for 10 measurements), inject a
        # previous random layout to shake the search loose
        if self.consequent_score_count > 10:
            self.consequent_score_count = 0
            rand_int = random.randint(0, len(self.results) - 1)
            # test_layout = deepcopy(random.choice(results[rand_int])["layout"])
            test_layout = deepcopy(self.results[rand_int][-1]["layout"])
        # If the score dropped below the all-time minimum, update that minimum
        if score < self.overall_min_score or self.overall_min_score == 0:
            self.overall_min_score = score
            self.overall_min_score_fail_count = 0
        # If the score equals the all-time minimum, count the stall
        if score == self.overall_min_score:
            self.overall_min_score_fail_count += 1
        # The score has not improved on the all-time minimum for N iterations:
        # stop the search
        if self.overall_min_score_fail_count > 100:
            return best_variant, True
        # Remember the last score
        self.last_score = score
        return test_layout, False
| 4,247 | -5 | 103 |
1bf19843567acc9a0e6fa51958307c151fa4cdf0 | 503 | py | Python | mediafeed/api/server.py | media-feed/mediafeed | c2fb37b20a5bc41a4299193fa9b11f8a3e3b2acf | [
"MIT"
] | null | null | null | mediafeed/api/server.py | media-feed/mediafeed | c2fb37b20a5bc41a4299193fa9b11f8a3e3b2acf | [
"MIT"
] | null | null | null | mediafeed/api/server.py | media-feed/mediafeed | c2fb37b20a5bc41a4299193fa9b11f8a3e3b2acf | [
"MIT"
] | null | null | null | import os
from bottle import Bottle, response
from whitenoise import WhiteNoise
from ..settings import DATA_PATH
api = Bottle()
@api.hook('after_request')
application = WhiteNoise(api)
application.add_files(os.path.join(DATA_PATH, 'thumbnails'), prefix='thumbnails/')
application.add_files(os.path.join(DATA_PATH, 'medias'), prefix='medias/')
| 23.952381 | 82 | 0.747515 | import os
from bottle import Bottle, response
from whitenoise import WhiteNoise
from ..settings import DATA_PATH
api = Bottle()
@api.hook('after_request')
def enable_cors():
    """Bottle after-request hook: attach CORS headers to every response."""
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Access-Control-Allow-Methods'] = 'GET, POST, DELETE'
application = WhiteNoise(api)
application.add_files(os.path.join(DATA_PATH, 'thumbnails'), prefix='thumbnails/')
application.add_files(os.path.join(DATA_PATH, 'medias'), prefix='medias/')
| 130 | 0 | 22 |
f0821f7691b0cefe6e266ce8feb7fa9ca9ca7209 | 300 | py | Python | katas/kyu_7/what_is_my_name_score_1.py | the-zebulan/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 40 | 2016-03-09T12:26:20.000Z | 2022-03-23T08:44:51.000Z | katas/kyu_7/what_is_my_name_score_1.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | null | null | null | katas/kyu_7/what_is_my_name_score_1.py | akalynych/CodeWars | 1eafd1247d60955a5dfb63e4882e8ce86019f43a | [
"MIT"
] | 36 | 2016-11-07T19:59:58.000Z | 2022-03-31T11:18:27.000Z | # for testing only, 'alpha' is included in the preloaded section on Codewars
alpha = {'ABCDE': 1, 'FGHIJ': 2, 'KLMNO': 3, 'PQRST': 4, 'UVWXY': 5}
| 37.5 | 76 | 0.643333 | # for testing only, 'alpha' is included in the preloaded section on Codewars
alpha = {'ABCDE': 1, 'FGHIJ': 2, 'KLMNO': 3, 'PQRST': 4, 'UVWXY': 5}
def name_score(name):
scores = {k: v for keys, v in alpha.iteritems() for k in keys}
return {name: sum(scores.get(a, 0) for a in name.upper())}
| 130 | 0 | 23 |
2875aa0e22df649277cae1742669f39f25d038f2 | 1,932 | py | Python | qdk/qdk/chemistry/broombridge.py | Anatoliy-Litvinenko/qdk-python | 74b2638a404717424090023ef49afb3045ea920e | [
"MIT"
] | 53 | 2021-01-21T23:38:09.000Z | 2022-03-29T16:34:42.000Z | qdk/qdk/chemistry/broombridge.py | Anatoliy-Litvinenko/qdk-python | 74b2638a404717424090023ef49afb3045ea920e | [
"MIT"
] | 152 | 2021-01-23T07:01:49.000Z | 2022-03-31T19:43:21.000Z | qdk/qdk/chemistry/broombridge.py | slowy07/qdk-python | e4ce0c433cc986bc1c746e9a58f3f05733c657e2 | [
"MIT"
] | 47 | 2021-01-30T20:15:46.000Z | 2022-03-25T23:35:28.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
##
# Module for loading and encoding Broombridge data
##
import logging
from qsharp.chemistry import load_broombridge, load_input_state, encode
from typing import List, Tuple
NumQubits = int
HamiltonianTermList = Tuple[List[Tuple[List[int], List[float]]]]
InputStateTerms = Tuple[int, List[Tuple[Tuple[float, float], List[int]]]]
EnergyOffset = float
JWEncodedData = Tuple[
NumQubits,
HamiltonianTermList,
InputStateTerms,
EnergyOffset
]
_log = logging.getLogger(__name__)
def load_and_encode(
file_name: str,
problem_description_index: int = 0,
initial_state_label: str = None
) -> JWEncodedData:
"""Wrapper function for loading and encoding Broombridge file into
JWEncodedData-compatible format.
:param file_name: Broombridge file name
:type file_name: str
:param problem_description_index: Index of problem description to use,
defaults to 0
:type problem_description_index: int, optional
:param initial_state_label: Label of initial state to use, defaults to
first available label
:type initial_state_label: str, optional
"""
broombridge_data = load_broombridge(file_name)
problem = broombridge_data.problem_description[problem_description_index]
if initial_state_label is None:
# Pick first in list
initial_state_label = problem.initial_state_suggestions[0].get("Label")
_log.info(f"Using initial state label: {initial_state_label}")
input_state = load_input_state(file_name, initial_state_label)
ferm_hamiltonian = problem.load_fermion_hamiltonian()
(
num_qubits,
hamiltonian_term_list,
input_state_terms,
energy_offset
) = encode(ferm_hamiltonian, input_state)
return (
num_qubits,
hamiltonian_term_list,
input_state_terms,
energy_offset
)
| 29.723077 | 79 | 0.728778 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
##
# Module for loading and encoding Broombridge data
##
import logging
from qsharp.chemistry import load_broombridge, load_input_state, encode
from typing import List, Tuple
NumQubits = int
HamiltonianTermList = Tuple[List[Tuple[List[int], List[float]]]]
InputStateTerms = Tuple[int, List[Tuple[Tuple[float, float], List[int]]]]
EnergyOffset = float
JWEncodedData = Tuple[
NumQubits,
HamiltonianTermList,
InputStateTerms,
EnergyOffset
]
_log = logging.getLogger(__name__)
def load_and_encode(
    file_name: str,
    problem_description_index: int = 0,
    initial_state_label: str = None
) -> JWEncodedData:
    """Load a Broombridge file and return it Jordan-Wigner encoded.

    Convenience wrapper producing data in JWEncodedData-compatible form.

    :param file_name: Broombridge file name
    :type file_name: str
    :param problem_description_index: Index of problem description to use,
        defaults to 0
    :type problem_description_index: int, optional
    :param initial_state_label: Label of initial state to use, defaults to
        first available label
    :type initial_state_label: str, optional
    """
    data = load_broombridge(file_name)
    problem = data.problem_description[problem_description_index]

    if initial_state_label is None:
        # Fall back to the first suggested initial state.
        initial_state_label = problem.initial_state_suggestions[0].get("Label")

    _log.info(f"Using initial state label: {initial_state_label}")

    input_state = load_input_state(file_name, initial_state_label)
    hamiltonian = problem.load_fermion_hamiltonian()

    encoded = encode(hamiltonian, input_state)
    num_qubits, hamiltonian_term_list, input_state_terms, energy_offset = encoded
    return (
        num_qubits,
        hamiltonian_term_list,
        input_state_terms,
        energy_offset
    )
| 0 | 0 | 0 |
262be4baffa1f0ba78fdcc51038b21e05e64bc18 | 193 | py | Python | TSIS5/10.py | ayazhan112/python- | fba09ecc25e11dbfb116f273838b13174f66126d | [
"MIT"
] | null | null | null | TSIS5/10.py | ayazhan112/python- | fba09ecc25e11dbfb116f273838b13174f66126d | [
"MIT"
] | null | null | null | TSIS5/10.py | ayazhan112/python- | fba09ecc25e11dbfb116f273838b13174f66126d | [
"MIT"
] | null | null | null | from collections import Counter
print('Number of words:', word_count('test.txt'))
| 27.571429 | 50 | 0.668394 | from collections import Counter
def word_count(file_name):
with open(file_name, 'r') as f:
return Counter(f.read().split())
print('Number of words:', word_count('test.txt'))
| 84 | 0 | 23 |
510971622a2d34c62749680ee58612a8430d1019 | 7,136 | py | Python | torchreid/engine/image/triplet.py | Vill-Lab/IGOAS | 42ca1d45e441f993c95b5e8f33c9f97ea3b916f3 | [
"MIT"
] | 8 | 2021-05-27T10:19:28.000Z | 2021-10-15T12:38:04.000Z | torchreid/engine/image/triplet.py | Vill-Lab/IGOAS | 42ca1d45e441f993c95b5e8f33c9f97ea3b916f3 | [
"MIT"
] | 3 | 2021-06-23T12:06:39.000Z | 2021-09-12T08:40:44.000Z | torchreid/engine/image/triplet.py | Vill-Lab/IGOAS | 42ca1d45e441f993c95b5e8f33c9f97ea3b916f3 | [
"MIT"
] | 6 | 2021-05-27T10:19:18.000Z | 2021-11-13T12:02:17.000Z | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import time
import datetime
import torch
import torchreid
from torchreid.engine import engine
from torchreid.losses import CrossEntropyLoss, TripletLoss, HctLoss
from torchreid.utils import AverageMeter, open_specified_layers, open_all_layers
from torchreid import metrics
class ImageTripletEngine(engine.Engine):
r"""Triplet-loss engine for image-reid.
Args:
datamanager (DataManager): an instance of ``torchreid.data.ImageDataManager``
or ``torchreid.data.VideoDataManager``.
model (nn.Module): model instance.
optimizer (Optimizer): an Optimizer.
margin (float, optional): margin for triplet loss. Default is 0.3.
weight_t (float, optional): weight for triplet loss. Default is 1.
weight_x (float, optional): weight for softmax loss. Default is 1.
scheduler (LRScheduler, optional): if None, no learning rate decay will be performed.
use_gpu (bool, optional): use gpu. Default is True.
label_smooth (bool, optional): use label smoothing regularizer. Default is True.
Examples::
import torch
import torchreid
datamanager = torchreid.data.ImageDataManager(
root='path/to/reid-data',
sources='market1501',
height=256,
width=128,
combineall=False,
batch_size=32,
num_instances=4,
train_sampler='RandomIdentitySampler' # this is important
)
model = torchreid.models.build_model(
name='resnet50',
num_classes=datamanager.num_train_pids,
loss='triplet'
)
model = model.cuda()
optimizer = torchreid.optim.build_optimizer(
model, optim='adam', lr=0.0003
)
scheduler = torchreid.optim.build_lr_scheduler(
optimizer,
lr_scheduler='single_step',
stepsize=20
)
engine = torchreid.engine.ImageTripletEngine(
datamanager, model, optimizer, margin=0.3,
weight_t=0.7, weight_x=1, scheduler=scheduler
)
engine.run(
max_epoch=60,
save_dir='log/resnet50-triplet-market1501',
print_freq=10
)
"""
| 38.160428 | 120 | 0.582399 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import time
import datetime
import torch
import torchreid
from torchreid.engine import engine
from torchreid.losses import CrossEntropyLoss, TripletLoss, HctLoss
from torchreid.utils import AverageMeter, open_specified_layers, open_all_layers
from torchreid import metrics
class ImageTripletEngine(engine.Engine):
    r"""Triplet-loss engine for image-reid.
    Args:
        datamanager (DataManager): an instance of ``torchreid.data.ImageDataManager``
            or ``torchreid.data.VideoDataManager``.
        model (nn.Module): model instance.
        optimizer (Optimizer): an Optimizer.
        margin (float, optional): margin for triplet loss. Default is 0.3.
        weight_t (float, optional): weight for triplet loss. Default is 1.
        weight_x (float, optional): weight for softmax loss. Default is 1.
        scheduler (LRScheduler, optional): if None, no learning rate decay will be performed.
        use_gpu (bool, optional): use gpu. Default is True.
        label_smooth (bool, optional): use label smoothing regularizer. Default is True.
    Examples::
        import torch
        import torchreid
        datamanager = torchreid.data.ImageDataManager(
            root='path/to/reid-data',
            sources='market1501',
            height=256,
            width=128,
            combineall=False,
            batch_size=32,
            num_instances=4,
            train_sampler='RandomIdentitySampler' # this is important
        )
        model = torchreid.models.build_model(
            name='resnet50',
            num_classes=datamanager.num_train_pids,
            loss='triplet'
        )
        model = model.cuda()
        optimizer = torchreid.optim.build_optimizer(
            model, optim='adam', lr=0.0003
        )
        scheduler = torchreid.optim.build_lr_scheduler(
            optimizer,
            lr_scheduler='single_step',
            stepsize=20
        )
        engine = torchreid.engine.ImageTripletEngine(
            datamanager, model, optimizer, margin=0.3,
            weight_t=0.7, weight_x=1, scheduler=scheduler
        )
        engine.run(
            max_epoch=60,
            save_dir='log/resnet50-triplet-market1501',
            print_freq=10
        )
    """
    def __init__(self, datamanager, model, optimizer, margin=0.3,
                 weight_t=0.0001, weight_x=1.0, scheduler=None, use_gpu=True,
                 label_smooth=True):
        """Set up the triplet and (label-smoothed) cross-entropy criteria."""
        super(ImageTripletEngine, self).__init__(datamanager, model, optimizer, scheduler, use_gpu)
        # NOTE(review): weight_t/weight_x are stored but train() below sums
        # the branch losses with fixed 1.0 weights -- confirm intent.
        self.weight_t = weight_t
        self.weight_x = weight_x
        # self.criterion_m = torch.nn.MSELoss()
        self.criterion_t = TripletLoss(margin=margin)
        self.criterion = CrossEntropyLoss(
            num_classes=self.datamanager.num_train_pids,
            use_gpu=self.use_gpu,
            label_smooth=label_smooth
        )
    def train(self, epoch, max_epoch, trainloader, fixbase_epoch=0, open_layers=None, print_freq=10):
        """Run one training epoch over ``trainloader``.

        Combines cross-entropy and triplet losses over the three outputs
        returned by the model, logs averages to stdout/tensorboard, and
        steps the LR scheduler (if any) at the end of the epoch.
        """
        losses = AverageMeter()
        losses1 = AverageMeter()
        losses2 = AverageMeter()
        losses3 = AverageMeter()
        # accs1/2/3 are created but never updated in this loop.
        accs1 = AverageMeter()
        accs2 = AverageMeter()
        accs3 = AverageMeter()
        batch_time = AverageMeter()
        data_time = AverageMeter()
        self.model.train()
        # During warm-up epochs optionally train only the listed layers.
        if (epoch + 1) <= fixbase_epoch and open_layers is not None:
            print('* Only train {} (epoch: {}/{})'.format(open_layers, epoch + 1, fixbase_epoch))
            open_specified_layers(self.model, open_layers)
        else:
            open_all_layers(self.model)
        num_batches = len(trainloader)
        end = time.time()
        for batch_idx, data in enumerate(trainloader):
            data_time.update(time.time() - end)
            imgs, pids = self._parse_data_for_train(data)
            if self.use_gpu:
                imgs = imgs.cuda()
                pids = pids.cuda()
            self.optimizer.zero_grad()
            # Model returns three classifier outputs and three feature sets
            # (presumably one per branch -- confirm against the model).
            output1, output2, output3, fea1, fea2, fea3 = self.model(imgs)
            # Cross-entropy on logits, triplet loss on features, per branch.
            loss_x1 = self._compute_loss(self.criterion, output1, pids)
            loss_x2 = self._compute_loss(self.criterion, output2, pids)
            loss_x3 = self._compute_loss(self.criterion, output3, pids)
            loss_t1 = self._compute_loss(self.criterion_t, fea1, pids)
            loss_t2 = self._compute_loss(self.criterion_t, fea2, pids)
            loss_t3 = self._compute_loss(self.criterion_t, fea3, pids)
            loss1 = loss_x1 + loss_t1
            loss2 = loss_x2 + loss_t2
            loss3 = loss_x3 + loss_t3
            loss = 1.0 * loss1 + 1.0 * loss2 + 1.0 * loss3
            # loss_m1 = self._compute_loss(self.criterion_m, fea1[0], fea2[0])
            # loss_m2 = self._compute_loss(self.criterion_m, fea1[1], fea2[1])
            # loss_m3 = self._compute_loss(self.criterion_m, fea1[2], fea2[2])
            # loss_m4 = self._compute_loss(self.criterion_m, fea1[3], fea2[3])
            # loss_m = (loss_m1 + loss_m2 + loss_m3 + loss_m4) / 4
            # loss = loss_x + loss_t + loss_m
            loss.backward()
            self.optimizer.step()
            batch_time.update(time.time() - end)
            losses.update(loss.item(), pids.size(0))
            losses1.update(loss1.item(), pids.size(0))
            losses2.update(loss2.item(), pids.size(0))
            losses3.update(loss3.item(), pids.size(0))
            if (batch_idx + 1) % print_freq == 0:
                # estimate remaining time
                eta_seconds = batch_time.avg * (num_batches - (batch_idx + 1) + (max_epoch - (epoch + 1)) * num_batches)
                eta_str = str(datetime.timedelta(seconds=int(eta_seconds)))
                print('Epoch: [{0}/{1}][{2}/{3}]\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Loss1 {loss1.val:.4f} ({loss1.avg:.4f})\t'
                      'Loss2 {loss2.val:.4f} ({loss2.avg:.4f})\t'
                      'Loss3 {loss3.val:.4f} ({loss3.avg:.4f})\t'
                      'Lr {lr:.6f}\t'
                      'eta {eta}'.format(
                      epoch + 1, max_epoch, batch_idx + 1, num_batches,
                      loss=losses,
                      loss1=losses1,
                      loss2=losses2,
                      loss3=losses3,
                      lr=self.optimizer.param_groups[0]['lr'],
                      eta=eta_str
                      )
                )
            if self.writer is not None:
                n_iter = epoch * num_batches + batch_idx
                self.writer.add_scalar('Train/Loss', losses.avg, n_iter)
                self.writer.add_scalar('Train/Loss1', losses1.avg, n_iter)
                self.writer.add_scalar('Train/Loss2', losses2.avg, n_iter)
                self.writer.add_scalar('Train/Loss3', losses3.avg, n_iter)
                self.writer.add_scalar('Train/Lr', self.optimizer.param_groups[0]['lr'], n_iter)
            end = time.time()
        if self.scheduler is not None:
            self.scheduler.step()
| 4,726 | 0 | 54 |
11bdfe506c8bdf705d13a3bfae5ffc73fd0be948 | 1,335 | py | Python | HSCTF2021/Crypto/opisthocomus-hoazin.py | yl-ang/CTF | a075231a3dc32630a26f3b2d4dfc1dd9b9f1e0b9 | [
"MIT"
] | null | null | null | HSCTF2021/Crypto/opisthocomus-hoazin.py | yl-ang/CTF | a075231a3dc32630a26f3b2d4dfc1dd9b9f1e0b9 | [
"MIT"
] | null | null | null | HSCTF2021/Crypto/opisthocomus-hoazin.py | yl-ang/CTF | a075231a3dc32630a26f3b2d4dfc1dd9b9f1e0b9 | [
"MIT"
] | 3 | 2021-06-28T09:52:07.000Z | 2021-09-22T03:28:40.000Z | # crypto/opisthocomus-hoazin
e, n = (65537, 15888457769674642859708800597310299725338251830976423740469342107745469667544014118426981955901595652146093596535042454720088489883832573612094938281276141337632202496209218136026441342435018861975571842724577501821204305185018320446993699281538507826943542962060000957702417455609633977888711896513101590291125131953317446916178315755142103529251195112400643488422928729091341969985567240235775120515891920824933965514217511971572242643456664322913133669621953247121022723513660621629349743664178128863766441389213302642916070154272811871674136669061719947615578346412919910075334517952880722801011983182804339339643)
flag_enc = [65639, 65645, 65632, 65638, 65658, 65653, 65609, 65584, 65650, 65630, 65640, 65634, 65586, 65630, 65634, 65651, 65586, 65589, 65644, 65630, 65640, 65588, 65630, 65618, 65646, 65630, 65607, 65651, 65646, 65627, 65586, 65647, 65630, 65640, 65571, 65612, 65630, 65649, 65651, 65586, 65653, 65621, 65656, 65630, 65618, 65652, 65651, 65636, 65630, 65640, 65621, 65574, 65650, 65630, 65589, 65634, 65653, 65652, 65632, 65584, 65645, 65656, 65630, 65635, 65586, 65647, 65605, 65640, 65647, 65606, 65630, 65644, 65624, 65630, 65588, 65649, 65585, 65614, 65647, 65660]
enc_map = {x ^ e % n: chr(x) for x in range(30,255)}
print(''.join([enc_map[c] for c in flag_enc])) | 222.5 | 633 | 0.836704 | # crypto/opisthocomus-hoazin
e, n = (65537, 15888457769674642859708800597310299725338251830976423740469342107745469667544014118426981955901595652146093596535042454720088489883832573612094938281276141337632202496209218136026441342435018861975571842724577501821204305185018320446993699281538507826943542962060000957702417455609633977888711896513101590291125131953317446916178315755142103529251195112400643488422928729091341969985567240235775120515891920824933965514217511971572242643456664322913133669621953247121022723513660621629349743664178128863766441389213302642916070154272811871674136669061719947615578346412919910075334517952880722801011983182804339339643)
flag_enc = [65639, 65645, 65632, 65638, 65658, 65653, 65609, 65584, 65650, 65630, 65640, 65634, 65586, 65630, 65634, 65651, 65586, 65589, 65644, 65630, 65640, 65588, 65630, 65618, 65646, 65630, 65607, 65651, 65646, 65627, 65586, 65647, 65630, 65640, 65571, 65612, 65630, 65649, 65651, 65586, 65653, 65621, 65656, 65630, 65618, 65652, 65651, 65636, 65630, 65640, 65621, 65574, 65650, 65630, 65589, 65634, 65653, 65652, 65632, 65584, 65645, 65656, 65630, 65635, 65586, 65647, 65605, 65640, 65647, 65606, 65630, 65644, 65624, 65630, 65588, 65649, 65585, 65614, 65647, 65660]
enc_map = {x ^ e % n: chr(x) for x in range(30,255)}
print(''.join([enc_map[c] for c in flag_enc])) | 0 | 0 | 0 |
a9312b6a278eb78e59ba06e693a5e9f1f7d1cb2c | 284 | py | Python | problem/01000~09999/02511/2511.pypy3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-19T16:37:44.000Z | 2019-04-19T16:37:44.000Z | problem/01000~09999/02511/2511.pypy3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 1 | 2019-04-20T11:42:44.000Z | 2019-04-20T11:42:44.000Z | problem/01000~09999/02511/2511.pypy3.py | njw1204/BOJ-AC | 1de41685725ae4657a7ff94e413febd97a888567 | [
"MIT"
] | 3 | 2019-04-19T16:37:47.000Z | 2021-10-25T00:45:00.000Z | a,b,A,B,l=list(map(int, input().split())), list(map(int, input().split())),0,0,0
for i in range(10): (A,B,l) = (A+3,B,1) if a[i]>b[i] else (A,B+3,2) if a[i]<b[i] else (A+1,B+1,l)
print('{} {}\n{}'.format(A,B, 'A' if A>B or (A==B and l==1) else 'B' if A<B or (A==B and l==2) else 'D')) | 94.666667 | 105 | 0.521127 | a,b,A,B,l=list(map(int, input().split())), list(map(int, input().split())),0,0,0
for i in range(10): (A,B,l) = (A+3,B,1) if a[i]>b[i] else (A,B+3,2) if a[i]<b[i] else (A+1,B+1,l)
print('{} {}\n{}'.format(A,B, 'A' if A>B or (A==B and l==1) else 'B' if A<B or (A==B and l==2) else 'D')) | 0 | 0 | 0 |
70ac7ff92d4189250299cb7c47f28f8a6c285c49 | 1,452 | py | Python | kombu_aliyun_mqs/aliyun_mqs/mqs_exception.py | YuelianINC/kombu-aliyun-mqs | c385e256c9c020effde03f10bb73f323e4548973 | [
"Apache-2.0"
] | 1 | 2017-04-20T03:43:08.000Z | 2017-04-20T03:43:08.000Z | kombu_aliyun_mqs/aliyun_mqs/mqs_exception.py | YuelianINC/kombu-aliyun-mqs | c385e256c9c020effde03f10bb73f323e4548973 | [
"Apache-2.0"
] | null | null | null | kombu_aliyun_mqs/aliyun_mqs/mqs_exception.py | YuelianINC/kombu-aliyun-mqs | c385e256c9c020effde03f10bb73f323e4548973 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
| 30.893617 | 68 | 0.65427 | # -*- coding: utf-8 -*-
class MQSExceptionBase(Exception):
    """Base class for MQS errors, carrying an error type and a message."""

    def __init__(self, type, message):
        self.type = type
        self.message = message

    def get_info(self):
        # Rendered as: ("<type>" "<message>") followed by a newline.
        return '("{0}" "{1}")\n'.format(self.type, self.message)

    def __str__(self):
        return "MQSExceptionBase " + self.get_info()
class MQSClientException(MQSExceptionBase):
    """MQS error raised on the client side."""

    def __init__(self, type, message):
        super(MQSClientException, self).__init__(type, message)

    def __str__(self):
        return "MQSClientException " + self.get_info()
class MQSServerException(MQSExceptionBase):
    """MQS error reported by the server, with request/host identifiers."""

    def __init__(self, type, message, request_id, host_id):
        super(MQSServerException, self).__init__(type, message)
        self.request_id = request_id
        self.host_id = host_id

    def __str__(self):
        return "MQSServerException " + self.get_info()
class MQSClientNetworkException(MQSClientException):
    """Network-related variant of MQSClientException."""

    def __init__(self, type, message):
        super(MQSClientNetworkException, self).__init__(type, message)

    def get_info(self):
        # NOTE(review): unlike the base class this variant separates the two
        # fields with a comma; preserved as-is.
        return '("{0}", "{1}")\n'.format(self.type, self.message)

    def __str__(self):
        return "MQSClientNetworkException " + self.get_info()
class MQSClientParameterException(MQSClientException):
    """Parameter-related variant of MQSClientException."""

    def __init__(self, type, message):
        super(MQSClientParameterException, self).__init__(type, message)

    def __str__(self):
        return "MQSClientParameterException " + self.get_info()
| 856 | 121 | 442 |
3f4387a58d4f1914a658e49891ce4cb5c24e3ba2 | 7,610 | py | Python | apps/python/MiTuner_socket_server.py | UBTEDU/ZLK38AVS | 050ac1d98fbb65bd5bacd4e2024a65a4465a5731 | [
"MIT"
] | 1 | 2020-03-30T05:50:08.000Z | 2020-03-30T05:50:08.000Z | apps/python/MiTuner_socket_server.py | UBTEDU/ZLK38AVS | 050ac1d98fbb65bd5bacd4e2024a65a4465a5731 | [
"MIT"
] | null | null | null | apps/python/MiTuner_socket_server.py | UBTEDU/ZLK38AVS | 050ac1d98fbb65bd5bacd4e2024a65a4465a5731 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# MiTuner_socket_server.py -- Python 2.7 socket server to be used with MiTuner Bridge
# Copyright 2018 Microsemi Inc. All rights reserved.
#Licensed under the MIT License. See LICENSE.txt in the project root for license information.
from os.path import dirname, realpath, isfile
import argparse
import sys
import struct
import socket
sys.path.append(dirname(realpath(__file__)) + "/../../vproc_sdk/libs")
from hbi import *
from tw_firmware_converter import GetFirmwareBinFileB
from hbi_load_firmware import LoadFirmware, SaveFirmwareToFlash, InitFlash, EraseFlash, SaveConfigToFlash, IsFirmwareRunning, LoadFirmwareFromFlash
# Port for the socket (random)
PORT = 5678
BUFFER_SZ = 2048
HEADER_SZ = 6
# ****************************************************************************
# ****************************************************************************
# ****************************************************************************
# ****************************************************************************
# ****************************************************************************
# ****************************************************************************
# ****************************************************************************
# ****************************************************************************
# ****************************************************************************
# ****************************************************************************
# ****************************************************************************
if __name__ == "__main__":
    # Protocol (as implemented below): each request is a fixed-size ASCII
    # header whose chars [2:6] give the hex length of the command that follows.
    parser = argparse.ArgumentParser(description = "Raspberry Pi socket server for MiTuner V1.0.0")
    parser.add_argument("-d", "--debug", help = "debug level 0: none, 1: in, 2: out, 3: in/out", type = int, default = 0)
    # Parse the input arguments
    args = parser.parse_args()
    # Init the HBI driver
    cfg = hbi_dev_cfg_t();
    handle = HBI_open(cfg)
    try:
        # Create a socket and listen on port 'PORT'
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(('', PORT))
        s.listen(1)
        # Accept connections from outside
        print("Socket created on port %d, waiting for a connection" % PORT)
        while True:
            # One client at a time; outer loop re-accepts after a disconnect.
            clientsocket, address = s.accept()
            print("Incoming connection from: %s" % address[0])
            message = ""
            waitType = "header"
            while True:
                buff = clientsocket.recv(BUFFER_SZ).decode()
                if (buff == ""):
                    print("Connection closed by the client (%s)" % address[0])
                    break
                else:
                    # Accumulate until a full header, then a full command, arrives.
                    message += buff
                    if ((waitType == "header") and (len(message) >= HEADER_SZ)):
                        header = message[0: HEADER_SZ]
                        message = message[HEADER_SZ:]
                        # Header chars [2:6] carry the command length in hex.
                        cmdLen = int(header[2: 6], 16)
                        waitType = "cmd"
                    if ((waitType == "cmd") and (len(message) >= cmdLen)):
                        cmd = message[0: cmdLen]
                        message = message[cmdLen:]
                        if (args.debug & 1):
                            print("header = %s, cmd = %s" % (header, cmd))
                        answer = ParseCmd(handle, header, cmd)
                        if (args.debug & 2):
                            print("\t" + answer)
                        clientsocket.send(answer.encode())
                        waitType = "header"
            clientsocket.close()
    except:
        # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit;
        # presumably intended as the Ctrl-C shutdown path -- confirm before
        # narrowing. If socket creation itself failed, s.close() below would
        # raise NameError.
        print("Server shut down")
    # Close the Socket
    s.close()
    # Close HBI driver
    HBI_close(handle)
| 31.446281 | 147 | 0.508936 | #!/usr/bin/env python
# MiTuner_socket_server.py -- Python 2.7 socket server to be used with MiTuner Bridge
# Copyright 2018 Microsemi Inc. All rights reserved.
#Licensed under the MIT License. See LICENSE.txt in the project root for license information.
from os.path import dirname, realpath, isfile
import argparse
import sys
import struct
import socket
sys.path.append(dirname(realpath(__file__)) + "/../../vproc_sdk/libs")
from hbi import *
from tw_firmware_converter import GetFirmwareBinFileB
from hbi_load_firmware import LoadFirmware, SaveFirmwareToFlash, InitFlash, EraseFlash, SaveConfigToFlash, IsFirmwareRunning, LoadFirmwareFromFlash
# Port for the socket (random)
PORT = 5678
BUFFER_SZ = 2048
HEADER_SZ = 6
# ****************************************************************************
def FormatNumber(res_list):
number = 0
for byteNum in res_list:
number = (number << 8) + byteNum
return number
# ****************************************************************************
def SpiBufferRead(handle, address, numBytes):
bufferString = ""
byteList = HBI_read(handle, address, numBytes)
for byteEl in byteList:
bufferString += "%02X" % byteEl
return bufferString
# ****************************************************************************
def SpiBufferWrite(handle, address, bufferString):
byteList = []
nbBytes = len(bufferString) // 2
for i in range(nbBytes):
byteList.append(int(bufferString[i * 2: i * 2 + 2], 16))
HBI_write(handle, address, byteList)
# ****************************************************************************
def SpiHwBufferRead(handle, address, numBytes):
bufferString = ""
# Setup the start address using the page 255 scheme
addressSeq = struct.unpack("4B", struct.pack(">I", address))
hbiOffset = addressSeq[3]
HBI_write(handle, 0x00C, addressSeq)
# Read the requested bytes
byteList = HBI_read(handle, 0xFF00 + hbiOffset, numBytes)
for byteEl in byteList:
bufferString += "%02X" % byteEl
return bufferString
# ****************************************************************************
def FirmwareLoading(handle, type, cmd):
if (type == "FA"):
# Start to receive a new file
FirmwareLoading.s3File = cmd
elif (type == "FB"):
# Continue to receive a new file
FirmwareLoading.s3File += cmd
else:
# FC, receive the last piece and load
FirmwareLoading.s3File += cmd
try:
# Convert the S3 in BIN (doesn't matter if not a 38040)
fwBin = GetFirmwareBinFileB(FirmwareLoading.s3File, 38040, 64)
# Load the FW
LoadFirmware(handle, fwBin)
except ValueError as err:
print(err)
return "ERROR"
return "OK"
# ****************************************************************************
def EraseSpiFlash(handle):
try:
EraseFlash(handle)
except ValueError as err:
print(err)
return "ERROR"
return "OK"
# ****************************************************************************
def SaveFirmware2Flash(handle):
try:
InitFlash(handle)
SaveFirmwareToFlash(handle)
except ValueError as err:
print(err)
return "ERROR"
return "OK"
# ****************************************************************************
def SaveConfig2Flash(handle, index):
try:
if not IsFirmwareRunning(handle):
InitFlash(handle)
SaveConfigToFlash(handle, index)
except ValueError as err:
print(err)
return "ERROR"
return "OK"
# ****************************************************************************
def LoadFwfromFlash(handle, index):
try:
InitFlash(handle)
LoadFirmwareFromFlash(handle, index)
except ValueError as err:
print(err)
return "ERROR"
return "OK"
# ****************************************************************************
def ParseCmd(handle, header, cmd):
if (header[0: 2] == "RD"):
# 16b read
retval = "%04X" % FormatNumber(HBI_read(handle, int(cmd[0: 3], 16), 2))
elif (header[0: 2] == "WR"):
# 16b write
HBI_write(handle, int(cmd[0: 3], 16), (int(cmd[3: 5], 16), int(cmd[5: 7], 16)))
retval = "OK"
elif (header[0: 2] == "BR"):
# Buffer read
retval = SpiBufferRead(handle, int(cmd[0: 3], 16), int(cmd[3: 7], 16) * 2)
elif (header[0: 2] == "BW"):
# Buffer write
retval = SpiBufferWrite(handle, int(cmd[0: 3], 16), cmd[3:])
retval = "OK"
elif (header[0: 2] == "HR"):
# Hardware buffer read (HBI 255 access)
retval = SpiHwBufferRead(handle, int(cmd[0: 8], 16), int(cmd[8: 10], 16))
elif (header[0: 2] == "FA") or (header[0: 2] == "FB") or (header[0: 2] == "FC"):
retval = FirmwareLoading(handle, header[0: 2], cmd)
elif (header[0: 2] == "ER"):
retval = EraseSpiFlash(handle)
elif (header[0: 2] == "SF"):
retval = SaveFirmware2Flash(handle)
elif (header[0: 2] == "SC"):
retval = SaveConfig2Flash(handle, int(cmd, 16))
elif (header[0: 2] == "LF"):
retval = LoadFwfromFlash(handle, int(cmd, 16))
else:
retval = "ERROR"
return "ANS" + ("%04X" % len(retval)) + retval
# ****************************************************************************
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = "Raspberry Pi socket server for MiTuner V1.0.0")
parser.add_argument("-d", "--debug", help = "debug level 0: none, 1: in, 2: out, 3: in/out", type = int, default = 0)
# Parse the input arguments
args = parser.parse_args()
# Init the HBI driver
cfg = hbi_dev_cfg_t();
handle = HBI_open(cfg)
try:
# Create a socket and listen on port 'PORT'
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', PORT))
s.listen(1)
# Accept connections from outside
print("Socket created on port %d, waiting for a connection" % PORT)
while True:
clientsocket, address = s.accept()
print("Incoming connection from: %s" % address[0])
message = ""
waitType = "header"
while True:
buff = clientsocket.recv(BUFFER_SZ).decode()
if (buff == ""):
print("Connection closed by the client (%s)" % address[0])
break
else:
message += buff
if ((waitType == "header") and (len(message) >= HEADER_SZ)):
header = message[0: HEADER_SZ]
message = message[HEADER_SZ:]
cmdLen = int(header[2: 6], 16)
waitType = "cmd"
if ((waitType == "cmd") and (len(message) >= cmdLen)):
cmd = message[0: cmdLen]
message = message[cmdLen:]
if (args.debug & 1):
print("header = %s, cmd = %s" % (header, cmd))
answer = ParseCmd(handle, header, cmd)
if (args.debug & 2):
print("\t" + answer)
clientsocket.send(answer.encode())
waitType = "header"
clientsocket.close()
except:
print("Server shut down")
# Close the Socket
s.close()
# Close HBI driver
HBI_close(handle)
| 3,623 | 0 | 220 |
4c89e38833d63e400656e1c2d7eb7aca26967cc1 | 314 | py | Python | privacy_evaluator/datasets/tf/cifar10.py | chen-yuxuan/privacy-evaluator | ed4852408108c3e6a01216af4183261945fd7e67 | [
"MIT"
] | 7 | 2021-04-10T15:01:19.000Z | 2022-02-08T14:45:21.000Z | privacy_evaluator/datasets/tf/cifar10.py | chen-yuxuan/privacy-evaluator | ed4852408108c3e6a01216af4183261945fd7e67 | [
"MIT"
] | 175 | 2021-04-13T08:32:27.000Z | 2021-08-30T09:44:51.000Z | privacy_evaluator/datasets/tf/cifar10.py | chen-yuxuan/privacy-evaluator | ed4852408108c3e6a01216af4183261945fd7e67 | [
"MIT"
] | 21 | 2021-04-13T08:03:36.000Z | 2021-10-05T15:35:01.000Z | import tensorflow as tf
from .tf import TFDataset
class TFCIFAR10(TFDataset):
    """`TFCIFAR10` class.
    Represents a CIFAR10 dataset class for TensorFlow.
    """
    # Keras dataset module used to download/load the raw CIFAR10 data.
    TF_MODULE = tf.keras.datasets.cifar10
    # Sample counts of the standard CIFAR10 train/test splits.
    DATASET_SIZE = {"train": 50000, "test": 10000}
    # 32x32 RGB images.
    INPUT_SHAPE = (32, 32, 3)
    # Ten object categories.
    N_CLASSES = 10
| 19.625 | 54 | 0.66879 | import tensorflow as tf
from .tf import TFDataset
class TFCIFAR10(TFDataset):
    """`TFCIFAR10` class.
    Represents a CIFAR10 dataset class for TensorFlow.
    """
    # Keras dataset module used to download/load the raw CIFAR10 data.
    TF_MODULE = tf.keras.datasets.cifar10
    # Sample counts of the standard CIFAR10 train/test splits.
    DATASET_SIZE = {"train": 50000, "test": 10000}
    # 32x32 RGB images.
    INPUT_SHAPE = (32, 32, 3)
    # Ten object categories.
    N_CLASSES = 10
| 0 | 0 | 0 |
c660ad90eae4fb538c71d5a8dc812c1fba056f81 | 4,732 | py | Python | signing.py | tsunghowu/DiskImageCreator | b56d6cdf20fcedc70f64f1a89a73934460ac9973 | [
"MIT"
] | 1 | 2021-03-07T12:13:58.000Z | 2021-03-07T12:13:58.000Z | signing.py | tsunghowu/DiskImageCreator | b56d6cdf20fcedc70f64f1a89a73934460ac9973 | [
"MIT"
] | null | null | null | signing.py | tsunghowu/DiskImageCreator | b56d6cdf20fcedc70f64f1a89a73934460ac9973 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# File name: name.py
import sys
import json
import os
import rsa
import struct
import json
from block import *
from controlblock import *
base = [str(x) for x in range(10)] + [chr(x) for x in range(ord('A'), ord('A') + 6)]
# Command-line entry point: read the JSON config, sign every configured
# region of the disk image, and emit the raw regions plus the control block.
if __name__ == '__main__':
    print 'Signing Tool for the new Secure Boot validation. Version: 1.01'
    print '   This tool is to generate valid Configuration/Regional Blocks base on given DISK raw image'
    print '   Usage(windows platform): python27 signing.py config.json'
    print '   See the details in .json files'
    # Exactly one argument is expected: the path of the JSON config file.
    if len(sys.argv) != 2 :
        sys.exit(-1)
    SigningObjects = []
    NewRegionBlock = [None,None,None,None]
    ConfigData = {}
    ConfigFile = sys.argv[1]
    with open(ConfigFile) as inputFile:
        ConfigData = json.load(inputFile)
        inputFile.close()
        pass
    # Process the jobs in ascending 'Seq' order.
    ConfigData['Jobs'].sort(object_compare)
    print ConfigData['InputFile']
    '''
    Extract raw data from each section.
    '''
    # Slurp the whole disk image into memory.
    with open( ConfigData['InputFile'] , 'rb') as diskFile:
        TargetFileSize = os.path.getsize( ConfigData['InputFile'] )
        fileContent = diskFile.read(TargetFileSize)
        diskFile.close()
    fileContent = bytearray(fileContent)
    # The MBR region is at least the first sector; if partition 1 exists it
    # extends up to partition 1's first LBA (i.e. it includes the bootloader).
    MBR = fileContent[0:512]
    # 0x1C6 is the little-endian 'starting LBA' field of partition entry 1.
    Partition1LBA, = struct.unpack("<I", fileContent[0x1C6:0x1C6+4] )
    if Partition1LBA != 0:
        if Partition1LBA > 0x800 :
            print "Warning!!! The size of MBR+Booloader exceeds 2048 sectors."
        MBR = fileContent[0:Partition1LBA*512]
    MBR_obj = PartitionBlock(MBR, 0)
    MBR_obj.SetRawData(MBR)
    SigningObjects.append(MBR_obj)
    # Region 1 is the MBR/bootloader region: sign it and dump its raw bytes.
    for dataElement in ConfigData['Jobs']:
        if dataElement['RegionID'] == 1: #MBR
            NewRegionBlock[0] = RegionBlock(int(dataElement['RegionID']),
                                int(dataElement['HashingType']),
                                dataElement['PrivateKeyFile'])
            NewRegionBlock[0].SigningRegionalData(MBR_obj.GetRawData())
            with open(dataElement['OutputRawFile'], 'wb+') as OutputFile:
                OutputFile.write(MBR_obj.GetRawData())
                OutputFile.close()
    # Regions 2..4 map to partition entries 1..3 of the MBR partition table.
    for dataElement in ConfigData['Jobs']:
        if dataElement['RegionID'] != 1:
            PartIndex = dataElement['RegionID']-2
            # Read start LBA and size (in sectors) from the partition entry.
            PartitionEntity = fileContent[0x1C6+0x10*PartIndex:0x1C6+0x10*PartIndex+8]
            PartitionLBA, PartitionSize = struct.unpack("<II", PartitionEntity )
            if PartitionLBA == 0 or PartitionSize == 0:
                print "Error!!! The config does not match to the structure in MBR."
                sys.exit(-1)
            Part_Objs = PartitionBlock(PartitionEntity, 1)
            RawData = fileContent[Part_Objs.GetLBAStarting()*512:
                        Part_Objs.GetLBAStarting()*512 + Part_Objs.GetSize()*512 ]
            Part_Objs.SetRawData(RawData)
            SigningObjects.append(Part_Objs)
            i = dataElement['RegionID']-1
            NewRegionBlock[i] = RegionBlock(int(dataElement['RegionID']),
                                int(dataElement['HashingType']),
                                dataElement['PrivateKeyFile'])
            NewRegionBlock[i].SigningRegionalData(Part_Objs.GetRawData())
            with open(dataElement['OutputRawFile'], 'wb+') as OutputFile:
                OutputFile.write(Part_Objs.GetRawData())
                OutputFile.close()
            pass
    # Write a copy of the image followed by the signed control block, which
    # references every region block built above.
    with open(ConfigData['OutputConfigBlock'], 'wb+') as OutputFile:
        OutputFile.write(fileContent)
        CB = ControlBlock(int(ConfigData['Version']),
                        3,
                        ConfigData['PrivateKeyFile'],
                        int(ConfigData['HashingType']))
        # version, NumberOfRegions, CtrlPrivateKey, HashType
        for rb in NewRegionBlock:
            CB.add_region_block(rb)
        OutputFile.write(CB.GetRawData())
        OutputFile.close()
    # Export the raw public key so the verifying side can check signatures.
    with open(ConfigData['OutputRawPubkey'], 'wb+') as PubRawFile:
        PubRawFile.write(CB.Get_Raw_Public_Key())
        PubRawFile.close()
    pass
else:
    print 'I am being imported from another module.'
| 36.682171 | 105 | 0.560862 | #!/usr/bin/env python
# File name: name.py
import sys
import json
import os
import rsa
import struct
import json
from block import *
from controlblock import *
base = [str(x) for x in range(10)] + [chr(x) for x in range(ord('A'), ord('A') + 6)]
def object_compare(x, y):
    """Three-way comparator on the 'Seq' key, for list.sort(cmp=...).

    Returns 1 when x sorts after y, 0 when they tie, -1 when x sorts first,
    which yields an ascending sort by 'Seq'.
    """
    lhs = x['Seq']
    rhs = y['Seq']
    if lhs < rhs:
        return -1
    if lhs == rhs:
        return 0
    return 1
# Command-line entry point: read the JSON config, sign every configured
# region of the disk image, and emit the raw regions plus the control block.
if __name__ == '__main__':
    print 'Signing Tool for the new Secure Boot validation. Version: 1.01'
    print '   This tool is to generate valid Configuration/Regional Blocks base on given DISK raw image'
    print '   Usage(windows platform): python27 signing.py config.json'
    print '   See the details in .json files'
    # Exactly one argument is expected: the path of the JSON config file.
    if len(sys.argv) != 2 :
        sys.exit(-1)
    SigningObjects = []
    NewRegionBlock = [None,None,None,None]
    ConfigData = {}
    ConfigFile = sys.argv[1]
    with open(ConfigFile) as inputFile:
        ConfigData = json.load(inputFile)
        inputFile.close()
        pass
    # Process the jobs in ascending 'Seq' order (see object_compare above).
    ConfigData['Jobs'].sort(object_compare)
    print ConfigData['InputFile']
    '''
    Extract raw data from each section.
    '''
    # Slurp the whole disk image into memory.
    with open( ConfigData['InputFile'] , 'rb') as diskFile:
        TargetFileSize = os.path.getsize( ConfigData['InputFile'] )
        fileContent = diskFile.read(TargetFileSize)
        diskFile.close()
    fileContent = bytearray(fileContent)
    # The MBR region is at least the first sector; if partition 1 exists it
    # extends up to partition 1's first LBA (i.e. it includes the bootloader).
    MBR = fileContent[0:512]
    # 0x1C6 is the little-endian 'starting LBA' field of partition entry 1.
    Partition1LBA, = struct.unpack("<I", fileContent[0x1C6:0x1C6+4] )
    if Partition1LBA != 0:
        if Partition1LBA > 0x800 :
            print "Warning!!! The size of MBR+Booloader exceeds 2048 sectors."
        MBR = fileContent[0:Partition1LBA*512]
    MBR_obj = PartitionBlock(MBR, 0)
    MBR_obj.SetRawData(MBR)
    SigningObjects.append(MBR_obj)
    # Region 1 is the MBR/bootloader region: sign it and dump its raw bytes.
    for dataElement in ConfigData['Jobs']:
        if dataElement['RegionID'] == 1: #MBR
            NewRegionBlock[0] = RegionBlock(int(dataElement['RegionID']),
                                int(dataElement['HashingType']),
                                dataElement['PrivateKeyFile'])
            NewRegionBlock[0].SigningRegionalData(MBR_obj.GetRawData())
            with open(dataElement['OutputRawFile'], 'wb+') as OutputFile:
                OutputFile.write(MBR_obj.GetRawData())
                OutputFile.close()
    # Regions 2..4 map to partition entries 1..3 of the MBR partition table.
    for dataElement in ConfigData['Jobs']:
        if dataElement['RegionID'] != 1:
            PartIndex = dataElement['RegionID']-2
            # Read start LBA and size (in sectors) from the partition entry.
            PartitionEntity = fileContent[0x1C6+0x10*PartIndex:0x1C6+0x10*PartIndex+8]
            PartitionLBA, PartitionSize = struct.unpack("<II", PartitionEntity )
            if PartitionLBA == 0 or PartitionSize == 0:
                print "Error!!! The config does not match to the structure in MBR."
                sys.exit(-1)
            Part_Objs = PartitionBlock(PartitionEntity, 1)
            RawData = fileContent[Part_Objs.GetLBAStarting()*512:
                        Part_Objs.GetLBAStarting()*512 + Part_Objs.GetSize()*512 ]
            Part_Objs.SetRawData(RawData)
            SigningObjects.append(Part_Objs)
            i = dataElement['RegionID']-1
            NewRegionBlock[i] = RegionBlock(int(dataElement['RegionID']),
                                int(dataElement['HashingType']),
                                dataElement['PrivateKeyFile'])
            NewRegionBlock[i].SigningRegionalData(Part_Objs.GetRawData())
            with open(dataElement['OutputRawFile'], 'wb+') as OutputFile:
                OutputFile.write(Part_Objs.GetRawData())
                OutputFile.close()
            pass
    # Write a copy of the image followed by the signed control block, which
    # references every region block built above.
    with open(ConfigData['OutputConfigBlock'], 'wb+') as OutputFile:
        OutputFile.write(fileContent)
        CB = ControlBlock(int(ConfigData['Version']),
                        3,
                        ConfigData['PrivateKeyFile'],
                        int(ConfigData['HashingType']))
        # version, NumberOfRegions, CtrlPrivateKey, HashType
        for rb in NewRegionBlock:
            CB.add_region_block(rb)
        OutputFile.write(CB.GetRawData())
        OutputFile.close()
    # Export the raw public key so the verifying side can check signatures.
    with open(ConfigData['OutputRawPubkey'], 'wb+') as PubRawFile:
        PubRawFile.write(CB.Get_Raw_Public_Key())
        PubRawFile.close()
    pass
else:
    print 'I am being imported from another module.'
| 179 | 0 | 23 |
91ed008cb5ddfd5bd67892c75d563cdad9ee65b3 | 11,416 | py | Python | tools/android/loading/analyze.py | google-ar/chromium | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | tools/android/loading/analyze.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | tools/android/loading/analyze.py | harrymarkovskiy/WebARonARCore | 2441c86a5fd975f09a6c30cddb57dfb7fc239699 | [
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | #! /usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import cgi
import json
import logging
import os
import subprocess
import sys
import tempfile
import time
_SRC_DIR = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..'))
sys.path.append(os.path.join(_SRC_DIR, 'third_party', 'catapult', 'devil'))
from devil.android import device_utils
from devil.android.sdk import intent
sys.path.append(os.path.join(_SRC_DIR, 'build', 'android'))
import devil_chromium
from pylib import constants
import activity_lens
import clovis_constants
import content_classification_lens
import controller
import device_setup
import frame_load_lens
import loading_graph_view
import loading_graph_view_visualization
import loading_trace
import options
import request_dependencies_lens
import request_track
import xvfb_helper
# TODO(mattcary): logging.info isn't that useful, as the whole (tools) world
# uses logging info; we need to introduce logging modules to get finer-grained
# output. For now we just do logging.warning.
OPTIONS = options.OPTIONS
def _LoadPage(device, url):
  """Load a page on chrome on our device.

  Args:
    device: an AdbWrapper for the device on which to load the page.
    url: url as a string to load.
  """
  # Build an Android intent targeting the configured Chrome package/activity,
  # with the url as the intent data.
  load_intent = intent.Intent(
      package=OPTIONS.ChromePackage().package,
      activity=OPTIONS.ChromePackage().activity,
      data=url)
  logging.warning('Loading ' + url)
  # blocking=True: wait until the activity has started before returning.
  device.StartActivity(load_intent, blocking=True)
def _GetPrefetchHtml(graph_view, name=None):
"""Generate prefetch page for the resources in resource graph.
Args:
graph_view: (LoadingGraphView)
name: optional string used in the generated page.
Returns:
HTML as a string containing all the link rel=prefetch directives necessary
for prefetching the given ResourceGraph.
"""
if name:
title = 'Prefetch for ' + cgi.escape(name)
else:
title = 'Generated prefetch page'
output = []
output.append("""<!DOCTYPE html>
<html>
<head>
<title>%s</title>
""" % title)
for node in graph_view.deps_graph.graph.Nodes():
output.append('<link rel="prefetch" href="%s">\n' % node.request.url)
output.append("""</head>
<body>%s</body>
</html>
""" % title)
return '\n'.join(output)
def _LogRequests(url, clear_cache_override=None):
  """Logs requests for a web page.

  Args:
    url: url to log as string.
    clear_cache_override: if not None, set clear_cache different from OPTIONS.

  Returns:
    JSON dict of logged information (ie, a dict that describes JSON).
  """
  xvfb_process = None
  if OPTIONS.local:
    chrome_ctl = controller.LocalChromeController()
    if OPTIONS.headless:
      # Run the local Chrome against a virtual X display.
      xvfb_process = xvfb_helper.LaunchXvfb()
      chrome_ctl.SetChromeEnvOverride(xvfb_helper.GetChromeEnvironment())
  else:
    chrome_ctl = controller.RemoteChromeController(
        device_setup.GetFirstDevice())
  # An explicit override, when given, wins over the global option.
  clear_cache = (clear_cache_override if clear_cache_override is not None
                 else OPTIONS.clear_cache)
  if OPTIONS.emulate_device:
    chrome_ctl.SetDeviceEmulation(OPTIONS.emulate_device)
  if OPTIONS.emulate_network:
    chrome_ctl.SetNetworkEmulation(OPTIONS.emulate_network)
  try:
    with chrome_ctl.Open() as connection:
      if clear_cache:
        connection.ClearCache()
      trace = loading_trace.LoadingTrace.RecordUrlNavigation(
          url, connection, chrome_ctl.ChromeMetadata(),
          categories=clovis_constants.DEFAULT_CATEGORIES)
  except controller.ChromeControllerError as e:
    # Dump controller state for debugging, then propagate the failure.
    # NOTE(review): xvfb_process is not terminated on this path — confirm
    # whether the leaked Xvfb is intentional.
    e.Dump(sys.stderr)
    raise
  if xvfb_process:
    xvfb_process.terminate()
  return trace.ToJsonDict()
def _FullFetch(url, json_output, prefetch):
  """Do a full fetch with optional prefetching.

  Args:
    url: url to fetch; 'http://' is prepended when no scheme is given.
    json_output: path of the JSON trace file to write.
    prefetch: if True, do a cold fetch, load a generated prefetch page on the
        device, then do a warm fetch. The warm trace goes to json_output and
        the cold one to json_output + '.cold'. If False, only the cold trace
        is written to json_output.
  """
  if not url.startswith('http') and not url.startswith('file'):
    url = 'http://' + url
  logging.warning('Cold fetch')
  cold_data = _LogRequests(url)
  assert cold_data, 'Cold fetch failed to produce data. Check your phone.'
  if prefetch:
    # Prefetch requires a device: the generated page is pushed via adb.
    assert not OPTIONS.local
    logging.warning('Generating prefetch')
    prefetch_html = _GetPrefetchHtml(_ProcessJsonTrace(cold_data), name=url)
    tmp = tempfile.NamedTemporaryFile()
    tmp.write(prefetch_html)
    tmp.flush()
    # We hope that the tmpfile name is unique enough for the device.
    target = os.path.join('/sdcard/Download', os.path.basename(tmp.name))
    device = device_setup.GetFirstDevice()
    device.adb.Push(tmp.name, target)
    logging.warning('Pushed prefetch %s to device at %s' % (tmp.name, target))
    _LoadPage(device, 'file://' + target)
    # Give the prefetch page time to fetch its resources into the cache.
    time.sleep(OPTIONS.prefetch_delay_seconds)
    logging.warning('Warm fetch')
    warm_data = _LogRequests(url, clear_cache_override=False)
    with open(json_output, 'w') as f:
      json.dump(warm_data, f)
    logging.warning('Wrote ' + json_output)
    with open(json_output + '.cold', 'w') as f:
      json.dump(cold_data, f)
    logging.warning('Wrote ' + json_output + '.cold')
  else:
    with open(json_output, 'w') as f:
      json.dump(cold_data, f)
    logging.warning('Wrote ' + json_output)
# Maps the command-line command name to its handler function (see main()).
COMMAND_MAP = {
    'png': DoPng,
    'prefetch_setup': DoPrefetchSetup,
    'log_requests': DoLogRequests,
    'longpole': DoLongPole,
    'nodecost': DoNodeCost,
    'cost': DoCost,
    'fetch': DoFetch,
}
if __name__ == '__main__':
  main()
| 33.576471 | 80 | 0.689296 | #! /usr/bin/python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import argparse
import cgi
import json
import logging
import os
import subprocess
import sys
import tempfile
import time
_SRC_DIR = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', '..', '..'))
sys.path.append(os.path.join(_SRC_DIR, 'third_party', 'catapult', 'devil'))
from devil.android import device_utils
from devil.android.sdk import intent
sys.path.append(os.path.join(_SRC_DIR, 'build', 'android'))
import devil_chromium
from pylib import constants
import activity_lens
import clovis_constants
import content_classification_lens
import controller
import device_setup
import frame_load_lens
import loading_graph_view
import loading_graph_view_visualization
import loading_trace
import options
import request_dependencies_lens
import request_track
import xvfb_helper
# TODO(mattcary): logging.info isn't that useful, as the whole (tools) world
# uses logging info; we need to introduce logging modules to get finer-grained
# output. For now we just do logging.warning.
OPTIONS = options.OPTIONS
def _LoadPage(device, url):
  """Load a page on chrome on our device.

  Args:
    device: an AdbWrapper for the device on which to load the page.
    url: url as a string to load.
  """
  # Build an Android intent targeting the configured Chrome package/activity,
  # with the url as the intent data.
  load_intent = intent.Intent(
      package=OPTIONS.ChromePackage().package,
      activity=OPTIONS.ChromePackage().activity,
      data=url)
  logging.warning('Loading ' + url)
  # blocking=True: wait until the activity has started before returning.
  device.StartActivity(load_intent, blocking=True)
def _GetPrefetchHtml(graph_view, name=None):
"""Generate prefetch page for the resources in resource graph.
Args:
graph_view: (LoadingGraphView)
name: optional string used in the generated page.
Returns:
HTML as a string containing all the link rel=prefetch directives necessary
for prefetching the given ResourceGraph.
"""
if name:
title = 'Prefetch for ' + cgi.escape(name)
else:
title = 'Generated prefetch page'
output = []
output.append("""<!DOCTYPE html>
<html>
<head>
<title>%s</title>
""" % title)
for node in graph_view.deps_graph.graph.Nodes():
output.append('<link rel="prefetch" href="%s">\n' % node.request.url)
output.append("""</head>
<body>%s</body>
</html>
""" % title)
return '\n'.join(output)
def _LogRequests(url, clear_cache_override=None):
  """Logs requests for a web page.

  Args:
    url: url to log as string.
    clear_cache_override: if not None, set clear_cache different from OPTIONS.

  Returns:
    JSON dict of logged information (ie, a dict that describes JSON).
  """
  xvfb_process = None
  if OPTIONS.local:
    chrome_ctl = controller.LocalChromeController()
    if OPTIONS.headless:
      # Run the local Chrome against a virtual X display.
      xvfb_process = xvfb_helper.LaunchXvfb()
      chrome_ctl.SetChromeEnvOverride(xvfb_helper.GetChromeEnvironment())
  else:
    chrome_ctl = controller.RemoteChromeController(
        device_setup.GetFirstDevice())
  # An explicit override, when given, wins over the global option.
  clear_cache = (clear_cache_override if clear_cache_override is not None
                 else OPTIONS.clear_cache)
  if OPTIONS.emulate_device:
    chrome_ctl.SetDeviceEmulation(OPTIONS.emulate_device)
  if OPTIONS.emulate_network:
    chrome_ctl.SetNetworkEmulation(OPTIONS.emulate_network)
  try:
    with chrome_ctl.Open() as connection:
      if clear_cache:
        connection.ClearCache()
      trace = loading_trace.LoadingTrace.RecordUrlNavigation(
          url, connection, chrome_ctl.ChromeMetadata(),
          categories=clovis_constants.DEFAULT_CATEGORIES)
  except controller.ChromeControllerError as e:
    # Dump controller state for debugging, then propagate the failure.
    # NOTE(review): xvfb_process is not terminated on this path — confirm
    # whether the leaked Xvfb is intentional.
    e.Dump(sys.stderr)
    raise
  if xvfb_process:
    xvfb_process.terminate()
  return trace.ToJsonDict()
def _FullFetch(url, json_output, prefetch):
  """Do a full fetch with optional prefetching.

  Args:
    url: url to fetch; 'http://' is prepended when no scheme is given.
    json_output: path of the JSON trace file to write.
    prefetch: if True, do a cold fetch, load a generated prefetch page on the
        device, then do a warm fetch. The warm trace goes to json_output and
        the cold one to json_output + '.cold'. If False, only the cold trace
        is written to json_output.
  """
  if not url.startswith('http') and not url.startswith('file'):
    url = 'http://' + url
  logging.warning('Cold fetch')
  cold_data = _LogRequests(url)
  assert cold_data, 'Cold fetch failed to produce data. Check your phone.'
  if prefetch:
    # Prefetch requires a device: the generated page is pushed via adb.
    assert not OPTIONS.local
    logging.warning('Generating prefetch')
    prefetch_html = _GetPrefetchHtml(_ProcessJsonTrace(cold_data), name=url)
    tmp = tempfile.NamedTemporaryFile()
    tmp.write(prefetch_html)
    tmp.flush()
    # We hope that the tmpfile name is unique enough for the device.
    target = os.path.join('/sdcard/Download', os.path.basename(tmp.name))
    device = device_setup.GetFirstDevice()
    device.adb.Push(tmp.name, target)
    logging.warning('Pushed prefetch %s to device at %s' % (tmp.name, target))
    _LoadPage(device, 'file://' + target)
    # Give the prefetch page time to fetch its resources into the cache.
    time.sleep(OPTIONS.prefetch_delay_seconds)
    logging.warning('Warm fetch')
    warm_data = _LogRequests(url, clear_cache_override=False)
    with open(json_output, 'w') as f:
      json.dump(warm_data, f)
    logging.warning('Wrote ' + json_output)
    with open(json_output + '.cold', 'w') as f:
      json.dump(cold_data, f)
    logging.warning('Wrote ' + json_output + '.cold')
  else:
    with open(json_output, 'w') as f:
      json.dump(cold_data, f)
    logging.warning('Wrote ' + json_output)
def _ProcessTraceFile(filename):
  """Load the JSON trace stored in |filename| and build a graph view from it."""
  with open(filename) as trace_file:
    trace_dict = json.load(trace_file)
  return _ProcessJsonTrace(trace_dict)
def _ProcessJsonTrace(json_dict):
  """Build a LoadingGraphView from a JSON trace dict.

  Args:
    json_dict: trace as produced by LoadingTrace.ToJsonDict().

  Returns:
    A LoadingGraphView over the trace, with content/frame/activity/dependency
    lenses attached, and ads removed when --noads is set.
  """
  trace = loading_trace.LoadingTrace.FromJsonDict(json_dict)
  content_lens = (
      content_classification_lens.ContentClassificationLens.WithRulesFiles(
          trace, OPTIONS.ad_rules, OPTIONS.tracking_rules))
  frame_lens = frame_load_lens.FrameLoadLens(trace)
  activity = activity_lens.ActivityLens(trace)
  deps_lens = request_dependencies_lens.RequestDependencyLens(trace)
  graph_view = loading_graph_view.LoadingGraphView(
      trace, deps_lens, content_lens, frame_lens, activity)
  if OPTIONS.noads:
    graph_view.RemoveAds()
  return graph_view
def InvalidCommand(cmd):
  """Abort the process, reporting |cmd| as invalid and listing the choices."""
  choices = ' '.join(COMMAND_MAP.keys())
  sys.exit('Invalid command "%s"\nChoices are: %s' % (cmd, choices))
def DoPng(arg_str):
  """Render the dependency graph of a trace to a PNG file (via graphviz dot)."""
  OPTIONS.ParseArgs(arg_str, description='Generates a PNG from a trace',
                    extra=['request_json', ('--png_output', ''),
                           ('--eog', False)])
  graph_view = _ProcessTraceFile(OPTIONS.request_json)
  visualization = (
      loading_graph_view_visualization.LoadingGraphViewVisualization(
          graph_view))
  tmp = tempfile.NamedTemporaryFile()
  visualization.OutputDot(tmp)
  tmp.flush()
  # Default output name: derive it from the input trace file name.
  png_output = OPTIONS.png_output
  if not png_output:
    if OPTIONS.request_json.endswith('.json'):
      png_output = OPTIONS.request_json[
          :OPTIONS.request_json.rfind('.json')] + '.png'
    else:
      png_output = OPTIONS.request_json + '.png'
  subprocess.check_call(['dot', '-Tpng', tmp.name, '-o', png_output])
  logging.warning('Wrote ' + png_output)
  if OPTIONS.eog:
    # Open the result in an image viewer, without waiting for it.
    subprocess.Popen(['eog', png_output])
  tmp.close()
def DoPrefetchSetup(arg_str):
  """Write the prefetch HTML page for a trace, optionally pushing it via adb."""
  OPTIONS.ParseArgs(arg_str, description='Sets up prefetch',
                    extra=['request_json', 'target_html', ('--upload', False)])
  graph_view = _ProcessTraceFile(OPTIONS.request_json)
  with open(OPTIONS.target_html, 'w') as html:
    html.write(_GetPrefetchHtml(
        graph_view, name=os.path.basename(OPTIONS.request_json)))
  if OPTIONS.upload:
    device = device_setup.GetFirstDevice()
    destination = os.path.join('/sdcard/Download',
                               os.path.basename(OPTIONS.target_html))
    device.adb.Push(OPTIONS.target_html, destination)
    logging.warning(
        'Pushed %s to device at %s' % (OPTIONS.target_html, destination))
def DoLogRequests(arg_str):
  """Record the requests of one page load into a JSON trace file."""
  OPTIONS.ParseArgs(arg_str, description='Logs requests of a load',
                    extra=['--url', '--output', ('--prefetch', False)])
  _FullFetch(url=OPTIONS.url,
             json_output=OPTIONS.output,
             prefetch=OPTIONS.prefetch)
def DoFetch(arg_str):
  """Do cold+warm fetches of a site into DIR with standard trace naming."""
  OPTIONS.ParseArgs(arg_str,
                    description=('Fetches SITE into DIR with '
                                 'standard naming that can be processed by '
                                 './cost_to_csv.py. Both warm and cold '
                                 'fetches are done. SITE can be a full url '
                                 'but the filename may be strange so better '
                                 'to just use a site (ie, domain).'),
                    extra=['--site', '--dir'])
  if not os.path.exists(OPTIONS.dir):
    os.makedirs(OPTIONS.dir)
  _FullFetch(url=OPTIONS.site,
             json_output=os.path.join(OPTIONS.dir, OPTIONS.site + '.json'),
             prefetch=True)
def DoLongPole(arg_str):
  """Print the last request on the graph's critical (most costly) path."""
  OPTIONS.ParseArgs(arg_str, description='Calculates long pole',
                    extra='request_json')
  graph_view = _ProcessTraceFile(OPTIONS.request_json)
  path_list = []
  # Cost() fills path_list with the nodes of the critical path.
  cost = graph_view.deps_graph.Cost(path_list=path_list)
  print '%s (%s)' % (path_list[-1].request.url, cost)
def DoNodeCost(arg_str):
  """Print the sum of individual node costs of a trace's graph."""
  OPTIONS.ParseArgs(arg_str,
                    description='Calculates node cost',
                    extra='request_json')
  graph_view = _ProcessTraceFile(OPTIONS.request_json)
  print sum((n.cost for n in graph_view.deps_graph.graph.Nodes()))
def DoCost(arg_str):
  """Print the total graph cost, optionally listing the critical path."""
  OPTIONS.ParseArgs(arg_str,
                    description='Calculates total cost',
                    extra=['request_json', ('--path', False)])
  graph_view = _ProcessTraceFile(OPTIONS.request_json)
  path_list = []
  print 'Graph cost: %s' % graph_view.deps_graph.Cost(path_list=path_list)
  if OPTIONS.path:
    for n in path_list:
      print '  ' + request_track.ShortName(n.request.url)
# Maps the command-line command name to its handler function (see main()).
COMMAND_MAP = {
    'png': DoPng,
    'prefetch_setup': DoPrefetchSetup,
    'log_requests': DoLogRequests,
    'longpole': DoLongPole,
    'nodecost': DoNodeCost,
    'cost': DoCost,
    'fetch': DoFetch,
}
def main():
  """Entry point: register global options, parse the command, and dispatch."""
  logging.basicConfig(level=logging.WARNING)
  OPTIONS.AddGlobalArgument(
      'clear_cache', True, 'clear browser cache before loading')
  OPTIONS.AddGlobalArgument(
      'emulate_device', '',
      'Name of the device to emulate. Must be present '
      'in --devices_file, or empty for no emulation.')
  OPTIONS.AddGlobalArgument('emulate_network', '',
      'Type of network emulation. Empty for no emulation.')
  OPTIONS.AddGlobalArgument(
      'local', False,
      'run against local desktop chrome rather than device '
      '(see also --local_binary and local_profile_dir)')
  OPTIONS.AddGlobalArgument(
      'noads', False, 'ignore ad resources in modeling')
  OPTIONS.AddGlobalArgument(
      'ad_rules', '', 'AdBlocker+ ad rules file.')
  OPTIONS.AddGlobalArgument(
      'tracking_rules', '', 'AdBlocker+ tracking rules file.')
  OPTIONS.AddGlobalArgument(
      'prefetch_delay_seconds', 5,
      'delay after requesting load of prefetch page '
      '(only when running full fetch)')
  OPTIONS.AddGlobalArgument(
      'headless', False, 'Do not display Chrome UI (only works in local mode).')
  parser = argparse.ArgumentParser(description='Analyzes loading')
  parser.add_argument('command', help=' '.join(COMMAND_MAP.keys()))
  # Everything after the command is forwarded verbatim to the handler.
  parser.add_argument('rest', nargs=argparse.REMAINDER)
  args = parser.parse_args()
  devil_chromium.Initialize()
  # Unknown commands fall through to InvalidCommand, which exits.
  COMMAND_MAP.get(args.command,
                  lambda _: InvalidCommand(args.command))(args.rest)
# Command-line entry point.
if __name__ == '__main__':
  main()
| 5,718 | 0 | 253 |
e81227345046c3387bafe57f51f7ca3bcc8dc923 | 2,675 | py | Python | ding/worker/coordinator/resource_manager.py | sailxjx/DI-engine | c6763f8e2ba885a2a02f611195a1b5f8b50bff00 | [
"Apache-2.0"
] | 464 | 2021-07-08T07:26:33.000Z | 2022-03-31T12:35:16.000Z | ding/worker/coordinator/resource_manager.py | sailxjx/DI-engine | c6763f8e2ba885a2a02f611195a1b5f8b50bff00 | [
"Apache-2.0"
] | 177 | 2021-07-09T08:22:55.000Z | 2022-03-31T07:35:22.000Z | ding/worker/coordinator/resource_manager.py | sailxjx/DI-engine | c6763f8e2ba885a2a02f611195a1b5f8b50bff00 | [
"Apache-2.0"
] | 92 | 2021-07-08T12:16:37.000Z | 2022-03-31T09:24:41.000Z | import random
class NaiveResourceManager(object):
    r"""
    Overview:
        the naive resource manager: workers register their resource info via
        ``update`` and tasks are assigned to a randomly chosen free worker
    Interface:
        __init__, assign_collector, assign_learner, update
    """

    def __init__(self) -> None:
        r"""
        Overview:
            init the resource manager with an empty resource pool for each
            supported worker type
        """
        self._worker_type = ['collector', 'learner']
        # worker_type -> {worker_id: resource_info}
        self._resource_info = {k: {} for k in self._worker_type}

    def _assign(self, name: str, id_key: str) -> dict:
        r"""
        Overview:
            pop a randomly chosen available worker of type ``name``; shared
            implementation of ``assign_collector`` and ``assign_learner``
        Arguments:
            - name (:obj:`str`): worker type, 'collector' or 'learner'
            - id_key (:obj:`str`): key under which the chosen worker id is returned
        Returns:
            - result (:obj:`dict` or ``None``): ``{id_key: worker_id, 'resource_info': info}``, \
                or ``None`` when no worker of this type is available
        """
        available = list(self._resource_info[name].keys())
        if not available:
            return None
        selected = random.sample(available, 1)[0]
        # The chosen worker leaves the pool until re-registered via ``update``.
        info = self._resource_info[name].pop(selected)
        return {id_key: selected, 'resource_info': info}

    def assign_collector(self, collector_task: dict) -> dict:
        r"""
        Overview:
            assign the collector_task randomly and return the resource info
        Arguments:
            - collector_task (:obj:`dict`): the collector task to assign (currently unused)
        Returns:
            - result (:obj:`dict` or ``None``): dict with keys 'collector_id' and \
                'resource_info', or ``None`` if no collector is available
        """
        return self._assign('collector', 'collector_id')

    def assign_learner(self, learner_task: dict) -> dict:
        r"""
        Overview:
            assign the learner_task randomly and return the resource info
        Arguments:
            - learner_task (:obj:`dict`): the learner task to assign (currently unused)
        Returns:
            - result (:obj:`dict` or ``None``): dict with keys 'learner_id' and \
                'resource_info', or ``None`` if no learner is available
        """
        return self._assign('learner', 'learner_id')

    def update(self, name: str, worker_id: str, resource_info: dict) -> None:
        r"""
        Overview:
            register or update the resource info of one worker
        Arguments:
            - name (:obj:`str`): worker type, 'collector' or 'learner'
            - worker_id (:obj:`str`): id of the worker to register
            - resource_info (:obj:`dict`): the worker's resource description
        """
        assert name in self._worker_type, "invalid worker_type: {}".format(name)
        self._resource_info[name][worker_id] = resource_info
| 37.152778 | 80 | 0.617196 | import random
class NaiveResourceManager(object):
    r"""
    Overview:
        the naive resource manager
    Interface:
        __init__, assign_collector, assign_learner, have_assigned, delete, update
    """

    def __init__(self) -> None:
        r"""
        Overview:
            init the resouce manager
        """
        self._worker_type = ['collector', 'learner']
        self._resource_info = {k: {} for k in self._worker_type}

    def assign_collector(self, collector_task: dict) -> dict:
        r"""
        Overview:
            assign the collector_task randomly and return the resouce info
            (``None`` when no collector is available)
        Arguments:
            - collector_task (:obj:`dict`): the collector task to assign
        """
        available_collector_list = list(self._resource_info['collector'].keys())
        if len(available_collector_list) > 0:
            selected_collector = random.sample(available_collector_list, 1)[0]
            # an assigned worker is removed from the pool until updated again
            info = self._resource_info['collector'].pop(selected_collector)
            return {'collector_id': selected_collector, 'resource_info': info}
        else:
            return None

    def assign_learner(self, learner_task: dict) -> dict:
        r"""
        Overview:
            assign the learner_task randomly and return the resouce info
            (``None`` when no learner is available)
        Arguments:
            - learner_task (:obj:`dict`): the learner task to assign
        """
        available_learner_list = list(self._resource_info['learner'].keys())
        if len(available_learner_list) > 0:
            selected_learner = random.sample(available_learner_list, 1)[0]
            info = self._resource_info['learner'].pop(selected_learner)
            return {'learner_id': selected_learner, 'resource_info': info}
        else:
            return None

    def have_assigned(self, name: str, worker_id: str) -> bool:
        r"""
        Overview:
            whether ``worker_id`` is currently registered in the ``name`` pool
        Arguments:
            - name (:obj:`str`): worker type, one of 'collector'/'learner'
            - worker_id (:obj:`str`): the worker to look up
        """
        # fixed annotation (was ``id``); the assert restricts ``name`` to the
        # known worker types, so a single generic lookup covers both branches
        assert name in self._worker_type, "invalid worker_type: {}".format(name)
        return worker_id in self._resource_info[name]

    def delete(self, name: str, worker_id: str) -> bool:
        r"""
        Overview:
            remove ``worker_id`` from the ``name`` pool
        Arguments:
            - name (:obj:`str`): worker type, one of 'collector'/'learner'
            - worker_id (:obj:`str`): the worker to remove
        Returns:
            - deleted (:obj:`bool`): True iff the worker was present and removed
        """
        assert name in self._worker_type, "invalid worker_type: {}".format(name)
        if worker_id in self._resource_info[name]:
            # bugfix: pop from the per-type pool; the original popped from the
            # top-level dict, which raised KeyError for any real worker id
            self._resource_info[name].pop(worker_id)
            return True
        else:
            return False

    def update(self, name: str, worker_id: str, resource_info: dict) -> None:
        r"""
        Overview:
            update the reource info
        """
        assert name in self._worker_type, "invalid worker_type: {}".format(name)
        self._resource_info[name][worker_id] = resource_info
| 582 | 0 | 54 |
86fa212ba13d057b37a3828ca0036bad8f61bb80 | 523 | py | Python | database/serializers.py | taixingbi/tmp | e3f941f04f08279df59ef016debfe7eb826fc639 | [
"MIT"
] | null | null | null | database/serializers.py | taixingbi/tmp | e3f941f04f08279df59ef016debfe7eb826fc639 | [
"MIT"
] | 7 | 2020-06-06T01:22:35.000Z | 2022-02-10T10:22:37.000Z | database/serializers.py | taixingbi/tmp | e3f941f04f08279df59ef016debfe7eb826fc639 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from .models import Ml_test
#from .models import Account, Teleconference_transcribe
from rest_framework import filters
# class Teleconference_transcribeSerializer(serializers.HyperlinkedModelSerializer):
# class Meta:
# model = Teleconference_transcribe
# fields = ['filename', 'transcription', 'transcription_baseline'] | 32.6875 | 84 | 0.759082 | from rest_framework import serializers
from .models import Ml_test
#from .models import Account, Teleconference_transcribe
from rest_framework import filters
class Ml_testSerializer(serializers.HyperlinkedModelSerializer):
    """Hyperlinked DRF serializer exposing the ``email`` and ``name`` fields of ``Ml_test``."""
    class Meta:
        model = Ml_test
        fields = ['email', 'name']
# class Teleconference_transcribeSerializer(serializers.HyperlinkedModelSerializer):
# class Meta:
# model = Teleconference_transcribe
# fields = ['filename', 'transcription', 'transcription_baseline'] | 0 | 118 | 23 |
a4be4402db5e8948e7572b20983b2d63d21cfd9c | 11,313 | py | Python | update.py | Hmaksu/Zoom-Duration-Calculator | 118dbc17997b54f398914fb399ca2c882b0d0969 | [
"MIT"
] | null | null | null | update.py | Hmaksu/Zoom-Duration-Calculator | 118dbc17997b54f398914fb399ca2c882b0d0969 | [
"MIT"
] | null | null | null | update.py | Hmaksu/Zoom-Duration-Calculator | 118dbc17997b54f398914fb399ca2c882b0d0969 | [
"MIT"
] | null | null | null | from tkinter import *
from tkinter import filedialog
from tkinter.ttk import *
import os
import xlrd
import xlsxwriter
root = Tk()
root.title("CivilCon")
root.iconbitmap("CC.ico")
root.geometry("500x500")
e = CivilCon(root)
root.mainloop()
| 45.616935 | 497 | 0.455847 | from tkinter import *
from tkinter import filedialog
from tkinter.ttk import *
import os
import xlrd
import xlsxwriter
root = Tk()
root.title("CivilCon")
root.iconbitmap("CC.ico")
root.geometry("500x500")
class CivilCon:
    """Tkinter GUI that reads a Zoom attendance report (.xls) and writes an
    attendance/duration spreadsheet (Sheet.xlsx) per conference session.

    Flow: __init__ builds the first page (session count + file picker),
    ``session`` builds per-session start/end time pickers, ``start`` parses
    the report with xlrd and writes results with xlsxwriter.
    UI strings are Turkish (e.g. "Katıldı" = attended, "Katılmadı" = absent).
    """
    def __init__(self, master): #First Page
        # master: the Tk root window; all widgets are gridded onto it.
        self.master = master
        Label(self.master, text = "Kaç oturum var?").grid(row = 0, column = 0)
        # self.clicked holds the selected number of sessions (as a string)
        self.clicked = StringVar()
        OptionMenu(self.master, self.clicked, "1", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10").grid( row = 0, column = 1)
        Button(self.master, text = "Seç", command = self.session).grid(row = 0, column = 4)
        Button(self.master, text = "Excel Dosyası", command = self.Excel).grid(row = 0, column = 3)
    def Excel(self):
        """Ask the user for the attendance report file; remember its path."""
        self.attachment_file_directory = filedialog.askopenfilename(initialdir = os.path, title = "Excel")
    def session(self):
        """Build the second page: one row of start/end hour+minute pickers per
        session. Falls back to the file dialog when no valid .xls was chosen
        (the bare except also covers the attribute not existing yet)."""
        try:
            if self.attachment_file_directory[-3:] == "xls":
                # clear the first page before building the pickers
                for widget in self.master.winfo_children():
                    widget.destroy()
                # build keys "self.clicked_version<1..4><session index>" mapped
                # to StringVars: 1=start hour, 2=start minute, 3=end hour, 4=end minute
                variables_for_dict = []
                key_of_dict = []
                for x in range(int(self.clicked.get())):
                    variables_for_dict.append("self.clicked_version1"+str(x))
                    variables_for_dict.append("self.clicked_version2"+str(x))
                    variables_for_dict.append("self.clicked_version3"+str(x))
                    variables_for_dict.append("self.clicked_version4"+str(x))
                    key_of_dict.append(StringVar())
                    key_of_dict.append(StringVar())
                    key_of_dict.append(StringVar())
                    key_of_dict.append(StringVar())
                self.variable_dictionary = dict(zip(variables_for_dict, key_of_dict))
                # header row: "Başlangıç" = start, "Bitiş" = end,
                # "Saat" = hour, "Dakika" = minute
                Label(self.master, text = "Başlangıç").grid(row = 0, column = 1)
                Label(self.master, text = "|").grid(row = 0, column = 3)
                Label(self.master, text = "Bitiş").grid(row = 0, column = 4)
                Label(self.master, text = "Saat").grid(row = 1, column = 1)
                Label(self.master, text = "Dakika").grid(row = 1, column = 2)
                Label(self.master, text = "|").grid(row = 1, column = 3)
                Label(self.master, text = "Saat").grid(row = 1, column = 4)
                Label(self.master, text = "Dakika").grid(row = 1, column = 5)
                # one picker row per session ("Oturum" = session)
                for x in range(int(self.clicked.get())):
                    Label(self.master, text = str(x+1) + ". Oturum").grid(row = x+2, column = 0)
                    OptionMenu(self.master, self.variable_dictionary["self.clicked_version1"+str(x)] , "01", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24").grid( row = x+2, column = 1)
                    OptionMenu(self.master, self.variable_dictionary["self.clicked_version2"+str(x)], "00", "00", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59").grid( row = x+2, column = 2)
                    Label(self.master, text = "|").grid(row = x+2, column = 3)
                    OptionMenu(self.master, self.variable_dictionary["self.clicked_version3"+str(x)], "01", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24").grid( row = x+2, column = 4)
                    OptionMenu(self.master, self.variable_dictionary["self.clicked_version4"+str(x)], "00", "00", "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "50", "51", "52", "53", "54", "55", "56", "57", "58", "59").grid( row = x+2, column = 5)
                # "Başlat" = start processing
                Button(self.master, text = "Başlat", command = self.start).grid(row = int(self.clicked.get())+10, column = 5)
            else:
                self.Excel()
        except:
            self.Excel()
    def start(self):
        """Read the picked session times and the xls report, then write
        Sheet.xlsx with per-session attendance and total minutes per person."""
        # collect the StringVar values; relies on dict insertion order so the
        # hour/minute pairs come out in the order they were created
        sessions = []
        for k, v in self.variable_dictionary.items():
            sessions.append(v.get())
        # join hour/minute pairs into "HH:MM" strings (the except arm
        # re-appends the last pair when the odd index runs past the end)
        sessions_vol2 = []
        for x in range(len(sessions)):
            if x%2 == 0:
                try:
                    sessions_vol2.append(sessions[x]+":"+sessions[x+1])
                except:
                    sessions_vol2.append(sessions[-2]+":"+sessions[-1])
        sessions = sessions_vol2
        try:
            path = self.attachment_file_directory
        except:
            self.Excel()
        # rebuild the first page so another run can be started afterwards
        for widget in self.master.winfo_children():
            widget.destroy()
        Label(self.master, text = "Kaç oturum var?").grid(row = 0, column = 0)
        self.clicked = StringVar()
        OptionMenu(self.master, self.clicked, "1", "1", "2", "3", "4", "5", "6", "7", "8", "9", "10").grid( row = 0, column = 1)
        Button(self.master, text = "Seç", command = self.session).grid(row = 0, column = 4)
        Button(self.master, text = "Excel Dosyası", command = self.Excel).grid(row = 0, column = 3)
        # read column 0 of the report, skipping the 4 header rows
        attendees = []
        inputWorkbook = xlrd.open_workbook(path)
        inputWorksheet = inputWorkbook.sheet_by_index(0)
        for x in range(inputWorksheet.nrows-4):
            x += 4
            attendees.append(inputWorksheet.cell_value(x,0))
        attendees.sort()
        # assumes each cell is a comma-separated record (name, email, join
        # time, leave time, ...) — TODO confirm against the Zoom export format
        attendees_list_form = []
        for x in attendees:
            x = "CC | "+x
            attendees_list_form.append(x.split(","))
        # empty fields (e.g. missing email) get a placeholder address
        for x in attendees_list_form:
            for k in range(len(x)):
                if x[k] == "":
                    x[k] = "info@hmaksu.com"
        # merge consecutive records that belong to the same person (same name
        # or same email), accumulating their extra join/leave times; k counts
        # how many duplicates precede the current record
        attendees_vol2 = []
        k = 0
        for x in range(len(attendees_list_form)):
            attendees_list_form[x].pop()
            attendees_list_form[x].pop()
            try:
                if attendees_list_form[x][0] == attendees_list_form[x+1][0] or attendees_list_form[x][1] == attendees_list_form[x+1][1]:
                    k += 1
                    continue
                else:
                    if k == 0:
                        attendee = attendees_list_form[x]
                        attendee.sort()
                        attendees_vol2.append(attendees_list_form[x])
                    else:
                        attendee = attendees_list_form[x]
                        for t in range(k):
                            if k == t:
                                continue
                            else:
                                t += 1
                                attendee.append(attendees_list_form[x-t][-1])
                                attendee.append(attendees_list_form[x-t][-2])
                        attendee.sort()
                        attendees_vol2.append(attendee)
                        k = 0
            except:
                # IndexError on the last record: flush it the same way
                if k == 0:
                    attendee = attendees_list_form[x]
                    attendee.sort()
                    attendees_vol2.append(attendees_list_form[x])
                else:
                    attendee = attendees_list_form[x]
                    for t in range(k):
                        if k == t:
                            continue
                        else:
                            t += 1
                            attendee.append(attendees_list_form[x-t][-1])
                            attendee.append(attendees_list_form[x-t][-2])
                    attendee.sort()
                    attendees_vol2.append(attendee)
        # attendees: [name, email, first join time, last leave time] per person
        # attendees_vol3: [name, email, all join/leave times...] per person
        attendee = []
        attendees = []
        attendee_vol3 = []
        attendees_vol3 = []
        for x in attendees_vol2:
            attendee.append(x[-2])
            attendee.append(x[-1])
            attendee.append(x[0].split()[1])
            attendee.append(x[-3].split()[1])
            attendees.append(attendee)
            attendee = []
            attendee_vol3.append(x[-2])
            attendee_vol3.append(x[-1])
            for t in x:
                if x[-2] == t or x[-1] == t:
                    continue
                else:
                    attendee_vol3.append(t.split()[1])
            attendees_vol3.append(attendee_vol3)
            attendee_vol3 = []
        # write the result workbook; header: "İsim-Soyisim" = full name,
        # "E-Posta Adresi" = email address
        outworkbook = xlsxwriter.Workbook("Sheet.xlsx")
        outworksheet = outworkbook.add_worksheet()
        outworksheet.write(0, 0, "İsim-Soyisim")
        outworksheet.write(0, 1, "E-Posta Adresi")
        # re-join the session times into "HH:MM - HH:MM" column labels
        sessions_vol2 = []
        for x in range(len(sessions)):
            try:
                if x%2 == 0:
                    sessions_vol2.append(sessions[x]+" - "+sessions[x+1])
            except:
                sessions_vol2.append(sessions[-2]+" - "+sessions[-1])
        sessions = sessions_vol2
        for x in range(len(sessions)):
            outworksheet.write(0, x+2, str(x+1)+". Oturum "+sessions[x])
        # attendance: compare HHMM integers; a person attended a session
        # unless they left before it started or joined after it ended.
        # Slicing assumes attendee times look like "HH:MM:SS" — TODO confirm.
        for x in range(len(attendees)):
            for k in range(len(attendees[x])):
                if k < 2:
                    outworksheet.write(x+1, k, attendees[x][k])
            for t in range(len(sessions)):
                #print("="*30)
                #print(attendees[x][3])
                #print(attendees[x][2])
                #print(sessions[t])
                #print("="*30)
                if int(attendees[x][3].replace(":","")[:-2]) < int(sessions[t].replace(":","")[:-7]) or int(attendees[x][2].replace(":","")[:-2]) > int(sessions[t].replace(":","")[7:]):
                    outworksheet.write(x+1, t+2, "Katılmadı")
                else:
                    outworksheet.write(x+1, t+2, "Katıldı")
        # "Toplam Süre" = total duration: sum of (leave - join) minutes over
        # each join/leave pair (entries 0 and 1 are name and email)
        outworksheet.write(0, len(sessions)+2, "Toplam Süre")
        for x in range(len(attendees_vol3)):
            total_time = 0
            for t in range(len(attendees_vol3[x])):
                if t == 0 or t == 1:
                    continue
                elif t%2 != 0:
                    total_time += int(attendees_vol3[x][t].replace(":","")[:2])*60+int(attendees_vol3[x][t].replace(":","")[2:4])-int(attendees_vol3[x][t-1].replace(":","")[:2])*60-int(attendees_vol3[x][t-1].replace(":","")[2:4])
            outworksheet.write(x+1, len(sessions)+2, str(total_time))
        outworkbook.close()
e = CivilCon(root)
root.mainloop()
| 10,907 | -6 | 157 |
7c1a8a729126d66fb236ad7b867867032e450e54 | 6,836 | py | Python | tools/svctool/svctool.py | ricaun/basicmac | 69e55e953b652ef26e52819ab77559e4a81baf70 | [
"BSD-3-Clause"
] | 1 | 2021-11-27T22:56:15.000Z | 2021-11-27T22:56:15.000Z | tools/svctool/svctool.py | ricaun/basicmac | 69e55e953b652ef26e52819ab77559e4a81baf70 | [
"BSD-3-Clause"
] | null | null | null | tools/svctool/svctool.py | ricaun/basicmac | 69e55e953b652ef26e52819ab77559e4a81baf70 | [
"BSD-3-Clause"
] | 1 | 2021-04-03T09:55:58.000Z | 2021-04-03T09:55:58.000Z | #!/usr/bin/env python3
# Copyright (C) 2016-2019 Semtech (International) AG. All rights reserved.
#
# This file is subject to the terms and conditions defined in file 'LICENSE',
# which is part of this source code package.
import os
import shlex
import sys
import re
import yaml
from typing import Callable,Dict,List,Optional,Set,Tuple
from typing import cast
from argparse import Namespace as NS # type alias
from cc import CommandCollection
if __name__ == '__main__':
ServiceTool().run()
| 36.169312 | 111 | 0.526624 | #!/usr/bin/env python3
# Copyright (C) 2016-2019 Semtech (International) AG. All rights reserved.
#
# This file is subject to the terms and conditions defined in file 'LICENSE',
# which is part of this source code package.
import os
import shlex
import sys
import re
import yaml
from typing import Callable,Dict,List,Optional,Set,Tuple
from typing import cast
from argparse import Namespace as NS # type alias
from cc import CommandCollection
class Service:
    """A single service definition parsed from a ``.svc`` YAML file."""

    def __init__(self, svcid:str, fn:str) -> None:
        """Load the YAML description *fn* for service id *svcid*.

        Raises ValueError on unknown top-level keys or malformed
        hook/define declarations.
        """
        self.id = svcid
        self.srcs : List[str] = []
        self.hooks : List[List[str]] = []
        self.hookdefs : Dict[str,List[str]] = {}
        self.require : List[str] = []
        self.defines : List[Tuple[str,Optional[str]]] = []
        self.fn = fn
        with open(fn, 'r') as stream:
            doc = yaml.safe_load(stream)
            for key, raw in doc.items():
                # scalar values are treated as one-element lists throughout
                val = raw if isinstance(raw, list) else [raw]
                if key == 'src':
                    self.srcs.extend(val)
                elif key == 'hooks':
                    self.hooks.extend(Service.parse_hook(item, fn) for item in val)
                elif key == 'require':
                    self.require.extend(val)
                elif key == 'define':
                    self.defines.extend(Service.parse_define(item, fn) for item in val)
                elif key.startswith('hook.'):
                    hook = key[5:]
                    self.hookdefs.setdefault(hook, []).extend(val)
                else:
                    raise ValueError('%s: unknown key %s' % (fn, key))

    @staticmethod
    def parse_hook(hd:str, fn:str) -> List[str]:
        """Split a C-style declaration into ``[name, 'rettype %s (args)']``."""
        m = re.match(r'^\s*(.+)\s+(\w+)\s*(\([^\)]+\))\s*$', hd)
        if m is None:
            raise ValueError('%s: invalid function declaration "%s"' % (fn, hd))
        rettype, name, arglist = m.group(1), m.group(2), m.group(3)
        return [ name, rettype + ' %s ' + arglist ]

    @staticmethod
    def parse_define(dd:str, fn:str) -> Tuple[str,Optional[str]]:
        """Split ``NAME=value`` (or bare ``NAME``) into ``(name, value-or-None)``."""
        m = re.match(r'^([^=]+)(?:=(.*))?', dd)
        if m is None:
            raise ValueError('%s: invalid define declaration "%s"' % (fn, dd))
        return cast(Tuple[str,Optional[str]], m.groups())
class ServiceCollection:
    """A resolved set of Service objects, keyed by their service id."""

    def __init__(self) -> None:
        self.svcs : Dict[str,Service] = {}

    def add(self, svc:Service) -> None:
        """Register *svc* under its id (replacing any previous entry)."""
        self.svcs[svc.id] = svc

    def validate(self) -> None:
        """Hook for cross-service consistency checks (currently a no-op)."""
        pass

    def sources(self) -> List[str]:
        """All source files contributed by the registered services, in order."""
        result : List[str] = []
        for svc in self.svcs.values():
            result.extend(svc.srcs)
        return result

    def files(self) -> List[str]:
        """The ``.svc`` definition files of the registered services."""
        return [svc.fn for svc in self.svcs.values()]

    def defines(self) -> List[str]:
        """Compiler defines: one ``SVC_<id>`` per service plus explicit ones."""
        result = ['SVC_' + sid for sid in self.svcs]
        for svc in self.svcs.values():
            for key, val in svc.defines:
                if val is None:
                    result.append(key)
                else:
                    result.append('%s=%s' % (key, shlex.quote(val)))
        return result

    def hookdefs(self) -> Dict[str,Tuple[str,List[str]]]:
        """Map hook name -> (declaration template, handlers from every service)."""
        table : Dict[str,Tuple[str,List[str]]] = {}
        for svc in self.svcs.values():
            for name, decl in svc.hooks:
                handlers : List[str] = []
                for other in self.svcs.values():
                    defs = other.hookdefs.get(name)
                    if defs is not None:
                        handlers.extend(defs)
                table[name] = (decl, handlers)
        return table

    def unresolved(self) -> Set[str]:
        """Service ids that are required by some service but not registered."""
        missing : Set[str] = set()
        for svc in self.svcs.values():
            for req in svc.require:
                if req not in self.svcs:
                    missing.add(req)
        return missing
class ServiceToolUtil:
    """Shared argparse argument declarations for the service tool commands."""

    @staticmethod
    def arg(name:str) -> Callable:
        """Return the decorator registering the well-known argument *name*.

        Raises ValueError for any name other than 'svc' or '--path'.
        """
        if name == 'svc':
            decorator = CommandCollection.arg(
                name, type=str, metavar='svcid', nargs='*',
                help='service identifier')
        elif name == '--path':
            decorator = CommandCollection.arg(
                '-p', '--path', type=str, action='append',
                help='paths to search for service definitions')
        else:
            raise ValueError()
        return decorator
class ServiceTool:
    """Command-line front end: each decorated method is a subcommand
    registered with CommandCollection at class-creation time."""
    def run(self) -> None:
        """Dispatch to the subcommand selected on the command line."""
        CommandCollection.run(self)
    @staticmethod
    def load(svcid:str, paths:List[str]) -> Optional[Service]:
        """Return the Service parsed from the first ``<svcid>.svc`` found in
        *paths*, or ``None`` when no path contains such a file."""
        for p in paths:
            fn = os.path.join(p, svcid + '.svc')
            if os.path.isfile(fn):
                return Service(svcid, fn)
        return None
    @staticmethod
    def collect(args:NS) -> ServiceCollection:
        """Load the requested services plus, transitively, everything they
        require; raise ValueError for an id with no description file."""
        sc = ServiceCollection()
        ss = set(args.svc)
        # worklist loop: resolving one service may add its requirements
        while len(ss):
            s = ss.pop()
            svc = ServiceTool.load(s, args.path or ['.'])
            if svc is None:
                raise ValueError('Cannot find service description for "%s"' % s)
            sc.add(svc)
            ss.update(sc.unresolved())
        sc.validate()
        return sc
    @ServiceToolUtil.arg('svc')
    @ServiceToolUtil.arg('--path')
    @CommandCollection.cmd(help='validate the service configuration')
    def check(self, args:NS) -> None:
        """Subcommand: resolve the configuration, printing any failure."""
        try:
            ServiceTool.collect(args)
        # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
        # and only prints the error — confirm this is intentional for a
        # "check" command before narrowing it.
        except:
            print(str(sys.exc_info()))
    @ServiceToolUtil.arg('svc')
    @ServiceToolUtil.arg('--path')
    @CommandCollection.cmd(help='output a list of source files')
    def sources(self, args:NS) -> None:
        """Subcommand: print the space-separated source file list."""
        sc = ServiceTool.collect(args)
        print(' '.join(sc.sources()))
    @ServiceToolUtil.arg('svc')
    @ServiceToolUtil.arg('--path')
    @CommandCollection.cmd(help='output a list defines for the compiler')
    def defines(self, args:NS) -> None:
        """Subcommand: print the space-separated compiler defines."""
        sc = ServiceTool.collect(args)
        print(' '.join(sc.defines()))
    @ServiceToolUtil.arg('svc')
    @CommandCollection.arg('-d', action='store_true', help='create a dependency file for make')
    @CommandCollection.arg('-o', '--output', type=str, help='output file', required=True)
    @ServiceToolUtil.arg('--path')
    @CommandCollection.cmd(help='create the svcdef header file')
    def svcdefs(self, args:NS) -> None:
        """Subcommand: write the SVCHOOK_* macro header (and, with -d, a
        make dependency file listing the contributing .svc files)."""
        sc = ServiceTool.collect(args)
        with open(args.output, 'w') as fh:
            fh.write('// Automatically generated by %s\n\n' % ' '.join(sys.argv))
            # one do{...}while(0) macro per hook, chaining every handler;
            # defs[0] is the declaration template, defs[1] the handler names
            for h, defs in sc.hookdefs().items():
                fh.write('#define SVCHOOK_%s(...) do { %s } while (0)\n' % (h,
                    ' '.join(['{ extern %s; %s(__VA_ARGS__); }'
                        % (defs[0] % f, f) for f in defs[1]])))
        if args.d:
            with open(os.path.splitext(args.output)[0] + '.d', 'w') as fh:
                deps = sc.files()
                fh.write('%s: %s\n\n' % (args.output, ' '.join(deps)))
                # empty rules so make does not fail on deleted .svc files
                for d in deps:
                    fh.write('%s:\n\n' % d)
if __name__ == '__main__':
ServiceTool().run()
| 4,923 | 1,106 | 307 |
060d91d98cf50dfe7b09fa612b44c1ea377349ae | 5,502 | py | Python | predict.py | pr-shukla/maddpg-keras | 8e3d1501f78ac2b78ee2c7053dc9299862386c17 | [
"MIT"
] | 4 | 2021-09-22T13:38:05.000Z | 2022-02-11T02:09:54.000Z | predict.py | pr-shukla/maddpg-keras | 8e3d1501f78ac2b78ee2c7053dc9299862386c17 | [
"MIT"
] | null | null | null | predict.py | pr-shukla/maddpg-keras | 8e3d1501f78ac2b78ee2c7053dc9299862386c17 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import math
from tensorflow.keras.models import load_model
from matplotlib import animation
from env_predict import *
from buffer import *
from model import *
from noise import *
dt = 0.4
v = 1.0
ve = 1.2
#Dimension of State Space for single agent
dim_agent_state = 5
num_agents = 3
#Dimension of State Space
dim_state = dim_agent_state*num_agents
#Number of Episodes
num_episodes = 3000
#Number of Steps
num_steps = 400
std_dev = 0.2
ou_noise = OUActionNoise(mean=np.zeros(1), std_deviation=float(std_dev) * np.ones(1))
ac_models = []
cr_models = []
target_ac = []
target_cr = []
path = 'C:/Users/HP/Desktop/desktop_folders/MS_Project_Codes/maddpg/maddpg_models/'
for i in range(num_agents):
ac_models.append(load_model(path + 'actor'+str(i)+'.h5'))
cr_models.append(load_model(path + 'critic'+str(i)+'.h5'))
target_ac.append(load_model(path + 'target_actor'+str(i)+'.h5'))
target_cr.append(load_model(path + 'target_critic'+str(i)+'.h5'))
ep_reward_list = []
# To store average reward history of last few episodes
avg_reward_list = []
ag1_reward_list = []
ag2_reward_list = []
ev_reward_list = []
# Takes about 20 min to train
for ep in range(1):
env = environment()
prev_state = env.initial_obs()
episodic_reward = 0
ag1_reward = 0
ag2_reward = 0
ev_reward = 0
xp1 = []
yp1 = []
xp2 = []
yp2 = []
xce = []
yce = []
#while True:
for i in range(400):
tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0)
actions = []
for j, model in enumerate(ac_models):
action = policy(tf_prev_state[:,5*j:5*(j+1)], ou_noise, model)
actions.append(float(action[0]))
# Recieve state and reward from environment.
#new_state, sys_state, ev_state = transition(prev_state, sys_state, actions, ev_state)
new_state = env.step(actions)
rewards = reward(new_state)
#buffer.record((prev_state, actions, rewards, new_state))
episodic_reward += sum(rewards)
ag1_reward += rewards[0]
ag2_reward += rewards[1]
ev_reward += rewards[2]
'''buffer.learn(ac_models, cr_models, target_ac, target_cr)
update_target(tau, ac_models, cr_models, target_ac, target_cr)'''
prev_state = new_state
xp1.append(env.p1_rx)
yp1.append(env.p1_ry)
xp2.append(env.p2_rx)
yp2.append(env.p2_ry)
xce.append(env.e_rx)
yce.append(env.e_ry)
d_p1_e = L(env.p1_rx, env.p1_ry, env.e_rx, env.e_ry)
d_p2_e = L(env.p2_rx, env.p2_ry, env.e_rx, env.e_ry)
if d_p1_e < 0.4 or d_p2_e < 0.4:
env = environment()
prev_state = env.initial_obs()
print("Captured")
#break
xc1 = [env.e_rx]
yc1 = [env.e_ry]
ep_reward_list.append(episodic_reward)
ag1_reward_list.append(ag1_reward)
ag2_reward_list.append(ag2_reward)
ev_reward_list.append(ev_reward)
# Mean of last 40 episodes
avg_reward = np.mean(ep_reward_list[-40:])
print("Trajectory plot will be generated")
avg_reward_list.append(avg_reward)
plt.plot(xp1,yp1)
plt.plot(xp2,yp2)
plt.plot(xce,yce)
plt.plot(xc1,yc1,'.')
plt.plot(xp1[-1],yp1[-1],'*')
plt.plot(xp2[-1],yp2[-1],'*')
plt.show()
print("Trajectory Animation will be generated")
# Creating animation of the complete episode during execution
# First set up the figure, the axis, and the plot element we want to animate
fig = plt.figure()
ax = plt.axes(xlim=(-1, 11), ylim=(-1, 11))
line, = ax.plot([], [], 'go')
line1, = ax.plot([], [], 'go')
line2, = ax.plot([], [], 'ro')
# initialization function: plot the background of each frame
# animation function. This is called sequentially
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=600, interval=1, blit=True)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
anim.save('basic_animation.mp4', fps=20, extra_args=['-vcodec', 'libx264'])
# Plotting graph
# Episodes versus Avg. Rewards
plt.show()
| 26.839024 | 95 | 0.626136 | import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
import matplotlib.pyplot as plt
import math
from tensorflow.keras.models import load_model
from matplotlib import animation
from env_predict import *
from buffer import *
from model import *
from noise import *
dt = 0.4
v = 1.0
ve = 1.2
#Dimension of State Space for single agent
dim_agent_state = 5
num_agents = 3
#Dimension of State Space
dim_state = dim_agent_state*num_agents
#Number of Episodes
num_episodes = 3000
#Number of Steps
num_steps = 400
std_dev = 0.2
ou_noise = OUActionNoise(mean=np.zeros(1), std_deviation=float(std_dev) * np.ones(1))
ac_models = []
cr_models = []
target_ac = []
target_cr = []
path = 'C:/Users/HP/Desktop/desktop_folders/MS_Project_Codes/maddpg/maddpg_models/'
for i in range(num_agents):
ac_models.append(load_model(path + 'actor'+str(i)+'.h5'))
cr_models.append(load_model(path + 'critic'+str(i)+'.h5'))
target_ac.append(load_model(path + 'target_actor'+str(i)+'.h5'))
target_cr.append(load_model(path + 'target_critic'+str(i)+'.h5'))
def policy(state, noise_object, model):
    """Evaluate the actor *model* on *state* and return the action,
    clipped to the legal range [-1, 1], as a one-element list."""
    raw_action = tf.squeeze(model(state))
    # NOTE(review): the noise object is advanced but its value is not added
    # to the action (the + 0 below) — presumably deterministic evaluation;
    # confirm this is intentional.
    noise = noise_object()
    action = raw_action.numpy() + 0
    bounded_action = np.clip(action, -1.0, 1.0)
    return [np.squeeze(bounded_action)]
ep_reward_list = []
# To store average reward history of last few episodes
avg_reward_list = []
ag1_reward_list = []
ag2_reward_list = []
ev_reward_list = []
# Takes about 20 min to train
for ep in range(1):
env = environment()
prev_state = env.initial_obs()
episodic_reward = 0
ag1_reward = 0
ag2_reward = 0
ev_reward = 0
xp1 = []
yp1 = []
xp2 = []
yp2 = []
xce = []
yce = []
#while True:
for i in range(400):
tf_prev_state = tf.expand_dims(tf.convert_to_tensor(prev_state), 0)
actions = []
for j, model in enumerate(ac_models):
action = policy(tf_prev_state[:,5*j:5*(j+1)], ou_noise, model)
actions.append(float(action[0]))
# Recieve state and reward from environment.
#new_state, sys_state, ev_state = transition(prev_state, sys_state, actions, ev_state)
new_state = env.step(actions)
rewards = reward(new_state)
#buffer.record((prev_state, actions, rewards, new_state))
episodic_reward += sum(rewards)
ag1_reward += rewards[0]
ag2_reward += rewards[1]
ev_reward += rewards[2]
'''buffer.learn(ac_models, cr_models, target_ac, target_cr)
update_target(tau, ac_models, cr_models, target_ac, target_cr)'''
prev_state = new_state
xp1.append(env.p1_rx)
yp1.append(env.p1_ry)
xp2.append(env.p2_rx)
yp2.append(env.p2_ry)
xce.append(env.e_rx)
yce.append(env.e_ry)
d_p1_e = L(env.p1_rx, env.p1_ry, env.e_rx, env.e_ry)
d_p2_e = L(env.p2_rx, env.p2_ry, env.e_rx, env.e_ry)
if d_p1_e < 0.4 or d_p2_e < 0.4:
env = environment()
prev_state = env.initial_obs()
print("Captured")
#break
xc1 = [env.e_rx]
yc1 = [env.e_ry]
ep_reward_list.append(episodic_reward)
ag1_reward_list.append(ag1_reward)
ag2_reward_list.append(ag2_reward)
ev_reward_list.append(ev_reward)
# Mean of last 40 episodes
avg_reward = np.mean(ep_reward_list[-40:])
print("Trajectory plot will be generated")
avg_reward_list.append(avg_reward)
plt.plot(xp1,yp1)
plt.plot(xp2,yp2)
plt.plot(xce,yce)
plt.plot(xc1,yc1,'.')
plt.plot(xp1[-1],yp1[-1],'*')
plt.plot(xp2[-1],yp2[-1],'*')
plt.show()
print("Trajectory Animation will be generated")
# Creating animation of the complete episode during execution
# First set up the figure, the axis, and the plot element we want to animate
fig = plt.figure()
ax = plt.axes(xlim=(-1, 11), ylim=(-1, 11))
line, = ax.plot([], [], 'go')
line1, = ax.plot([], [], 'go')
line2, = ax.plot([], [], 'ro')
# initialization function: plot the background of each frame
def init():
    """Blit init callback: clear all three trajectory markers."""
    for artist in (line, line1, line2):
        artist.set_data([], [])
    return line, line1, line2
# animation function. This is called sequentially
def animate(i):
    """Frame callback: place each marker at its step i-1 position (a
    one-element slice, so frame 0 yields empty data)."""
    trajectories = ((line, xp1, yp1), (line1, xp2, yp2), (line2, xce, yce))
    for artist, xs, ys in trajectories:
        artist.set_data(xs[i-1:i], ys[i-1:i])
    return line, line1, line2
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=600, interval=1, blit=True)
# save the animation as an mp4. This requires ffmpeg or mencoder to be
# installed. The extra_args ensure that the x264 codec is used, so that
# the video can be embedded in html5. You may need to adjust this for
# your system: for more information, see
# http://matplotlib.sourceforge.net/api/animation_api.html
anim.save('basic_animation.mp4', fps=20, extra_args=['-vcodec', 'libx264'])
# Plotting graph
# Episodes versus Avg. Rewards
plt.show()
| 662 | 0 | 71 |
f55dcd389b1ce65f032913fdadc7cedcd8041d35 | 1,185 | py | Python | demo/blog/views.py | andrewebdev/django-ostinato | 2c435dea23319be6e9011e7381afca2b4092b5a2 | [
"MIT"
] | 5 | 2015-01-28T09:56:48.000Z | 2020-05-22T21:07:30.000Z | demo/blog/views.py | andrewebdev/django-ostinato | 2c435dea23319be6e9011e7381afca2b4092b5a2 | [
"MIT"
] | 18 | 2015-02-03T15:37:22.000Z | 2020-06-05T16:41:15.000Z | demo/blog/views.py | andrewebdev/django-ostinato | 2c435dea23319be6e9011e7381afca2b4092b5a2 | [
"MIT"
] | 2 | 2015-02-23T19:34:59.000Z | 2017-01-22T02:10:12.000Z | from ostinato.pages.views import PageView
from django.views.generic.detail import DetailView
from django.views.generic.dates import DateDetailView
from ostinato.pages.models import Page
from blog.models import Entry
| 28.902439 | 71 | 0.696203 | from ostinato.pages.views import PageView
from django.views.generic.detail import DetailView
from django.views.generic.dates import DateDetailView
from ostinato.pages.models import Page
from blog.models import Entry
class LandingPageView(PageView):
    """Blog landing page; adds the newest published entries to the context."""

    def get_context_data(self, **kwargs):
        context = super(LandingPageView, self).get_context_data(**kwargs)
        # the page content model configures how many entries to show
        limit = int(self.page.contents.max_latest_entries)
        context['latest_entries'] = Entry.objects.published()[:limit]
        return context
class EntryPreviewView(DetailView):
    """Preview of a single entry, rendered against the blog landing page."""
    model = Entry
    context_object_name = "entry"

    def get_context_data(self, **kwargs):
        context = super(EntryPreviewView, self).get_context_data(**kwargs)
        # first page using the landing-page template provides the chrome
        context['page'] = Page.objects.filter(template="blog.landingpage")[0]
        return context
class EntryDetailView(DateDetailView):
    """Dated archive detail view for a single blog entry."""
    model = Entry
    date_field = "publish_date"
    year_format = "%Y"
    month_format = "%m"
    day_format = "%d"
    context_object_name = "entry"

    def get_context_data(self, **kwargs):
        context = super(EntryDetailView, self).get_context_data(**kwargs)
        # first page using the landing-page template provides the chrome
        context['page'] = Page.objects.filter(template="blog.landingpage")[0]
        return context
| 567 | 301 | 96 |
62eb9db8c4f68adc489a91c82ddfa41ecb1db6aa | 919 | py | Python | question_2/data_for_analysis/main.py | juliuskrahn/media-analysis-climate-change | a31834fe92e3c13f42f9c446f720c8e173cd4e12 | [
"MIT"
] | 1 | 2021-11-09T10:04:59.000Z | 2021-11-09T10:04:59.000Z | question_2/data_for_analysis/main.py | juliuskrahn/media-analysis-climate-change | a31834fe92e3c13f42f9c446f720c8e173cd4e12 | [
"MIT"
] | null | null | null | question_2/data_for_analysis/main.py | juliuskrahn/media-analysis-climate-change | a31834fe92e3c13f42f9c446f720c8e173cd4e12 | [
"MIT"
] | null | null | null | """Load article sample (1%) into spreadsheet for manual content analysis"""
import pandas as pd
import utils
from question_1.is_about_climate_change_sql_statement import is_about_climate_change_sql_statement
import os.path
if __name__ == "__main__":
main()
| 31.689655 | 98 | 0.618063 | """Load article sample (1%) into spreadsheet for manual content analysis"""
import pandas as pd
import utils
from question_1.is_about_climate_change_sql_statement import is_about_climate_change_sql_statement
import os.path
def main():
    """Export, per publisher, a random article sample (TABLESAMPLE
    BERNOULLI(2), articles from 2015 on, climate-change filter) into an
    .ods spreadsheet under ./output for manual content analysis."""
    if not os.path.isdir("output"):
        os.mkdir("output")
    for publisher in utils.publishers:
        query = f"""
            SELECT url, publisher, TO_CHAR(published, 'YYYY-MM-DD') AS published
            FROM article
            TABLESAMPLE BERNOULLI(2)
            WHERE publisher = '{publisher}' AND (SELECT EXTRACT(YEAR FROM published)) >= 2015
            AND {is_about_climate_change_sql_statement[publisher.language]};
            """
        with utils.db_conn() as conn:
            sample = pd.read_sql_query(query, conn)
            sample.to_excel(f"output/{publisher}.ods", engine="odf")
if __name__ == "__main__":
main()
| 632 | 0 | 23 |
bfc6f72eba9b045c2ed844c9865387f1e8e14d5d | 8,036 | py | Python | BE_PESCAO/philipp_plot.py | srio/shadow3-scripts | 10712641333c29ca9854e9cc60d86cb321f3762b | [
"MIT"
] | 1 | 2019-10-30T10:06:15.000Z | 2019-10-30T10:06:15.000Z | BE_PESCAO/philipp_plot.py | srio/shadow3-scripts | 10712641333c29ca9854e9cc60d86cb321f3762b | [
"MIT"
] | null | null | null | BE_PESCAO/philipp_plot.py | srio/shadow3-scripts | 10712641333c29ca9854e9cc60d86cb321f3762b | [
"MIT"
] | null | null | null | import numpy
from srxraylib.plot.gol import plot
import scipy.constants as codata
import xraylib
if __name__ == "__main__":
do_calculate_spectrum = True
diamond_thickness_in_mm = 0.8
outfile = "spectrumE.dat"
rho = 1.848
if do_calculate_spectrum:
energy, flux = create_spectrum()
energy, flux = diamond_filter(energy, flux, diamond_thickness_in_mm=diamond_thickness_in_mm)
f = open(outfile, "w")
for i in range(energy.size):
f.write("%g %g\n" % (energy[i], flux[i]))
f.close()
print("File %s written to disk." % outfile)
energy_for_pescao, flux_for_pescao = remove_points_for_pescao(energy, flux)
f = open("spectrumEF.dat", "w")
for i in range(energy_for_pescao.size):
f.write("%g %g\n" % (energy_for_pescao[i], flux_for_pescao[i]))
f.close()
print("File %s written to disk." % "spectrumEF.dat")
else: # just read file with spectrum
a = numpy.loadtxt(outfile)
energy = a[:,0]
flux = a[:,1]
spectral_power = flux * 1e3 * codata.e
estep = (energy[1] - energy[0])
integrated_power = (spectral_power.sum() * estep)
print("integrated power", integrated_power)
print("volumetric power", integrated_power / (0.8**2))
#
# NIST data
#
nist = nist_be()
print(nist.shape)
nist_interpolated = 10 ** numpy.interp(numpy.log10(energy), numpy.log10(1e6 * nist[:,0]), numpy.log10(rho * nist[:,2]))
# plot(1e6 * nist[:, 0], nist[:, 1],
# 1e6 * nist[:, 0], nist[:, 2],
# energy, nist_interpolated/rho, xlog=1, ylog=1,
# xtitle="Photon energy [eV]", ytitle="[cm2/g]")
#
# xraylib data
#
XRL_MU = numpy.zeros_like(energy)
XRL_MU_E = numpy.zeros_like(energy)
for i in range(energy.size):
XRL_MU[i] = rho * xraylib.CS_Total(xraylib.SymbolToAtomicNumber("Be"), 1e-3*energy[i])
XRL_MU_E[i] = rho * xraylib.CS_Energy(xraylib.SymbolToAtomicNumber("Be"), 1e-3*energy[i])
plot(
1e-3 * energy, XRL_MU,
1e-3 * energy, XRL_MU_E,
1e-3 * energy, nist_interpolated,
xlog=0, ylog=1, legend=["mu","mu_e","nist_e"],
xtitle="Photon energy [keV]", ytitle="mu [cm^-1]")
#
# loop on thicknesses
#
THICKNESS_MM = numpy.concatenate( (numpy.linspace(0,1,100),numpy.linspace(1,10,50)))
VOLUMETRIC_ABSORBED_POWER = numpy.zeros_like(THICKNESS_MM)
VOLUMETRIC_ABSORBED_POWER_E = numpy.zeros_like(THICKNESS_MM)
VOLUMETRIC_ABSORBED_POWER_NIST = numpy.zeros_like(THICKNESS_MM)
for i, thickness_mm in enumerate(THICKNESS_MM):
thickness_mm = THICKNESS_MM[i]
absorbed_fraction = 1.0 - numpy.exp(-XRL_MU * thickness_mm * 1e-1)
absorbed_fraction_e = 1.0 - numpy.exp(-XRL_MU_E * thickness_mm * 1e-1)
absorbed_fraction_nist = 1.0 - numpy.exp(-nist_interpolated * thickness_mm * 1e-1)
# plot(energy, absorbed_fraction, energy, absorbed_fraction_e)
absorbed_power = (flux * absorbed_fraction * codata.e * 1e3).sum() * estep
volumetric_absorbed_power = absorbed_power / (0.8 * 0.8 * thickness_mm)
absorbed_power_e = (flux * absorbed_fraction_e * codata.e * 1e3).sum() * estep
volumetric_absorbed_power_e = absorbed_power_e / (0.8 * 0.8 * thickness_mm)
absorbed_power_nist = (flux * absorbed_fraction_nist * codata.e * 1e3).sum() * estep
volumetric_absorbed_power_nist = absorbed_power_nist / (0.8 * 0.8 * thickness_mm)
VOLUMETRIC_ABSORBED_POWER[i] = volumetric_absorbed_power
VOLUMETRIC_ABSORBED_POWER_E[i] = volumetric_absorbed_power_e
VOLUMETRIC_ABSORBED_POWER_NIST[i] = volumetric_absorbed_power_nist
print(integrated_power, absorbed_power, volumetric_absorbed_power)
print(integrated_power, absorbed_power_e, volumetric_absorbed_power_e)
#
# load pescao results and make final plot
#
pescao = numpy.loadtxt("pescao_0p8.dat", skiprows=2)
plot(THICKNESS_MM, VOLUMETRIC_ABSORBED_POWER,
THICKNESS_MM, VOLUMETRIC_ABSORBED_POWER_E,
THICKNESS_MM, VOLUMETRIC_ABSORBED_POWER_NIST,
pescao[:,0], pescao[:,1]/(pescao[:,0] * 0.8 * 0.8),
xtitle="Depth [mm]", ytitle="Volumetric absorption [W/mm3]",
title="diamond window thickness = %g mm" % diamond_thickness_in_mm,
legend=["mu","mu_e","nist_e","Monte Carlo"])
| 37.551402 | 123 | 0.62556 | import numpy
from srxraylib.plot.gol import plot
import scipy.constants as codata
import xraylib
def create_spectrum():
#
# script to make the calculations (created by XOPPY:undulator_spectrum)
#
from orangecontrib.xoppy.util.xoppy_undulators import xoppy_calc_undulator_spectrum
energy, flux, spectral_power, cumulated_power = xoppy_calc_undulator_spectrum(
ELECTRONENERGY=6.0,
ELECTRONENERGYSPREAD=0.001,
ELECTRONCURRENT=0.2,
ELECTRONBEAMSIZEH=3.01836e-05,
ELECTRONBEAMSIZEV=3.63641e-06,
ELECTRONBEAMDIVERGENCEH=4.36821e-06,
ELECTRONBEAMDIVERGENCEV=1.37498e-06,
PERIODID=0.018,
NPERIODS=222,
KV=1.76,
KH=0.0,
KPHASE=0.0,
DISTANCE=27.5,
GAPH=0.0008,
GAPV=0.0008,
GAPH_CENTER=0.0,
GAPV_CENTER=0.0,
PHOTONENERGYMIN=3000.0,
PHOTONENERGYMAX=150000.0,
PHOTONENERGYPOINTS=2800,
METHOD=2,
USEEMITTANCES=1)
# example plot
from srxraylib.plot.gol import plot
plot(energy,flux,ytitle="Flux [photons/s/o.1%bw]",xtitle="Poton energy [eV]",title="Undulator Flux",
xlog=False,ylog=False,show=False)
plot(energy,spectral_power,ytitle="Power [W/eV]",xtitle="Poton energy [eV]",title="Undulator Spectral Power",
xlog=False,ylog=False,show=False)
plot(energy,cumulated_power,ytitle="Cumulated Power [W]",xtitle="Poton energy [eV]",title="Undulator Cumulated Power",
xlog=False,ylog=False,show=True)
#
# end script
#
return energy, flux
def nist_be():
return numpy.array([
[1.00000E-03, 6.041E+02, 6.035E+02],
[1.50000E-03, 1.797E+02, 1.791E+02],
[2.00000E-03, 7.469E+01, 7.422E+01],
[3.00000E-03, 2.127E+01, 2.090E+01],
[4.00000E-03, 8.685E+00, 8.367E+00],
[5.00000E-03, 4.369E+00, 4.081E+00],
[6.00000E-03, 2.527E+00, 2.260E+00],
[8.00000E-03, 1.124E+00, 8.839E-01],
[1.00000E-02, 6.466E-01, 4.255E-01],
[1.50000E-02, 3.070E-01, 1.143E-01],
[2.00000E-02, 2.251E-01, 4.780E-02],
[3.00000E-02, 1.792E-01, 1.898E-02],
[4.00000E-02, 1.640E-01, 1.438E-02],
[5.00000E-02, 1.554E-01, 1.401E-02],
[6.00000E-02, 1.493E-01, 1.468E-02],
[8.00000E-02, 1.401E-01, 1.658E-02],
[1.00000E-01, 1.328E-01, 1.836E-02],
[1.50000E-01, 1.190E-01, 2.157E-02],
[2.00000E-01, 1.089E-01, 2.353E-02],
[3.00000E-01, 9.463E-02, 2.548E-02],
[4.00000E-01, 8.471E-02, 2.620E-02],
[5.00000E-01, 7.739E-02, 2.639E-02],
[6.00000E-01, 7.155E-02, 2.627E-02],
[8.00000E-01, 6.286E-02, 2.565E-02],
[1.00000E+00, 5.652E-02, 2.483E-02],
[1.25000E+00, 5.054E-02, 2.373E-02],
[1.50000E+00, 4.597E-02, 2.268E-02],
[2.00000E+00, 3.938E-02, 2.083E-02],
[3.00000E+00, 3.138E-02, 1.806E-02],
[4.00000E+00, 2.664E-02, 1.617E-02],
[5.00000E+00, 2.347E-02, 1.479E-02],
[6.00000E+00, 2.121E-02, 1.377E-02],
[8.00000E+00, 1.819E-02, 1.233E-02],
[1.00000E+01, 1.627E-02, 1.138E-02],
[1.50000E+01, 1.361E-02, 1.001E-02],
[2.00000E+01, 1.227E-02, 9.294E-03]])
def diamond_filter(energy, flux, diamond_thickness_in_mm = 0.3):
XRL_MU = numpy.zeros_like(energy)
for i in range(energy.size):
XRL_MU[i] = 3.51 * xraylib.CS_Total(xraylib.SymbolToAtomicNumber("C"), 1e-3*energy[i])
return energy, flux * numpy.exp(- XRL_MU * diamond_thickness_in_mm * 1e-1)
def remove_points_for_pescao(x, y, ratio=1000.0):
ymax = y.max()
igood = numpy.argwhere(y > ymax / ratio)
return x[igood].copy(), y[igood].copy()
if __name__ == "__main__":
do_calculate_spectrum = True
diamond_thickness_in_mm = 0.8
outfile = "spectrumE.dat"
rho = 1.848
if do_calculate_spectrum:
energy, flux = create_spectrum()
energy, flux = diamond_filter(energy, flux, diamond_thickness_in_mm=diamond_thickness_in_mm)
f = open(outfile, "w")
for i in range(energy.size):
f.write("%g %g\n" % (energy[i], flux[i]))
f.close()
print("File %s written to disk." % outfile)
energy_for_pescao, flux_for_pescao = remove_points_for_pescao(energy, flux)
f = open("spectrumEF.dat", "w")
for i in range(energy_for_pescao.size):
f.write("%g %g\n" % (energy_for_pescao[i], flux_for_pescao[i]))
f.close()
print("File %s written to disk." % "spectrumEF.dat")
else: # just read file with spectrum
a = numpy.loadtxt(outfile)
energy = a[:,0]
flux = a[:,1]
spectral_power = flux * 1e3 * codata.e
estep = (energy[1] - energy[0])
integrated_power = (spectral_power.sum() * estep)
print("integrated power", integrated_power)
print("volumetric power", integrated_power / (0.8**2))
#
# NIST data
#
nist = nist_be()
print(nist.shape)
nist_interpolated = 10 ** numpy.interp(numpy.log10(energy), numpy.log10(1e6 * nist[:,0]), numpy.log10(rho * nist[:,2]))
# plot(1e6 * nist[:, 0], nist[:, 1],
# 1e6 * nist[:, 0], nist[:, 2],
# energy, nist_interpolated/rho, xlog=1, ylog=1,
# xtitle="Photon energy [eV]", ytitle="[cm2/g]")
#
# xraylib data
#
XRL_MU = numpy.zeros_like(energy)
XRL_MU_E = numpy.zeros_like(energy)
for i in range(energy.size):
XRL_MU[i] = rho * xraylib.CS_Total(xraylib.SymbolToAtomicNumber("Be"), 1e-3*energy[i])
XRL_MU_E[i] = rho * xraylib.CS_Energy(xraylib.SymbolToAtomicNumber("Be"), 1e-3*energy[i])
plot(
1e-3 * energy, XRL_MU,
1e-3 * energy, XRL_MU_E,
1e-3 * energy, nist_interpolated,
xlog=0, ylog=1, legend=["mu","mu_e","nist_e"],
xtitle="Photon energy [keV]", ytitle="mu [cm^-1]")
#
# loop on thicknesses
#
THICKNESS_MM = numpy.concatenate( (numpy.linspace(0,1,100),numpy.linspace(1,10,50)))
VOLUMETRIC_ABSORBED_POWER = numpy.zeros_like(THICKNESS_MM)
VOLUMETRIC_ABSORBED_POWER_E = numpy.zeros_like(THICKNESS_MM)
VOLUMETRIC_ABSORBED_POWER_NIST = numpy.zeros_like(THICKNESS_MM)
for i, thickness_mm in enumerate(THICKNESS_MM):
thickness_mm = THICKNESS_MM[i]
absorbed_fraction = 1.0 - numpy.exp(-XRL_MU * thickness_mm * 1e-1)
absorbed_fraction_e = 1.0 - numpy.exp(-XRL_MU_E * thickness_mm * 1e-1)
absorbed_fraction_nist = 1.0 - numpy.exp(-nist_interpolated * thickness_mm * 1e-1)
# plot(energy, absorbed_fraction, energy, absorbed_fraction_e)
absorbed_power = (flux * absorbed_fraction * codata.e * 1e3).sum() * estep
volumetric_absorbed_power = absorbed_power / (0.8 * 0.8 * thickness_mm)
absorbed_power_e = (flux * absorbed_fraction_e * codata.e * 1e3).sum() * estep
volumetric_absorbed_power_e = absorbed_power_e / (0.8 * 0.8 * thickness_mm)
absorbed_power_nist = (flux * absorbed_fraction_nist * codata.e * 1e3).sum() * estep
volumetric_absorbed_power_nist = absorbed_power_nist / (0.8 * 0.8 * thickness_mm)
VOLUMETRIC_ABSORBED_POWER[i] = volumetric_absorbed_power
VOLUMETRIC_ABSORBED_POWER_E[i] = volumetric_absorbed_power_e
VOLUMETRIC_ABSORBED_POWER_NIST[i] = volumetric_absorbed_power_nist
print(integrated_power, absorbed_power, volumetric_absorbed_power)
print(integrated_power, absorbed_power_e, volumetric_absorbed_power_e)
#
# load pescao results and make final plot
#
pescao = numpy.loadtxt("pescao_0p8.dat", skiprows=2)
plot(THICKNESS_MM, VOLUMETRIC_ABSORBED_POWER,
THICKNESS_MM, VOLUMETRIC_ABSORBED_POWER_E,
THICKNESS_MM, VOLUMETRIC_ABSORBED_POWER_NIST,
pescao[:,0], pescao[:,1]/(pescao[:,0] * 0.8 * 0.8),
xtitle="Depth [mm]", ytitle="Volumetric absorption [W/mm3]",
title="diamond window thickness = %g mm" % diamond_thickness_in_mm,
legend=["mu","mu_e","nist_e","Monte Carlo"])
| 3,519 | 0 | 92 |
7fbd230fad03dd0a383182ed4e5574ce0bb47687 | 833 | py | Python | wyr/generators/twitter.py | kcsaff/wyr | 7f7a924f38dc627b2a1c1fc014c0324a75696d06 | [
"MIT"
] | null | null | null | wyr/generators/twitter.py | kcsaff/wyr | 7f7a924f38dc627b2a1c1fc014c0324a75696d06 | [
"MIT"
] | null | null | null | wyr/generators/twitter.py | kcsaff/wyr | 7f7a924f38dc627b2a1c1fc014c0324a75696d06 | [
"MIT"
] | null | null | null | import random
import html
from functools import cached_property
from wyr.console import Console
| 27.766667 | 80 | 0.655462 | import random
import html
from functools import cached_property
from wyr.console import Console
class TweetGrabber(object):
def __init__(self, keys, console=None):
self.__keys = keys
if console is None:
console = Console()
self.__console = console
def random_tweet(self, query):
results = self.client.api.search(query, tweet_mode='extended')
if results:
self.__console.okay(f'Found {len(results)} tweets matching {query}')
rc = random.choice(results)
if hasattr(rc, 'retweeted_status'):
return html.unescape(rc.retweeted_status.full_text)
else:
return html.unescape(rc.full_text)
@cached_property
def client(self):
from tweebot import TwitterClient
return TwitterClient(self.__keys)
| 604 | 107 | 23 |
39bdcd851d1e101d6ce64bed3b83cb9b64c547f3 | 1,955 | py | Python | examples/get_bitmex_data.py | mstumberger/Quantdom | 2649aba90c741618a75900691480ddb720c461f4 | [
"Apache-2.0"
] | 1 | 2018-10-04T17:10:40.000Z | 2018-10-04T17:10:40.000Z | examples/get_bitmex_data.py | mstumberger/Quantdom | 2649aba90c741618a75900691480ddb720c461f4 | [
"Apache-2.0"
] | null | null | null | examples/get_bitmex_data.py | mstumberger/Quantdom | 2649aba90c741618a75900691480ddb720c461f4 | [
"Apache-2.0"
] | null | null | null | import requests
import math
import pandas as pd
from datetime import datetime
from datetime import timedelta
import requests
interval = 1
symbol = 'XBTUSD'
# get data from
timestamp_from = 1514761200
# till
timestamp_now = 1536530400
max_back_time = 0
max_bars = 10080
max_bars_time = ((interval * 60) * max_bars)
time_to_iterate = timestamp_now - timestamp_from
baseURI = "https://www.bitmex.com/api/v1"
endpoint = "/trade/bucketed"
time_ago = datetime.now() - timedelta(minutes=150)
request = requests.get(baseURI + endpoint, params={'binSize': '1m', 'symbol': 'XBTUSD', 'count': 750, 'startTime': time_ago})
print("data: start:", datetime.fromtimestamp(timestamp_from), "end:", datetime.fromtimestamp(timestamp_now))
data_frames = []
for x in range(int(math.ceil(time_to_iterate / max_bars_time))):
if x > 0:
if (max_back_time - max_bars_time) > timestamp_from:
max_back_time, timestamp_now = (max_back_time - max_bars_time), max_back_time
else:
max_back_time, timestamp_now = timestamp_from, max_back_time
elif x == 0:
if time_to_iterate < max_bars_time:
max_back_time = timestamp_from
else:
max_back_time = timestamp_now - max_bars_time
print("SPLIT TIMING", "start:", datetime.fromtimestamp(max_back_time), "end:", datetime.fromtimestamp(timestamp_now))
r = requests.get('https://www.bitmex.com/api/udf/history?symbol={}&resolution={}&from={}&to={}'.format(symbol, interval, max_back_time, timestamp_now)).json()
data = {
'Date': r['t'],
'Open': r['o'],
'High': r['o'],
'Low': r['o'],
'Close': r['c'],
'Adj Close': r['o'],
'Volume': r['v']
}
columns = ['Date', 'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume']
df = pd.DataFrame(data, columns=columns)
df['Date'] = pd.to_datetime(df['Date'], unit='s')
data_frames.append(df)
print(pd.concat(data_frames))
| 31.031746 | 162 | 0.658312 | import requests
import math
import pandas as pd
from datetime import datetime
from datetime import timedelta
import requests
interval = 1
symbol = 'XBTUSD'
# get data from
timestamp_from = 1514761200
# till
timestamp_now = 1536530400
max_back_time = 0
max_bars = 10080
max_bars_time = ((interval * 60) * max_bars)
time_to_iterate = timestamp_now - timestamp_from
baseURI = "https://www.bitmex.com/api/v1"
endpoint = "/trade/bucketed"
time_ago = datetime.now() - timedelta(minutes=150)
request = requests.get(baseURI + endpoint, params={'binSize': '1m', 'symbol': 'XBTUSD', 'count': 750, 'startTime': time_ago})
print("data: start:", datetime.fromtimestamp(timestamp_from), "end:", datetime.fromtimestamp(timestamp_now))
data_frames = []
for x in range(int(math.ceil(time_to_iterate / max_bars_time))):
if x > 0:
if (max_back_time - max_bars_time) > timestamp_from:
max_back_time, timestamp_now = (max_back_time - max_bars_time), max_back_time
else:
max_back_time, timestamp_now = timestamp_from, max_back_time
elif x == 0:
if time_to_iterate < max_bars_time:
max_back_time = timestamp_from
else:
max_back_time = timestamp_now - max_bars_time
print("SPLIT TIMING", "start:", datetime.fromtimestamp(max_back_time), "end:", datetime.fromtimestamp(timestamp_now))
r = requests.get('https://www.bitmex.com/api/udf/history?symbol={}&resolution={}&from={}&to={}'.format(symbol, interval, max_back_time, timestamp_now)).json()
data = {
'Date': r['t'],
'Open': r['o'],
'High': r['o'],
'Low': r['o'],
'Close': r['c'],
'Adj Close': r['o'],
'Volume': r['v']
}
columns = ['Date', 'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume']
df = pd.DataFrame(data, columns=columns)
df['Date'] = pd.to_datetime(df['Date'], unit='s')
data_frames.append(df)
print(pd.concat(data_frames))
| 0 | 0 | 0 |
3ff79e66feee5dba038657ba5493972b25ff3838 | 934 | py | Python | funcionalidade/migrations/0001_initial.py | LeandroMelloo/curso_completo_django_rest_framework_alura | 3d319db12e955049361dd0d3673958a277778a84 | [
"Apache-2.0"
] | null | null | null | funcionalidade/migrations/0001_initial.py | LeandroMelloo/curso_completo_django_rest_framework_alura | 3d319db12e955049361dd0d3673958a277778a84 | [
"Apache-2.0"
] | null | null | null | funcionalidade/migrations/0001_initial.py | LeandroMelloo/curso_completo_django_rest_framework_alura | 3d319db12e955049361dd0d3673958a277778a84 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 3.2.6 on 2021-08-05 22:52
from django.db import migrations, models
| 29.1875 | 95 | 0.524625 | # Generated by Django 3.2.6 on 2021-08-05 22:52
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Funcionalidade',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)),
('nome', models.CharField(max_length=100, unique=True)),
('visualizar', models.BooleanField()),
('incluir', models.BooleanField()),
('excluir', models.BooleanField()),
('alterar', models.BooleanField()),
('inativar', models.BooleanField()),
('ativo', models.BooleanField(default=True)),
],
options={
'db_table': 'adm_funcionalidade',
'managed': True,
},
),
]
| 0 | 820 | 23 |
18b75a173943329d828fd3d13ff3565c010de306 | 9,545 | py | Python | Bayesian methods for Machine Learning/VAE.py | gesuwen/Machine-Learning | 02a93e4cc32a6707c018386f2f745f9937f94adc | [
"MIT"
] | null | null | null | Bayesian methods for Machine Learning/VAE.py | gesuwen/Machine-Learning | 02a93e4cc32a6707c018386f2f745f9937f94adc | [
"MIT"
] | null | null | null | Bayesian methods for Machine Learning/VAE.py | gesuwen/Machine-Learning | 02a93e4cc32a6707c018386f2f745f9937f94adc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# A Variational Autoencoder trained on the MNIST dataset.
import tensorflow as tf
import keras
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Input, Dense, Lambda, InputLayer, concatenate
from keras.models import Model, Sequential
from keras import backend as K
from keras.datasets import mnist
from keras.utils import np_utils
# Variational Lower Bound
def vlb_binomial(x, x_decoded_mean, t_mean, t_log_var):
"""Returns the value of Variational Lower Bound
The inputs are tf.Tensor
x: (batch_size x number_of_pixels) matrix with one image per row with zeros and ones
x_decoded_mean: (batch_size x number_of_pixels) mean of the distribution p(x | t), real numbers from 0 to 1
t_mean: (batch_size x latent_dim) mean vector of the (normal) distribution q(t | x)
t_log_var: (batch_size x latent_dim) logarithm of the variance vector of the (normal) distribution q(t | x)
Returns:
A tf.Tensor with one element (averaged across the batch), VLB
"""
klterm=0.5*K.sum(-1-t_log_var+K.square(t_mean)+K.exp(t_log_var),axis=1)#batch_size
reconst=K.sum(K.binary_crossentropy(x,x_decoded_mean),axis=1)
return K.mean(klterm+reconst)
# Sampling from the distribution
# q(t | x) = N(t_mean, exp(t_log_var))
# with reparametrization trick.
def sampling(args):
"""Returns sample from a distribution N(args[0], diag(args[1]))
The sample should be computed with reparametrization trick.
The inputs are tf.Tensor
args[0]: (batch_size x latent_dim) mean of the desired distribution
args[1]: (batch_size x latent_dim) logarithm of the variance vector of the desired distribution
Returns:
A tf.Tensor of size (batch_size x latent_dim), the samples.
"""
t_mean, t_log_var = args
output = tf.random_normal(t_mean.get_shape())
output = output * tf.exp(0.5 * t_log_var) + t_mean
return output
if __name__ == '__main__':
# Start tf session so we can run code.
sess = tf.InteractiveSession()
# Connect keras to the created session.
K.set_session(sess)
batch_size = 100
original_dim = 784 # Number of pixels in MNIST images.
latent_dim = 100 # d, dimensionality of the latent code t.
intermediate_dim = 256 # Size of the hidden layer.
epochs = 20
x = Input(batch_shape=(batch_size, original_dim))
encoder = create_encoder(original_dim)
get_t_mean = Lambda(lambda h: h[:, :latent_dim])
get_t_log_var = Lambda(lambda h: h[:, latent_dim:])
h = encoder(x)
t_mean = get_t_mean(h)
t_log_var = get_t_log_var(h)
t = Lambda(sampling)([t_mean, t_log_var])
decoder = create_decoder(latent_dim)
x_decoded_mean = decoder(t)
loss = vlb_binomial(x, x_decoded_mean, t_mean, t_log_var)
vae = Model(x, x_decoded_mean)
# Keras will provide input (x) and output (x_decoded_mean) to the function that
# should construct loss, but since our function also depends on other
# things (e.g. t_means), it is easier to build the loss in advance and pass
# a function that always returns it.
vae.compile(optimizer=keras.optimizers.RMSprop(lr=0.001), loss=lambda x, y: loss)
# Load and prepare the data
# train the VAE on MNIST digits
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# One hot encoding.
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
# Training the model
hist = vae.fit(x=x_train, y=x_train,
shuffle=True,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_test, x_test),
verbose=2)
# Visualize reconstructions for train and validation data
fig = plt.figure(figsize=(10, 10))
for fid_idx, (data, title) in enumerate(
zip([x_train, x_test], ['Train', 'Validation'])):
n = 10 # figure with 10 x 2 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * 2))
decoded = sess.run(x_decoded_mean, feed_dict={x: data[:batch_size, :]})
for i in range(10):
figure[i * digit_size: (i + 1) * digit_size,
:digit_size] = data[i, :].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
digit_size:] = decoded[i, :].reshape(digit_size, digit_size)
ax = fig.add_subplot(1, 2, fid_idx + 1)
ax.imshow(figure, cmap='Greys_r')
ax.set_title(title)
ax.axis('off')
plt.show()
# Hallucinating new data
# generate new samples of images from your trained VAE
n_samples = 10 # To pass automatic grading please use at least 2 samples here.
# sampled_im_mean is a tf.Tensor of size 10 x 784 with 10 random
# images sampled from the vae model.
sampled_im_mean = decoder(tf.random_normal((n_samples,latent_dim)))
sampled_im_mean_np = sess.run(sampled_im_mean)
# Show the sampled images.
plt.figure()
for i in range(n_samples):
ax = plt.subplot(n_samples // 5 + 1, 5, i + 1)
plt.imshow(sampled_im_mean_np[i, :].reshape(28, 28), cmap='gray')
ax.axis('off')
plt.show()
# Conditional VAE
# Implement CVAE model
# One-hot labels placeholder.
x = Input(batch_shape=(batch_size, original_dim))
label = Input(batch_shape=(batch_size, 10))
cond_encoder = create_encoder(original_dim+10)
cond_h = cond_encoder(concatenate([x, label]))
cond_t_mean = get_t_mean(cond_h) # Mean of the latent code (without label) for cvae model.
cond_t_log_var = get_t_log_var(cond_h) # Logarithm of the variance of the latent code (without label) for cvae model.
cond_t = Lambda(sampling)([cond_t_mean, cond_t_log_var])
cond_decoder = create_decoder(latent_dim+10)
cond_x_decoded_mean = cond_decoder(concatenate([cond_t, label])) # Final output of the cvae model.
# Define the loss and the model
conditional_loss = vlb_binomial(x, cond_x_decoded_mean, cond_t_mean, cond_t_log_var)
cvae = Model([x, label], cond_x_decoded_mean)
cvae.compile(optimizer=keras.optimizers.RMSprop(lr=0.001), loss=lambda x, y: conditional_loss)
# Train the model
hist = cvae.fit(x=[x_train, y_train],
y=x_train,
shuffle=True,
epochs=epochs,
batch_size=batch_size,
validation_data=([x_test, y_test], x_test),
verbose=2)
# Visualize reconstructions for train and validation data
fig = plt.figure(figsize=(10, 10))
for fid_idx, (x_data, y_data, title) in enumerate(
zip([x_train, x_test], [y_train, y_test], ['Train', 'Validation'])):
n = 10 # figure with 10 x 2 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * 2))
decoded = sess.run(cond_x_decoded_mean,
feed_dict={x: x_data[:batch_size, :],
label: y_data[:batch_size, :]})
for i in range(10):
figure[i * digit_size: (i + 1) * digit_size,
:digit_size] = x_data[i, :].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
digit_size:] = decoded[i, :].reshape(digit_size, digit_size)
ax = fig.add_subplot(1, 2, fid_idx + 1)
ax.imshow(figure, cmap='Greys_r')
ax.set_title(title)
ax.axis('off')
plt.show()
# Conditionally hallucinate data
# Prepare one hot labels of form
# 0 0 0 0 0 1 1 1 1 1 2 2 2 2 2 ...
# to sample five zeros, five ones, etc
curr_labels = np.eye(10)
curr_labels = np.repeat(curr_labels, 5, axis=0) # Its shape is 50 x 10.
# cond_sampled_im_mean is a tf.Tensor of size 50 x 784 with 5 random zeros,
# then 5 random ones, etc sampled from the cvae model.
cond_sampled_im_mean = cond_decoder(concatenate([tf.random_normal((50,latent_dim)), tf.convert_to_tensor(curr_labels, dtype=tf.float32)]))
cond_sampled_im_mean_np = sess.run(cond_sampled_im_mean)
# Show the sampled images.
plt.figure(figsize=(10, 10))
global_idx = 0
for digit in range(10):
for _ in range(5):
ax = plt.subplot(10, 5, global_idx + 1)
plt.imshow(cond_sampled_im_mean_np[global_idx, :].reshape(28, 28), cmap='gray')
ax.axis('off')
global_idx += 1
plt.show() | 40.617021 | 142 | 0.647774 | # -*- coding: utf-8 -*-
# A Variational Autoencoder trained on the MNIST dataset.
import tensorflow as tf
import keras
import numpy as np
import matplotlib.pyplot as plt
from keras.layers import Input, Dense, Lambda, InputLayer, concatenate
from keras.models import Model, Sequential
from keras import backend as K
from keras.datasets import mnist
from keras.utils import np_utils
# Variational Lower Bound
def vlb_binomial(x, x_decoded_mean, t_mean, t_log_var):
"""Returns the value of Variational Lower Bound
The inputs are tf.Tensor
x: (batch_size x number_of_pixels) matrix with one image per row with zeros and ones
x_decoded_mean: (batch_size x number_of_pixels) mean of the distribution p(x | t), real numbers from 0 to 1
t_mean: (batch_size x latent_dim) mean vector of the (normal) distribution q(t | x)
t_log_var: (batch_size x latent_dim) logarithm of the variance vector of the (normal) distribution q(t | x)
Returns:
A tf.Tensor with one element (averaged across the batch), VLB
"""
klterm=0.5*K.sum(-1-t_log_var+K.square(t_mean)+K.exp(t_log_var),axis=1)#batch_size
reconst=K.sum(K.binary_crossentropy(x,x_decoded_mean),axis=1)
return K.mean(klterm+reconst)
def create_encoder(input_dim):
# Encoder network.
# We instantiate these layers separately so as to reuse them later
encoder = Sequential(name='encoder')
encoder.add(InputLayer([input_dim]))
encoder.add(Dense(intermediate_dim, activation='relu'))
encoder.add(Dense(2 * latent_dim))
return encoder
# Sampling from the distribution
# q(t | x) = N(t_mean, exp(t_log_var))
# with reparametrization trick.
def sampling(args):
"""Returns sample from a distribution N(args[0], diag(args[1]))
The sample should be computed with reparametrization trick.
The inputs are tf.Tensor
args[0]: (batch_size x latent_dim) mean of the desired distribution
args[1]: (batch_size x latent_dim) logarithm of the variance vector of the desired distribution
Returns:
A tf.Tensor of size (batch_size x latent_dim), the samples.
"""
t_mean, t_log_var = args
output = tf.random_normal(t_mean.get_shape())
output = output * tf.exp(0.5 * t_log_var) + t_mean
return output
def create_decoder(input_dim):
# Decoder network
# We instantiate these layers separately so as to reuse them later
decoder = Sequential(name='decoder')
decoder.add(InputLayer([input_dim]))
decoder.add(Dense(intermediate_dim, activation='relu'))
decoder.add(Dense(original_dim, activation='sigmoid'))
return decoder
if __name__ == '__main__':
# Start tf session so we can run code.
sess = tf.InteractiveSession()
# Connect keras to the created session.
K.set_session(sess)
batch_size = 100
original_dim = 784 # Number of pixels in MNIST images.
latent_dim = 100 # d, dimensionality of the latent code t.
intermediate_dim = 256 # Size of the hidden layer.
epochs = 20
x = Input(batch_shape=(batch_size, original_dim))
encoder = create_encoder(original_dim)
get_t_mean = Lambda(lambda h: h[:, :latent_dim])
get_t_log_var = Lambda(lambda h: h[:, latent_dim:])
h = encoder(x)
t_mean = get_t_mean(h)
t_log_var = get_t_log_var(h)
t = Lambda(sampling)([t_mean, t_log_var])
decoder = create_decoder(latent_dim)
x_decoded_mean = decoder(t)
loss = vlb_binomial(x, x_decoded_mean, t_mean, t_log_var)
vae = Model(x, x_decoded_mean)
# Keras will provide input (x) and output (x_decoded_mean) to the function that
# should construct loss, but since our function also depends on other
# things (e.g. t_means), it is easier to build the loss in advance and pass
# a function that always returns it.
vae.compile(optimizer=keras.optimizers.RMSprop(lr=0.001), loss=lambda x, y: loss)
# Load and prepare the data
# train the VAE on MNIST digits
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# One hot encoding.
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
# Training the model
hist = vae.fit(x=x_train, y=x_train,
shuffle=True,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_test, x_test),
verbose=2)
# Visualize reconstructions for train and validation data
fig = plt.figure(figsize=(10, 10))
for fid_idx, (data, title) in enumerate(
zip([x_train, x_test], ['Train', 'Validation'])):
n = 10 # figure with 10 x 2 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * 2))
decoded = sess.run(x_decoded_mean, feed_dict={x: data[:batch_size, :]})
for i in range(10):
figure[i * digit_size: (i + 1) * digit_size,
:digit_size] = data[i, :].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
digit_size:] = decoded[i, :].reshape(digit_size, digit_size)
ax = fig.add_subplot(1, 2, fid_idx + 1)
ax.imshow(figure, cmap='Greys_r')
ax.set_title(title)
ax.axis('off')
plt.show()
# Hallucinating new data
# generate new samples of images from your trained VAE
n_samples = 10 # To pass automatic grading please use at least 2 samples here.
# sampled_im_mean is a tf.Tensor of size 10 x 784 with 10 random
# images sampled from the vae model.
sampled_im_mean = decoder(tf.random_normal((n_samples,latent_dim)))
sampled_im_mean_np = sess.run(sampled_im_mean)
# Show the sampled images.
plt.figure()
for i in range(n_samples):
ax = plt.subplot(n_samples // 5 + 1, 5, i + 1)
plt.imshow(sampled_im_mean_np[i, :].reshape(28, 28), cmap='gray')
ax.axis('off')
plt.show()
# Conditional VAE
# Implement CVAE model
# One-hot labels placeholder.
x = Input(batch_shape=(batch_size, original_dim))
label = Input(batch_shape=(batch_size, 10))
cond_encoder = create_encoder(original_dim+10)
cond_h = cond_encoder(concatenate([x, label]))
cond_t_mean = get_t_mean(cond_h) # Mean of the latent code (without label) for cvae model.
cond_t_log_var = get_t_log_var(cond_h) # Logarithm of the variance of the latent code (without label) for cvae model.
cond_t = Lambda(sampling)([cond_t_mean, cond_t_log_var])
cond_decoder = create_decoder(latent_dim+10)
cond_x_decoded_mean = cond_decoder(concatenate([cond_t, label])) # Final output of the cvae model.
# Define the loss and the model
conditional_loss = vlb_binomial(x, cond_x_decoded_mean, cond_t_mean, cond_t_log_var)
cvae = Model([x, label], cond_x_decoded_mean)
cvae.compile(optimizer=keras.optimizers.RMSprop(lr=0.001), loss=lambda x, y: conditional_loss)
# Train the model
hist = cvae.fit(x=[x_train, y_train],
y=x_train,
shuffle=True,
epochs=epochs,
batch_size=batch_size,
validation_data=([x_test, y_test], x_test),
verbose=2)
# Visualize reconstructions for train and validation data
fig = plt.figure(figsize=(10, 10))
for fid_idx, (x_data, y_data, title) in enumerate(
zip([x_train, x_test], [y_train, y_test], ['Train', 'Validation'])):
n = 10 # figure with 10 x 2 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * 2))
decoded = sess.run(cond_x_decoded_mean,
feed_dict={x: x_data[:batch_size, :],
label: y_data[:batch_size, :]})
for i in range(10):
figure[i * digit_size: (i + 1) * digit_size,
:digit_size] = x_data[i, :].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
digit_size:] = decoded[i, :].reshape(digit_size, digit_size)
ax = fig.add_subplot(1, 2, fid_idx + 1)
ax.imshow(figure, cmap='Greys_r')
ax.set_title(title)
ax.axis('off')
plt.show()
# Conditionally hallucinate data
# Prepare one hot labels of form
# 0 0 0 0 0 1 1 1 1 1 2 2 2 2 2 ...
# to sample five zeros, five ones, etc
curr_labels = np.eye(10)
curr_labels = np.repeat(curr_labels, 5, axis=0) # Its shape is 50 x 10.
# cond_sampled_im_mean is a tf.Tensor of size 50 x 784 with 5 random zeros,
# then 5 random ones, etc sampled from the cvae model.
cond_sampled_im_mean = cond_decoder(concatenate([tf.random_normal((50,latent_dim)), tf.convert_to_tensor(curr_labels, dtype=tf.float32)]))
cond_sampled_im_mean_np = sess.run(cond_sampled_im_mean)
# Show the sampled images.
plt.figure(figsize=(10, 10))
global_idx = 0
for digit in range(10):
for _ in range(5):
ax = plt.subplot(10, 5, global_idx + 1)
plt.imshow(cond_sampled_im_mean_np[global_idx, :].reshape(28, 28), cmap='gray')
ax.axis('off')
global_idx += 1
plt.show() | 625 | 0 | 46 |
76673c3b226ffe79dd6282ebdcd2d60cbe0a1ca2 | 2,098 | py | Python | python/betacal/__init__.py | REFRAME/betacal | 7c4a733a1f5b52a8a1700a8e793ac75ec16c9177 | [
"MIT"
] | 8 | 2018-07-19T21:15:45.000Z | 2021-07-09T09:44:19.000Z | python/betacal/__init__.py | REFRAME/betacal | 7c4a733a1f5b52a8a1700a8e793ac75ec16c9177 | [
"MIT"
] | 2 | 2017-11-14T12:32:37.000Z | 2021-03-11T20:53:39.000Z | python/betacal/__init__.py | REFRAME/betacal | 7c4a733a1f5b52a8a1700a8e793ac75ec16c9177 | [
"MIT"
] | 3 | 2017-02-09T05:08:12.000Z | 2020-05-27T12:40:25.000Z | from .beta_calibration import _BetaCal, _BetaAMCal, _BetaABCal
from sklearn.base import BaseEstimator, RegressorMixin
class BetaCalibration(BaseEstimator, RegressorMixin):
    """Wrapper class for the three Beta regression models introduced in
    Kull, M., Silva Filho, T.M. and Flach, P. Beta calibration: a well-founded
    and easily implemented improvement on logistic calibration for binary
    classifiers. AISTATS 2017.
    Parameters
    ----------
    parameters : string
        Determines which parameters will be calculated by the model. Possible
        values are: "abm" (default), "am" and "ab"
    Attributes
    ----------
    calibrator_ :
        Internal calibrator object. The type depends on the value of parameters.
    """
    def __init__(self, parameters="abm"):
        # BUG FIX: this constructor was missing, so ``calibrator_`` was never
        # assigned and both ``fit`` and ``predict`` raised AttributeError on
        # every instance.  Select the internal model from ``parameters``.
        if parameters == "abm":
            self.calibrator_ = _BetaCal()
        elif parameters == "am":
            self.calibrator_ = _BetaAMCal()
        elif parameters == "ab":
            self.calibrator_ = _BetaABCal()
        else:
            raise ValueError('Unknown parameters', parameters)
    def fit(self, X, y, sample_weight=None):
        """Fit the model using X, y as training data.
        Parameters
        ----------
        X : array-like, shape (n_samples,)
            Training data.
        y : array-like, shape (n_samples,)
            Training target.
        sample_weight : array-like, shape = [n_samples] or None
            Sample weights. If None, then samples are equally weighted.
            Currently, no sample weighting is done by the models.
        Returns
        -------
        self : object
            Returns an instance of self.
        """
        # Delegate to the internal calibrator selected in ``__init__``.
        self.calibrator_.fit(X, y, sample_weight)
        return self
    def predict(self, S):
        """Predict new values.
        Parameters
        ----------
        S : array-like, shape (n_samples,)
            Data to predict from.
        Returns
        -------
        : array, shape (n_samples,)
            The predicted values.
        """
        return self.calibrator_.predict(S)
| 30.405797 | 80 | 0.591992 | from .beta_calibration import _BetaCal, _BetaAMCal, _BetaABCal
from sklearn.base import BaseEstimator, RegressorMixin
class BetaCalibration(BaseEstimator, RegressorMixin):
    """Scikit-learn style wrapper around the beta-calibration models from
    Kull, M., Silva Filho, T.M. and Flach, P. Beta calibration: a well-founded
    and easily implemented improvement on logistic calibration for binary
    classifiers. AISTATS 2017.
    Parameters
    ----------
    parameters : string
        Which beta-model parameters are fitted. One of "abm" (default),
        "am" or "ab".
    Attributes
    ----------
    calibrator_ :
        Internal calibrator object; its concrete type follows ``parameters``.
    """
    def __init__(self, parameters="abm"):
        # Table-driven selection of the internal model class.
        model_by_name = {
            "abm": _BetaCal,
            "am": _BetaAMCal,
            "ab": _BetaABCal,
        }
        if parameters not in model_by_name:
            raise ValueError('Unknown parameters', parameters)
        self.calibrator_ = model_by_name[parameters]()
    def fit(self, X, y, sample_weight=None):
        """Fit the wrapped calibrator on scores ``X`` and labels ``y``.
        Parameters
        ----------
        X : array-like, shape (n_samples,)
            Training data.
        y : array-like, shape (n_samples,)
            Training target.
        sample_weight : array-like, shape = [n_samples] or None
            Per-sample weights; forwarded to the internal model, which
            currently performs no sample weighting.
        Returns
        -------
        self : object
            This estimator, for call chaining.
        """
        self.calibrator_.fit(X, y, sample_weight)
        return self
    def predict(self, S):
        """Return calibrated predictions for the scores in ``S``.
        Parameters
        ----------
        S : array-like, shape (n_samples,)
            Data to predict from.
        Returns
        -------
        : array, shape (n_samples,)
            The predicted values.
        """
        return self.calibrator_.predict(S)
| 321 | 0 | 26 |
c77ab43acfa82cf80be4632e5a48c50eb706dbb7 | 3,304 | py | Python | src/sequential/adult/adult_costs.py | ppnaumann/CSCF | ea8af1f2fdec3a90a041324a32893d5dadc7e14b | [
"MIT"
] | null | null | null | src/sequential/adult/adult_costs.py | ppnaumann/CSCF | ea8af1f2fdec3a90a041324a32893d5dadc7e14b | [
"MIT"
] | null | null | null | src/sequential/adult/adult_costs.py | ppnaumann/CSCF | ea8af1f2fdec3a90a041324a32893d5dadc7e14b | [
"MIT"
] | null | null | null | import numpy as np
from feature_cost_model.action_cost import ActionCost
| 35.913043 | 86 | 0.65224 | import numpy as np
from feature_cost_model.action_cost import ActionCost
class IncreaseAgeCosts(ActionCost):
    """Cost of changing the ``age`` feature: the absolute change in years."""

    def __init__(self, features, dependency_graph=None):
        # ``features`` maps feature names to their index in a state vector.
        self.features = features
        feature_idx = self.features["age"]
        super().__init__(feature_idx, dependency_graph)

    def _get_costs(self, old_state, current_state):
        """Return |new age - old age| between the two states."""
        change_value = current_state[self.feature_idx] - old_state[self.feature_idx]
        # IDIOM FIX: replace ``type(x) == float or type(x) == np.float64``
        # with a single isinstance check; np.float64 subclasses float, so
        # this accepts exactly the same values the original assert allowed.
        assert isinstance(change_value, float), type(change_value)
        return abs(change_value)
class IncreaseCapitalGainCosts(ActionCost):
    """Cost of changing ``capital_gain``: one unit per 500 of change."""

    def __init__(self, features, dependency_graph=None):
        self.features = features
        capital_gain_idx = self.features["capital_gain"]
        super().__init__(capital_gain_idx, dependency_graph)

    def _get_costs(self, old_state, current_state):
        """Absolute capital-gain change, scaled by 1/500, either direction."""
        old_value = old_state[self.feature_idx]
        new_value = current_state[self.feature_idx]
        return abs(new_value - old_value) / 500
class IncreaseEducationCosts(ActionCost):
    """Cost of changing ``education``: cumulative price of each degree step."""

    def __init__(self, features, dependency_graph=None):
        self.features = features
        education_idx = self.features["education"]
        # Entry i is the price of advancing from level i-1 to level i.
        self.general_costs = [
            0.0,  # (base) School
            3.0,  # School -> HS
            3.0,  # HS -> college
            1.0,  # college -> prof-school
            2.0,  # prof-school -> assoc
            3.5,  # assoc -> bachelors
            2.5,  # bachelors -> masters
            5.0,  # masters -> doctorate
        ]
        # Ordering of the encoded education values, lowest level first.
        self.education_level_order = list(range(8))
        super().__init__(education_idx, dependency_graph)

    def _get_costs(self, old_state, current_state):
        """Cumulative cost of all degree steps between the two levels."""
        level_of = self.education_level_order.index
        src_level = level_of(old_state[self.feature_idx])
        dst_level = level_of(current_state[self.feature_idx])
        if src_level == dst_level:
            return 0.0
        # Sum every per-step price between the levels.  A downgrade
        # (dst < src) produces an empty slice and therefore costs 0.0.
        step_costs = self.general_costs[src_level + 1 : dst_level + 1]
        return float(sum(step_costs))
class ChangeWorkHrsCosts(ActionCost):
    """Cost of changing ``hours_per_week``.

    Increasing the weekly hours costs their full magnitude; reducing them
    is treated as free (zero cost).
    """

    def __init__(self, features, dependency_graph=None):
        self.features = features
        hours_idx = self.features["hours_per_week"]
        super().__init__(hours_idx, dependency_graph)

    def _get_costs(self, old_state, current_state):
        """Return the direction-discounted absolute change in hours."""
        delta = current_state[self.feature_idx] - old_state[self.feature_idx]
        # Reductions get a full discount; increases are charged in full.
        discount = 0.0 if delta < 0.0 else 1.0
        return abs(delta) * discount
class ChangeCategoricalCosts(ActionCost):
    """Flat cost for switching a categorical feature to a different value."""

    def __init__(self, feature_idx, features, dependency_graph=None):
        self.features = features
        super().__init__(feature_idx, dependency_graph)

    def _get_costs(self, old_state, current_state):
        """Return 0.0 when the category is unchanged, otherwise a flat 5.0."""
        unchanged = old_state[self.feature_idx] == current_state[self.feature_idx]
        return 0.0 if unchanged else 5.0