hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d37c1bade401f4ac9ccafb5794c0f6500ef2d571 | 9,415 | py | Python | VHDLTest/VHDLTest.py | Malcolmnixon/VhdlTest | f17b981e21345444571418d067a61d23325162d3 | [
"MIT"
] | null | null | null | VHDLTest/VHDLTest.py | Malcolmnixon/VhdlTest | f17b981e21345444571418d067a61d23325162d3 | [
"MIT"
] | 15 | 2020-08-03T15:15:11.000Z | 2020-08-27T02:41:17.000Z | VHDLTest/VHDLTest.py | Malcolmnixon/VhdlTest | f17b981e21345444571418d067a61d23325162d3 | [
"MIT"
] | null | null | null | """Module for VHDLTest application class."""
import argparse
import sys
from typing import Optional, Dict
from junit_xml import TestSuite, TestCase
from datetime import datetime
from .simulator.SimulatorBase import SimulatorBase
from .simulator.SimulatorFactory import SimulatorFactory
from .Configuration import Configuration
from .logger.Log import Log
from .runner.RunResults import RunCategory
from .runner.RunResults import RunResults
class VHDLTest(object):
    """VHDLTest application class.

    Orchestrates a complete run: argument parsing, configuration loading,
    simulator selection, source compilation, test-bench execution, optional
    JUnit report generation and a final summary.
    """

    # Collaborators created during run(); None until then.
    _log: Optional[Log]
    _config: Optional[Configuration]
    _simulator: Optional[SimulatorBase]
    _compile_result: Optional[RunResults]
    # FIX: annotation renamed from `_test_result` to `_test_results` - the
    # attribute actually assigned and read throughout this class (see
    # __init__, run_tests, emit_junit, print_summary) is `_test_results`.
    _test_results: Dict[str, RunResults]

    # VHDLTest version
    version = "0.2.0"

    def __init__(self) -> None:
        """Initialize a new VHDLTest instance."""
        self._args = None
        self._log = None
        self._config = None
        self._simulator = None
        self._compile_result = None
        self._test_results = {}
        self._test_count = 0
        self._test_passed = 0
        self._test_failed = 0
        self._total_duration = 0.0
        self._elapsed_duration = 0.0

    def parse_arguments(self) -> None:
        """Parse command-line arguments into _args.

        Exits with the help text when no arguments or no configuration file
        are supplied, and exits with code 0 after printing --version.
        """
        # Construct the argument parser
        parser = argparse.ArgumentParser(
            prog='VHDL Test-bench Runner (VHDLTest)',
            description='''Runs VHDL Test-benches and generates a report of the
                passes and failures. Reference documentation is located
                at https://github.com/Malcolmnixon/VhdlTest''')
        parser.add_argument('-c', '--config', help='Configuration file')
        parser.add_argument('-l', '--log', help='Write to log file')
        parser.add_argument('-j', '--junit', help='Generate JUnit xml file')
        parser.add_argument('-t', '--tests', nargs='+', help='List of test-benches to run')
        parser.add_argument('-s', '--simulator', default='', help='Specify simulator (E.G. GHDL)')
        parser.add_argument('-v', '--verbose', default=False, action='store_true', help='Verbose logging of output')
        parser.add_argument('--exit-0', default=False, action='store_true', help='Exit with code 0 even if tests fail')
        parser.add_argument('--version', default=False, action='store_true', help='Display version information')

        # If no arguments are provided then print the help information
        if len(sys.argv) == 1:
            parser.print_help()
            sys.exit(1)

        # Parse the arguments
        self._args = parser.parse_args()

        # Check for version
        if self._args.version:
            print(f'VHDL Test-bench Runner (VHDLTest) version {VHDLTest.version}')
            sys.exit(0)

        # Ensure we have a configuration
        if self._args.config is None:
            parser.print_help()
            sys.exit(1)

    def compile_source(self) -> None:
        """Compile VHDL source files into library.

        Exits with code 1 when the simulator reports a compile error.
        """
        # Compile the code
        self._log.write(f'Compiling files using {self._simulator.name}...\n')
        self._compile_result = self._simulator.compile(self._config)

        # Print compile log on verbose or compile warning/error
        level = RunCategory.TEXT if self._args.verbose or self._compile_result.warning else RunCategory.INFO
        self._compile_result.print(self._log, level)

        # On compile error write error message
        if self._compile_result.error:
            self._log.write(Log.error,
                            'Error: Compile of source files failed',
                            Log.end,
                            '\n\n')
            sys.exit(1)

        # Report compile success
        self._log.write(Log.success, 'done', Log.end, '\n\n')

    def run_tests(self) -> None:
        """Run VHDL test benches and gather results.

        Populates _test_results, _test_passed, _test_failed and
        _total_duration.
        """
        # Run the tests
        self._test_results = {}
        self._test_passed = 0
        self._test_failed = 0
        self._total_duration = 0.0
        for test in self._config.tests:
            # Log starting the test
            self._log.write(f'Starting {test}\n')

            # Run the test and save the result
            result = self._simulator.test(self._config, test)
            self._test_results[test] = result
            self._total_duration += result.duration

            # Print test log on verbose or test warning/error
            level = RunCategory.TEXT if self._args.verbose or result.warning else RunCategory.INFO
            result.print(self._log, level)

            # Log the result
            if result.error:
                self._log.write(Log.error, 'fail ', Log.end, f'{test} ({result.duration:.1f} seconds)\n')
                self._test_failed += 1
            else:
                self._log.write(Log.success, 'pass ', Log.end, f'{test} ({result.duration:.1f} seconds)\n')
                self._test_passed += 1

            # Add separator after test
            self._log.write('\n')

    def emit_junit(self) -> None:
        """Emit JUnit report file containing test results."""
        # Print generating message
        self._log.write(f'Generating JUnit output {self._args.junit}\n')

        # Create the test cases
        test_cases = []
        for test in self._config.tests:
            result = self._test_results[test]

            # Create the test case
            test_case = TestCase(test, classname=test, elapsed_sec=result.duration, stdout=result.output)

            # Detect failures or errors
            if result.failure:
                # Test failed, could not get results
                test_case.add_failure_info(output=result.error_info)
            elif result.error:
                # Test detected error
                test_case.add_error_info(message=result.error_info)
            test_cases.append(test_case)

        # Create the test suite
        test_suite = TestSuite('testsuite', test_cases)

        # Write test suite to file
        with open(self._args.junit, 'w') as f:
            TestSuite.to_file(f, [test_suite])

        # Report compile success
        self._log.write(Log.success, 'done', Log.end, '\n\n')

    def print_summary(self) -> None:
        """Print test summary information to log."""
        # Print summary list
        self._log.write('==== Summary ========================================\n')
        for test in self._config.tests:
            result = self._test_results[test]
            if result.error:
                self._log.write(Log.error, 'fail ', Log.end, f'{test} ({result.duration:.1f} seconds)\n')
            else:
                self._log.write(Log.success, 'pass ', Log.end, f'{test} ({result.duration:.1f} seconds)\n')

        # Print summary statistics
        self._log.write('=====================================================\n')
        if self._test_count == 0:
            self._log.write(Log.warning, 'No tests were run!', Log.end, '\n')
        if self._test_passed != 0:
            self._log.write(Log.success, 'pass ', Log.end, f'{self._test_passed} of {self._test_count}\n')
        if self._test_failed != 0:
            self._log.write(Log.error, 'fail ', Log.end, f'{self._test_failed} of {self._test_count}\n')

        # Print time information
        self._log.write('=====================================================\n')
        self._log.write(f'Total time was {self._total_duration:.1f} seconds\n')
        self._log.write(f'Elapsed time was {self._elapsed_duration:.1f} seconds\n')
        self._log.write('=====================================================\n')

        # Print final warning if any failed
        if self._test_failed != 0:
            self._log.write(Log.error, 'Some failed!', Log.end, '\n')

    def run(self) -> None:
        """Run all VHDLTest steps."""
        # Parse arguments
        self.parse_arguments()

        # Construct the logger
        self._log = Log()
        if self._args.log is not None:
            self._log.add_log_file(self._args.log)

        # Print the banner and capture the start time
        self._log.write('VHDL Test-bench Runner (VHDLTest)\n\n')
        elapsed_start = datetime.now()

        # Read the configuration
        self._config = Configuration(self._args.config)

        # Override configuration with command line arguments
        if self._args.tests:
            self._config.tests = self._args.tests

        # Count the number of tests
        self._test_count = len(self._config.tests)

        # Create a simulator
        self._simulator = SimulatorFactory.create_simulator(self._args.simulator)
        if self._simulator is None:
            self._log.write(Log.error,
                            'Error: Simulator not found. Please add simulator to the path',
                            Log.end,
                            '\n')
            sys.exit(1)

        # Compile the code
        self.compile_source()

        # Run the tests
        self.run_tests()
        elapsed_end = datetime.now()
        self._elapsed_duration = (elapsed_end - elapsed_start).total_seconds()

        # Generate JUnit output
        if self._args.junit is not None:
            self.emit_junit()

        # Print summary list
        self.print_summary()

        # Generate error code if necessary
        if self._test_failed != 0 and not self._args.exit_0:
            sys.exit(1)
| 38.744856 | 119 | 0.590335 | """Module for VHDLTest application class."""
import argparse
import sys
from typing import Optional, Dict
from junit_xml import TestSuite, TestCase
from datetime import datetime
from .simulator.SimulatorBase import SimulatorBase
from .simulator.SimulatorFactory import SimulatorFactory
from .Configuration import Configuration
from .logger.Log import Log
from .runner.RunResults import RunCategory
from .runner.RunResults import RunResults
class VHDLTest(object):
    """VHDLTest application class.

    Orchestrates a complete run: argument parsing, configuration loading,
    simulator selection, source compilation, test-bench execution, optional
    JUnit report generation and a final summary.
    """

    # Collaborators created during run(); None until then.
    _log: Optional[Log]
    _config: Optional[Configuration]
    _simulator: Optional[SimulatorBase]
    _compile_result: Optional[RunResults]
    # FIX: annotation renamed from `_test_result` to `_test_results` - the
    # attribute actually assigned and read throughout this class (see
    # __init__, run_tests, emit_junit, print_summary) is `_test_results`.
    _test_results: Dict[str, RunResults]

    # VHDLTest version
    version = "0.2.0"

    def __init__(self) -> None:
        """Initialize a new VHDLTest instance."""
        self._args = None
        self._log = None
        self._config = None
        self._simulator = None
        self._compile_result = None
        self._test_results = {}
        self._test_count = 0
        self._test_passed = 0
        self._test_failed = 0
        self._total_duration = 0.0
        self._elapsed_duration = 0.0

    def parse_arguments(self) -> None:
        """Parse command-line arguments into _args.

        Exits with the help text when no arguments or no configuration file
        are supplied, and exits with code 0 after printing --version.
        """
        # Construct the argument parser
        parser = argparse.ArgumentParser(
            prog='VHDL Test-bench Runner (VHDLTest)',
            description='''Runs VHDL Test-benches and generates a report of the
                passes and failures. Reference documentation is located
                at https://github.com/Malcolmnixon/VhdlTest''')
        parser.add_argument('-c', '--config', help='Configuration file')
        parser.add_argument('-l', '--log', help='Write to log file')
        parser.add_argument('-j', '--junit', help='Generate JUnit xml file')
        parser.add_argument('-t', '--tests', nargs='+', help='List of test-benches to run')
        parser.add_argument('-s', '--simulator', default='', help='Specify simulator (E.G. GHDL)')
        parser.add_argument('-v', '--verbose', default=False, action='store_true', help='Verbose logging of output')
        parser.add_argument('--exit-0', default=False, action='store_true', help='Exit with code 0 even if tests fail')
        parser.add_argument('--version', default=False, action='store_true', help='Display version information')

        # If no arguments are provided then print the help information
        if len(sys.argv) == 1:
            parser.print_help()
            sys.exit(1)

        # Parse the arguments
        self._args = parser.parse_args()

        # Check for version
        if self._args.version:
            print(f'VHDL Test-bench Runner (VHDLTest) version {VHDLTest.version}')
            sys.exit(0)

        # Ensure we have a configuration
        if self._args.config is None:
            parser.print_help()
            sys.exit(1)

    def compile_source(self) -> None:
        """Compile VHDL source files into library.

        Exits with code 1 when the simulator reports a compile error.
        """
        # Compile the code
        self._log.write(f'Compiling files using {self._simulator.name}...\n')
        self._compile_result = self._simulator.compile(self._config)

        # Print compile log on verbose or compile warning/error
        level = RunCategory.TEXT if self._args.verbose or self._compile_result.warning else RunCategory.INFO
        self._compile_result.print(self._log, level)

        # On compile error write error message
        if self._compile_result.error:
            self._log.write(Log.error,
                            'Error: Compile of source files failed',
                            Log.end,
                            '\n\n')
            sys.exit(1)

        # Report compile success
        self._log.write(Log.success, 'done', Log.end, '\n\n')

    def run_tests(self) -> None:
        """Run VHDL test benches and gather results.

        Populates _test_results, _test_passed, _test_failed and
        _total_duration.
        """
        # Run the tests
        self._test_results = {}
        self._test_passed = 0
        self._test_failed = 0
        self._total_duration = 0.0
        for test in self._config.tests:
            # Log starting the test
            self._log.write(f'Starting {test}\n')

            # Run the test and save the result
            result = self._simulator.test(self._config, test)
            self._test_results[test] = result
            self._total_duration += result.duration

            # Print test log on verbose or test warning/error
            level = RunCategory.TEXT if self._args.verbose or result.warning else RunCategory.INFO
            result.print(self._log, level)

            # Log the result
            if result.error:
                self._log.write(Log.error, 'fail ', Log.end, f'{test} ({result.duration:.1f} seconds)\n')
                self._test_failed += 1
            else:
                self._log.write(Log.success, 'pass ', Log.end, f'{test} ({result.duration:.1f} seconds)\n')
                self._test_passed += 1

            # Add separator after test
            self._log.write('\n')

    def emit_junit(self) -> None:
        """Emit JUnit report file containing test results."""
        # Print generating message
        self._log.write(f'Generating JUnit output {self._args.junit}\n')

        # Create the test cases
        test_cases = []
        for test in self._config.tests:
            result = self._test_results[test]

            # Create the test case
            test_case = TestCase(test, classname=test, elapsed_sec=result.duration, stdout=result.output)

            # Detect failures or errors
            if result.failure:
                # Test failed, could not get results
                test_case.add_failure_info(output=result.error_info)
            elif result.error:
                # Test detected error
                test_case.add_error_info(message=result.error_info)
            test_cases.append(test_case)

        # Create the test suite
        test_suite = TestSuite('testsuite', test_cases)

        # Write test suite to file
        with open(self._args.junit, 'w') as f:
            TestSuite.to_file(f, [test_suite])

        # Report compile success
        self._log.write(Log.success, 'done', Log.end, '\n\n')

    def print_summary(self) -> None:
        """Print test summary information to log."""
        # Print summary list
        self._log.write('==== Summary ========================================\n')
        for test in self._config.tests:
            result = self._test_results[test]
            if result.error:
                self._log.write(Log.error, 'fail ', Log.end, f'{test} ({result.duration:.1f} seconds)\n')
            else:
                self._log.write(Log.success, 'pass ', Log.end, f'{test} ({result.duration:.1f} seconds)\n')

        # Print summary statistics
        self._log.write('=====================================================\n')
        if self._test_count == 0:
            self._log.write(Log.warning, 'No tests were run!', Log.end, '\n')
        if self._test_passed != 0:
            self._log.write(Log.success, 'pass ', Log.end, f'{self._test_passed} of {self._test_count}\n')
        if self._test_failed != 0:
            self._log.write(Log.error, 'fail ', Log.end, f'{self._test_failed} of {self._test_count}\n')

        # Print time information
        self._log.write('=====================================================\n')
        self._log.write(f'Total time was {self._total_duration:.1f} seconds\n')
        self._log.write(f'Elapsed time was {self._elapsed_duration:.1f} seconds\n')
        self._log.write('=====================================================\n')

        # Print final warning if any failed
        if self._test_failed != 0:
            self._log.write(Log.error, 'Some failed!', Log.end, '\n')

    def run(self) -> None:
        """Run all VHDLTest steps."""
        # Parse arguments
        self.parse_arguments()

        # Construct the logger
        self._log = Log()
        if self._args.log is not None:
            self._log.add_log_file(self._args.log)

        # Print the banner and capture the start time
        self._log.write('VHDL Test-bench Runner (VHDLTest)\n\n')
        elapsed_start = datetime.now()

        # Read the configuration
        self._config = Configuration(self._args.config)

        # Override configuration with command line arguments
        if self._args.tests:
            self._config.tests = self._args.tests

        # Count the number of tests
        self._test_count = len(self._config.tests)

        # Create a simulator
        self._simulator = SimulatorFactory.create_simulator(self._args.simulator)
        if self._simulator is None:
            self._log.write(Log.error,
                            'Error: Simulator not found. Please add simulator to the path',
                            Log.end,
                            '\n')
            sys.exit(1)

        # Compile the code
        self.compile_source()

        # Run the tests
        self.run_tests()
        elapsed_end = datetime.now()
        self._elapsed_duration = (elapsed_end - elapsed_start).total_seconds()

        # Generate JUnit output
        if self._args.junit is not None:
            self.emit_junit()

        # Print summary list
        self.print_summary()

        # Generate error code if necessary
        if self._test_failed != 0 and not self._args.exit_0:
            sys.exit(1)
| 0 | 0 | 0 |
c37feadf74679190eb890bfecd62db1e0a762240 | 777 | py | Python | tests/test_result/test_result_unwrap.py | ksurta/returns | 9746e569303f214d035462ae3dffe5c49abdcfa7 | [
"BSD-2-Clause"
] | null | null | null | tests/test_result/test_result_unwrap.py | ksurta/returns | 9746e569303f214d035462ae3dffe5c49abdcfa7 | [
"BSD-2-Clause"
] | null | null | null | tests/test_result/test_result_unwrap.py | ksurta/returns | 9746e569303f214d035462ae3dffe5c49abdcfa7 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import pytest
from returns.primitives.exceptions import UnwrapFailedError
from returns.result import Failure, Success
def test_unwrap_success():
    """Ensures that unwrap works for Success container."""
    container = Success(5)
    assert container.unwrap() == 5
def test_unwrap_failure():
    """Ensures that unwrap works for Failure container."""
    failing = Failure(5)
    with pytest.raises(UnwrapFailedError):
        assert failing.unwrap()
def test_unwrap_failure_with_exception():
    """Ensures that unwrap raises from the original exception."""
    source_error = ValueError('error')
    with pytest.raises(UnwrapFailedError) as excinfo:
        Failure(source_error).unwrap()
    representation = str(excinfo.getrepr())  # noqa: WPS441
    assert 'ValueError: error' in representation
| 26.793103 | 65 | 0.711712 | # -*- coding: utf-8 -*-
import pytest
from returns.primitives.exceptions import UnwrapFailedError
from returns.result import Failure, Success
def test_unwrap_success():
    """Ensures that unwrap works for Success container."""
    container = Success(5)
    assert container.unwrap() == 5
def test_unwrap_failure():
    """Ensures that unwrap works for Failure container."""
    failing = Failure(5)
    with pytest.raises(UnwrapFailedError):
        assert failing.unwrap()
def test_unwrap_failure_with_exception():
    """Ensures that unwrap raises from the original exception."""
    source_error = ValueError('error')
    with pytest.raises(UnwrapFailedError) as excinfo:
        Failure(source_error).unwrap()
    representation = str(excinfo.getrepr())  # noqa: WPS441
    assert 'ValueError: error' in representation
| 0 | 0 | 0 |
02d21a38f0036383f6dff42c08bba71fd2a41cbd | 7,056 | py | Python | gridpath/system/reliability/local_capacity/local_capacity_balance.py | blue-marble/gridpath | 66560ab084e1e2f4800e270090d5efc8f6ff01a6 | [
"Apache-2.0"
] | 44 | 2020-10-27T19:05:44.000Z | 2022-03-22T17:17:37.000Z | gridpath/system/reliability/local_capacity/local_capacity_balance.py | blue-marble/gridpath | 66560ab084e1e2f4800e270090d5efc8f6ff01a6 | [
"Apache-2.0"
] | 67 | 2020-10-08T22:36:53.000Z | 2022-03-22T22:58:33.000Z | gridpath/system/reliability/local_capacity/local_capacity_balance.py | blue-marble/gridpath | 66560ab084e1e2f4800e270090d5efc8f6ff01a6 | [
"Apache-2.0"
] | 21 | 2020-10-08T23:23:48.000Z | 2022-03-28T01:21:21.000Z | # Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Constraint total local capacity contribution to be more than or equal to the
requirement.
"""
from __future__ import print_function
from builtins import next
import csv
import os.path
from pyomo.environ import Var, Constraint, Expression, NonNegativeReals, value
from db.common_functions import spin_on_database_lock
from gridpath.auxiliary.dynamic_components import \
local_capacity_balance_provision_components
def add_model_components(m, d, scenario_directory, subproblem, stage):
    """
    :param m:
    :param d:
    :return:

    Add the local capacity balance to the model: total provision from all
    registered sources (plus any allowed shortage) must meet the
    requirement in each local capacity zone/period.
    """
    # Total contribution from every component registered on the dynamic
    # local-capacity-balance provision list.
    m.Total_Local_Capacity_from_All_Sources_Expression_MW = Expression(
        m.LOCAL_CAPACITY_ZONE_PERIODS_WITH_REQUIREMENT,
        rule=lambda mod, z, p:
        sum(getattr(mod, component)[z, p] for component
            in getattr(d, local_capacity_balance_provision_components)
            )
    )

    # Shortage (requirement violation) variable
    m.Local_Capacity_Shortage_MW = Var(
        m.LOCAL_CAPACITY_ZONE_PERIODS_WITH_REQUIREMENT,
        within=NonNegativeReals
    )

    def violation_expression_rule(mod, z, p):
        """Count the shortage only in zones where violation is allowed.

        FIX: this helper was missing even though it is referenced as the
        rule of Local_Capacity_Shortage_MW_Expression below, which made the
        expression construction raise a NameError.
        """
        return mod.Local_Capacity_Shortage_MW[z, p] * \
            mod.local_capacity_allow_violation[z]

    m.Local_Capacity_Shortage_MW_Expression = Expression(
        m.LOCAL_CAPACITY_ZONE_PERIODS_WITH_REQUIREMENT,
        rule=violation_expression_rule
    )

    def local_capacity_requirement_rule(mod, z, p):
        """
        Total local capacity provision must be greater than or equal to the
        requirement
        :param mod:
        :param z:
        :param p:
        :return:
        """
        return mod.Total_Local_Capacity_from_All_Sources_Expression_MW[z, p] \
            + mod.Local_Capacity_Shortage_MW_Expression[z, p] \
            >= mod.local_capacity_requirement_mw[z, p]

    m.Local_Capacity_Constraint = Constraint(
        m.LOCAL_CAPACITY_ZONE_PERIODS_WITH_REQUIREMENT,
        rule=local_capacity_requirement_rule
    )
def export_results(scenario_directory, subproblem, stage, m, d):
    """
    :param scenario_directory:
    :param subproblem:
    :param stage:
    :param m:
    :param d:
    :return:

    Write the per-zone/period local capacity results (requirement,
    provision, shortage) to local_capacity.csv in the subproblem/stage
    results directory.
    """
    with open(os.path.join(scenario_directory, str(subproblem), str(stage), "results",
                           "local_capacity.csv"), "w", newline="") as f:
        writer = csv.writer(f)
        # Header row
        writer.writerow(["local_capacity_zone", "period",
                         "discount_factor", "number_years_represented",
                         "local_capacity_requirement_mw",
                         "local_capacity_provision_mw",
                         "local_capacity_shortage_mw"])
        # One data row per (zone, period) with a requirement
        for (z, p) in m.LOCAL_CAPACITY_ZONE_PERIODS_WITH_REQUIREMENT:
            writer.writerow([
                z,
                p,
                m.discount_factor[p],
                m.number_years_represented[p],
                float(m.local_capacity_requirement_mw[z, p]),
                value(
                    m.Total_Local_Capacity_from_All_Sources_Expression_MW[z, p]
                ),
                value(m.Local_Capacity_Shortage_MW_Expression[z, p])
            ])
def import_results_into_database(
    scenario_id, subproblem, stage, c, db, results_directory, quiet
):
    """
    :param scenario_id:
    :param c:
    :param db:
    :param results_directory:
    :param quiet:
    :return:

    Import local capacity results and constraint duals from the CSV result
    files into the results_system_local_capacity table, then compute the
    marginal cost column from the duals.
    """
    if not quiet:
        print("system local_capacity total")
    # Local capacity contribution
    # First nullify any previously imported values for this scenario/
    # subproblem/stage.
    # NOTE(review): the .format() call below is a no-op (the string has no
    # {} placeholders); the values are bound via the ? parameters instead.
    nullify_sql = """
        UPDATE results_system_local_capacity
        SET local_capacity_requirement_mw = NULL,
        local_capacity_provision_mw = NULL,
        local_capacity_shortage_mw = NULL
        WHERE scenario_id = ?
        AND subproblem_id = ?
        AND stage_id = ?;
        """.format(scenario_id, subproblem, stage)
    spin_on_database_lock(conn=db, cursor=c, sql=nullify_sql,
                          data=(scenario_id, subproblem, stage),
                          many=False)
    # Read the per-zone/period results written by export_results
    results = []
    with open(os.path.join(results_directory,
                           "local_capacity.csv"), "r") as \
            surface_file:
        reader = csv.reader(surface_file)
        next(reader)  # skip header
        for row in reader:
            local_capacity_zone = row[0]
            period = row[1]
            discount_factor = row[2]
            number_years = row[3]
            local_capacity_req_mw = row[4]
            local_capacity_prov_mw = row[5]
            shortage_mw = row[6]
            results.append(
                (local_capacity_req_mw, local_capacity_prov_mw,
                 shortage_mw,
                 discount_factor, number_years,
                 scenario_id, local_capacity_zone, period)
            )
    update_sql = """
        UPDATE results_system_local_capacity
        SET local_capacity_requirement_mw = ?,
        local_capacity_provision_mw = ?,
        local_capacity_shortage_mw = ?,
        discount_factor = ?,
        number_years_represented = ?
        WHERE scenario_id = ?
        AND local_capacity_zone = ?
        AND period = ?"""
    spin_on_database_lock(conn=db, cursor=c, sql=update_sql, data=results)
    # Update duals
    duals_results = []
    with open(os.path.join(results_directory, "Local_Capacity_Constraint.csv"),
              "r") as local_capacity_duals_file:
        reader = csv.reader(local_capacity_duals_file)
        next(reader)  # skip header
        for row in reader:
            # Dual CSV columns: zone, period, dual
            duals_results.append(
                (row[2], row[0], row[1], scenario_id, subproblem, stage)
            )
    duals_sql = """
        UPDATE results_system_local_capacity
        SET dual = ?
        WHERE local_capacity_zone = ?
        AND period = ?
        AND scenario_id = ?
        AND subproblem_id = ?
        AND stage_id = ?;"""
    spin_on_database_lock(conn=db, cursor=c, sql=duals_sql, data=duals_results)
    # Calculate marginal carbon cost per MMt
    mc_sql = """
        UPDATE results_system_local_capacity
        SET local_capacity_marginal_cost_per_mw =
        dual / (discount_factor * number_years_represented)
        WHERE scenario_id = ?
        AND subproblem_id = ?
        AND stage_id = ?;
        """
    spin_on_database_lock(conn=db, cursor=c, sql=mc_sql,
                          data=(scenario_id, subproblem, stage),
                          many=False)
| 32.366972 | 86 | 0.628827 | # Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Constraint total local capacity contribution to be more than or equal to the
requirement.
"""
from __future__ import print_function
from builtins import next
import csv
import os.path
from pyomo.environ import Var, Constraint, Expression, NonNegativeReals, value
from db.common_functions import spin_on_database_lock
from gridpath.auxiliary.dynamic_components import \
local_capacity_balance_provision_components
def add_model_components(m, d, scenario_directory, subproblem, stage):
    """
    :param m:
    :param d:
    :return:

    Add the local capacity balance to the model: total provision from all
    registered sources (plus any allowed shortage) must meet the
    requirement in each local capacity zone/period.
    """
    # Total contribution from every component registered on the dynamic
    # local-capacity-balance provision list.
    m.Total_Local_Capacity_from_All_Sources_Expression_MW = Expression(
        m.LOCAL_CAPACITY_ZONE_PERIODS_WITH_REQUIREMENT,
        rule=lambda mod, z, p:
        sum(getattr(mod, component)[z, p] for component
            in getattr(d, local_capacity_balance_provision_components)
            )
    )
    # Shortage (requirement violation) variable
    m.Local_Capacity_Shortage_MW = Var(
        m.LOCAL_CAPACITY_ZONE_PERIODS_WITH_REQUIREMENT,
        within=NonNegativeReals
    )
    def violation_expression_rule(mod, z, p):
        # Shortage counts only where violation is allowed
        # (local_capacity_allow_violation is presumably a 0/1 flag per
        # zone - confirm against the zone input module).
        return mod.Local_Capacity_Shortage_MW[z, p] * \
            mod.local_capacity_allow_violation[z]
    m.Local_Capacity_Shortage_MW_Expression = Expression(
        m.LOCAL_CAPACITY_ZONE_PERIODS_WITH_REQUIREMENT,
        rule=violation_expression_rule
    )
    def local_capacity_requirement_rule(mod, z, p):
        """
        Total local capacity provision must be greater than or equal to the
        requirement
        :param mod:
        :param z:
        :param p:
        :return:
        """
        return mod.Total_Local_Capacity_from_All_Sources_Expression_MW[z, p] \
            + mod.Local_Capacity_Shortage_MW_Expression[z, p] \
            >= mod.local_capacity_requirement_mw[z, p]
    m.Local_Capacity_Constraint = Constraint(
        m.LOCAL_CAPACITY_ZONE_PERIODS_WITH_REQUIREMENT,
        rule=local_capacity_requirement_rule
    )
def export_results(scenario_directory, subproblem, stage, m, d):
    """
    :param scenario_directory:
    :param subproblem:
    :param stage:
    :param m:
    :param d:
    :return:

    Write the per-zone/period local capacity results (requirement,
    provision, shortage) to local_capacity.csv in the subproblem/stage
    results directory.
    """
    with open(os.path.join(scenario_directory, str(subproblem), str(stage), "results",
                           "local_capacity.csv"), "w", newline="") as f:
        writer = csv.writer(f)
        # Header row
        writer.writerow(["local_capacity_zone", "period",
                         "discount_factor", "number_years_represented",
                         "local_capacity_requirement_mw",
                         "local_capacity_provision_mw",
                         "local_capacity_shortage_mw"])
        # One data row per (zone, period) with a requirement
        for (z, p) in m.LOCAL_CAPACITY_ZONE_PERIODS_WITH_REQUIREMENT:
            writer.writerow([
                z,
                p,
                m.discount_factor[p],
                m.number_years_represented[p],
                float(m.local_capacity_requirement_mw[z, p]),
                value(
                    m.Total_Local_Capacity_from_All_Sources_Expression_MW[z, p]
                ),
                value(m.Local_Capacity_Shortage_MW_Expression[z, p])
            ])
def save_duals(m):
    """Register the index/dual columns exported for Local_Capacity_Constraint."""
    m.constraint_indices["Local_Capacity_Constraint"] = \
        ["local_capacity_zone", "period", "dual"]
def import_results_into_database(
    scenario_id, subproblem, stage, c, db, results_directory, quiet
):
    """
    :param scenario_id:
    :param c:
    :param db:
    :param results_directory:
    :param quiet:
    :return:

    Import local capacity results and constraint duals from the CSV result
    files into the results_system_local_capacity table, then compute the
    marginal cost column from the duals.
    """
    if not quiet:
        print("system local_capacity total")
    # Local capacity contribution
    # First nullify any previously imported values for this scenario/
    # subproblem/stage.
    # NOTE(review): the .format() call below is a no-op (the string has no
    # {} placeholders); the values are bound via the ? parameters instead.
    nullify_sql = """
        UPDATE results_system_local_capacity
        SET local_capacity_requirement_mw = NULL,
        local_capacity_provision_mw = NULL,
        local_capacity_shortage_mw = NULL
        WHERE scenario_id = ?
        AND subproblem_id = ?
        AND stage_id = ?;
        """.format(scenario_id, subproblem, stage)
    spin_on_database_lock(conn=db, cursor=c, sql=nullify_sql,
                          data=(scenario_id, subproblem, stage),
                          many=False)
    # Read the per-zone/period results written by export_results
    results = []
    with open(os.path.join(results_directory,
                           "local_capacity.csv"), "r") as \
            surface_file:
        reader = csv.reader(surface_file)
        next(reader)  # skip header
        for row in reader:
            local_capacity_zone = row[0]
            period = row[1]
            discount_factor = row[2]
            number_years = row[3]
            local_capacity_req_mw = row[4]
            local_capacity_prov_mw = row[5]
            shortage_mw = row[6]
            results.append(
                (local_capacity_req_mw, local_capacity_prov_mw,
                 shortage_mw,
                 discount_factor, number_years,
                 scenario_id, local_capacity_zone, period)
            )
    update_sql = """
        UPDATE results_system_local_capacity
        SET local_capacity_requirement_mw = ?,
        local_capacity_provision_mw = ?,
        local_capacity_shortage_mw = ?,
        discount_factor = ?,
        number_years_represented = ?
        WHERE scenario_id = ?
        AND local_capacity_zone = ?
        AND period = ?"""
    spin_on_database_lock(conn=db, cursor=c, sql=update_sql, data=results)
    # Update duals
    duals_results = []
    with open(os.path.join(results_directory, "Local_Capacity_Constraint.csv"),
              "r") as local_capacity_duals_file:
        reader = csv.reader(local_capacity_duals_file)
        next(reader)  # skip header
        for row in reader:
            # Dual CSV columns: zone, period, dual
            duals_results.append(
                (row[2], row[0], row[1], scenario_id, subproblem, stage)
            )
    duals_sql = """
        UPDATE results_system_local_capacity
        SET dual = ?
        WHERE local_capacity_zone = ?
        AND period = ?
        AND scenario_id = ?
        AND subproblem_id = ?
        AND stage_id = ?;"""
    spin_on_database_lock(conn=db, cursor=c, sql=duals_sql, data=duals_results)
    # Calculate marginal carbon cost per MMt
    mc_sql = """
        UPDATE results_system_local_capacity
        SET local_capacity_marginal_cost_per_mw =
        dual / (discount_factor * number_years_represented)
        WHERE scenario_id = ?
        AND subproblem_id = ?
        AND stage_id = ?;
        """
    spin_on_database_lock(conn=db, cursor=c, sql=mc_sql,
                          data=(scenario_id, subproblem, stage),
                          many=False)
| 234 | 0 | 50 |
f0373a29f5f02ba321b5fe5d527ef7e872d1364e | 89,064 | py | Python | knossos/tasks.py | TheMatthew/knossos | 70463d8a4ae1d6cd6f3d0fd9fba4037d94d26bd2 | [
"Apache-2.0"
] | null | null | null | knossos/tasks.py | TheMatthew/knossos | 70463d8a4ae1d6cd6f3d0fd9fba4037d94d26bd2 | [
"Apache-2.0"
] | null | null | null | knossos/tasks.py | TheMatthew/knossos | 70463d8a4ae1d6cd6f3d0fd9fba4037d94d26bd2 | [
"Apache-2.0"
] | null | null | null | ## Copyright 2017 Knossos authors, see NOTICE file
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from __future__ import absolute_import, print_function
import os
import sys
import platform
import logging
import subprocess
import shutil
import glob
import stat
import json
import tempfile
import threading
import random
import time
import re
import hashlib
import semantic_version
from . import center, util, progress, nebula, repo, vplib, settings
from .repo import Repo
from .qt import QtCore, QtWidgets, read_file
translate = QtCore.QCoreApplication.translate
# TODO: Optimize, make sure all paths are relative (no mod should be able to install to C:\evil)
# TODO: Add error messages.
# TODO: make sure all paths are relative (no mod should be able to install to C:\evil)
| 35.811821 | 522 | 0.519862 | ## Copyright 2017 Knossos authors, see NOTICE file
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
from __future__ import absolute_import, print_function
import os
import sys
import platform
import logging
import subprocess
import shutil
import glob
import stat
import json
import tempfile
import threading
import random
import time
import re
import hashlib
import semantic_version
from . import center, util, progress, nebula, repo, vplib, settings
from .repo import Repo
from .qt import QtCore, QtWidgets, read_file
translate = QtCore.QCoreApplication.translate
class FetchTask(progress.MultistepTask):
    """Fetch the public and private mod repositories and refresh the UI list.

    Step 1 downloads each repository in parallel; step 2 merges them and
    writes the combined list to mods.json.
    """
    background = True
    _public = None
    _private = None
    _steps = 2
    def __init__(self):
        super(FetchTask, self).__init__()
        self.title = 'Fetching mod list...'
        self.done.connect(self.finish)
        # The two pseudo-names select which repository work1() handles.
        self.add_work(['#public', '#private'])
    def init1(self):
        pass
    def work1(self, part):
        # Download one repository ('#public' or '#private') into a Repo object.
        progress.update(0.1, 'Fetching "%s"...' % part)
        try:
            repo_data = Repo()
            repo_data.is_link = True
            if part == '#private':
                # Private mods require a logged-in Nebula account.
                if not center.settings['neb_user']:
                    return
                mod_list = nebula.NebulaClient().get_private_mods()
                repo_data.base = '#private'
                repo_data.set(mod_list)
                self._private = repo_data
            elif part == '#public':
                # Try the configured mirrors until one answers.
                response = None
                for link in center.REPOS:
                    response = util.get(link, raw=True)
                    if response:
                        break
                if not response:
                    return
                repo_data.base = response.url
                repo_data.parse(response.text)
                self._public = repo_data
        except Exception:
            logging.exception('Failed to decode "%s"!', part)
            return
    def init2(self):
        self.add_work((None,))
    def work2(self, _):
        # Without the public repository there is nothing worth saving.
        if not self._public:
            return
        merged = Repo()
        merged.merge(self._public)
        if self._private:
            merged.merge(self._private)
        merged.save_json(os.path.join(center.settings_path, 'mods.json'))
        center.mods = merged
    def finish(self):
        if not self.aborted:
            center.main_win.update_mod_list()
class LoadLocalModsTask(progress.Task):
    """Recursively scan the configured base directories and rebuild
    center.installed from the mod.json files found on disk.
    """
    background = True
    can_abort = False
    def __init__(self):
        super(LoadLocalModsTask, self).__init__(threads=3)
        self.done.connect(self.finish)
        self.title = 'Loading installed mods...'
        if center.settings['base_path'] is None:
            logging.warning('A LoadLocalModsTask was launched even though no base path was set!')
        else:
            center.installed.clear()
            self.add_work([center.settings['base_path']] + center.settings['base_dirs'])
    def work(self, path):
        """Scan one directory: load its mod.json (if present) and queue subdirectories."""
        mods = center.installed
        subs = []
        mod_file = None
        try:
            for base in os.listdir(path):
                sub = os.path.join(path, base)
                # Folders ending in '.dis' are disabled and skipped.
                if os.path.isdir(sub) and not sub.endswith('.dis'):
                    subs.append(sub)
                elif base.lower() == 'mod.json':
                    mod_file = sub
        except FileNotFoundError:
            logging.warning('The directory "%s" does not exist anymore!' % path)
        except PermissionError:
            logging.warning('Failed to scan "%s" during mod load because access was denied!' % path)
        if mod_file:
            try:
                mod = repo.InstalledMod.load(mod_file)
                mods.add_mod(mod)
            except Exception:
                # Fix: log the metadata file that failed to parse, not `sub`
                # (which is just the last directory entry from the scan loop).
                logging.exception('Failed to parse "%s"!', mod_file)
        if subs:
            self.add_work(subs)
    def finish(self):
        center.main_win.update_mod_list()
class CheckFilesTask(progress.MultistepTask):
    """Verify the integrity of the given installed packages.

    Step 1 hashes every file of each package and classifies it as
    ok / corrupt / missing. Step 2 scans the affected mod folders for
    loose (unknown) files and removes zero-byte image files. finish()
    offers to redownload any packages that failed the check.
    """
    background = True
    _mod = None
    _check_results = None
    _missing_image_mods = None
    _steps = 2
    def __init__(self, pkgs, mod=None):
        super(CheckFilesTask, self).__init__()
        self.title = 'Checking %d packages...' % len(pkgs)
        self.pkgs = pkgs
        self._mod = mod
        self._missing_image_mods = set()
        self.done.connect(self.finish)
        # NOTE(review): single worker thread — presumably because hashing is
        # disk-bound; confirm before raising.
        self._threads = 1
    def init1(self):
        # Pair each package with its mod's folder for work1().
        pkgs = []
        for pkg in self.pkgs:
            mod = pkg.get_mod()
            pkgs.append((mod.folder, pkg))
        self.add_work(pkgs)
    def work1(self, data):
        """Hash all files of one package and post a (pkg, ok, checked, summary) record."""
        modpath, pkg = data
        pkg_files = pkg.filelist
        count = float(len(pkg_files))
        success = 0
        checked = 0
        summary = {
            'ok': [],
            'corrupt': [],
            'missing': []
        }
        for info in pkg_files:
            mypath = util.ipath(os.path.join(modpath, info['filename']))
            if os.path.isfile(mypath):
                progress.update(checked / count, 'Checking "%s"...' % (info['filename']))
                if util.check_hash(info['checksum'], mypath, False):
                    success += 1
                    summary['ok'].append(info['filename'])
                else:
                    summary['corrupt'].append(info['filename'])
            else:
                summary['missing'].append(info['filename'])
            checked += 1
        self.post((pkg, success, checked, summary))
    def init2(self):
        # Save the results from step 1.
        self._check_results = self.get_results()
        self.add_work(('',))
    def work2(self, d):
        """Find loose files and clean up empty (zero-byte) image files."""
        fnames = set()
        mods = set()
        loose = []
        # Collect all filenames
        for pkg, s, c, m in self._check_results:
            mod = pkg.get_mod()
            modpath = mod.folder
            mods.add(mod)
            # Ignore files are generated by Knossos. Only mod.json files that are in the location where we expect it to
            # be are ignored. All other mod.json files will be considered loose
            fnames.add(os.path.join(modpath, 'mod.json'))
            for info in pkg.filelist:
                # relative paths are valid here but we only want the filename
                fnames.add(os.path.normpath(os.path.join(modpath, info['filename'])))
        # Check for loose files.
        for mod in mods:
            modpath = mod.folder
            changed = False
            for prop in ('logo', 'tile', 'banner'):
                img_path = getattr(mod, prop)
                if img_path:
                    # Remove the image if it's just an empty file
                    if os.path.isfile(img_path) and os.stat(img_path).st_size == 0:
                        os.unlink(img_path)
                        setattr(mod, prop, None)
                        changed = True
            for prop in ('screenshots', 'attachments'):
                im_paths = getattr(mod, prop)
                for i, path in enumerate(im_paths):
                    # Remove the image if it's just an empty file
                    if os.path.isfile(path) and os.stat(path).st_size == 0:
                        os.unlink(path)
                        im_paths[i] = None
                        changed = True
                setattr(mod, prop, im_paths)
            if changed:
                # Remember mods with removed images so finish() can offer a redownload.
                self._missing_image_mods.add(mod)
                mod.save()
            for path, dirs, files in os.walk(modpath):
                for item in files:
                    name = os.path.join(path, item)
                    # '__k_plibs' and 'knossos.' prefixed files are Knossos-managed.
                    if name not in fnames and not item.startswith(('__k_plibs', 'knossos.')):
                        loose.append(name)
        # Sentinel record (pkg=None) carries the loose-file list.
        self._check_results.append((None, 0, 0, {'loose': loose}))
        self._results = self._check_results
    def finish(self):
        """Report results and offer to reinstall any corrupt/missing packages."""
        bad_packages = []
        for result in self._check_results:
            if result[0] is None:
                # This is the entry which contains the loose files
                continue
            if result[1] != result[2]:
                # If the number of checked files is different than the number of valid files then there is something
                # wrong with this package
                bad_packages.append(result[0])
        if len(bad_packages) > 0:
            msg = "An error was detected while validating the game file integrity. The following packages are invalid:"
            for pkg in bad_packages:
                msg += "\n - Package %s of mod %s" % (pkg.name, pkg.get_mod().title)
            if self._missing_image_mods:
                names = []
                for mod in self._missing_image_mods:
                    names.append(mod.title)
                    # Reinstalling the first package also restores the mod's images.
                    if mod.packages:
                        bad_packages.append(mod.packages[0])
                msg += "\n\n%s are missing images." % util.human_list(names)
            msg += "\n\nThese mods are invalid and need to be redownloaded before they can be played without errors.\n"
            msg += "Do that now?"
            res = QtWidgets.QMessageBox.question(None, 'Knossos', msg)
            if res == QtWidgets.QMessageBox.Yes:
                run_task(InstallTask(bad_packages, self._mod))
        else:
            QtWidgets.QMessageBox.information(None, 'Knossos', 'No problems were detected.')
# TODO: Optimize, make sure all paths are relative (no mod should be able to install to C:\evil)
# TODO: Add error messages.
class InstallTask(progress.MultistepTask):
    """Download, verify and install the given packages.

    Steps:
      1. per mod, determine which archives are missing or broken and which
         files can be copied from other installed versions;
      2. download and extract the missing archives;
      3. copy the reusable files found in step 1;
      4. write mod.json metadata and report install statistics.
    """
    _pkgs = None
    _pkg_names = None
    _mods = None
    _editable = None
    _dls = None
    _copies = None
    _steps = 4
    _error = False
    _7z_lock = None
    check_after = True
    def __init__(self, pkgs, mod=None, check_after=True, editable={}):
        # NOTE(review): `editable` uses a mutable default; it is only tested
        # with `in` here, but callers should still pass their own container.
        super(InstallTask, self).__init__()
        self._mods = set()
        self._pkgs = []
        self._pkg_names = []
        self.check_after = check_after
        self._editable = editable
        if sys.platform == 'win32':
            # Serializes 7z invocations; see the comment in work2().
            self._7z_lock = threading.Lock()
        if mod is not None:
            self.mods = [mod]
        self._slot_prog = {}
        for pkg in pkgs:
            try:
                pmod = center.installed.query(pkg.get_mod())
                if pmod.dev_mode:
                    # Don't modify mods which are in dev mode!
                    continue
            except repo.ModNotFound:
                pass
            ins_pkg = center.installed.add_pkg(pkg)
            pmod = ins_pkg.get_mod()
            self._pkgs.append(ins_pkg)
            self._mods.add(pmod)
            self._pkg_names.append((pmod.mid, ins_pkg.name))
            # One progress slot per archive, keyed by the file dict's identity.
            for item in ins_pkg.files.values():
                self._slot_prog[id(item)] = ('%s: %s' % (pmod.title, item['filename']), 0, 'Checking...')
        center.main_win.update_mod_list()
        self.done.connect(self.finish)
        self.title = 'Installing mods...'
    def abort(self):
        super(InstallTask, self).abort()
        util.cancel_downloads()
    def finish(self):
        """Clean up after an abort/error and trigger the post-install rescan."""
        if self.mods:
            title = self.mods[0].title
        else:
            title = 'UNKNOWN'
        if self.aborted:
            if self._cur_step == 1:
                # Need to remove all those temporary directories.
                for ar in self.get_results():
                    try:
                        shutil.rmtree(ar['tpath'])
                    except Exception:
                        logging.exception('Failed to remove "%s"!' % ar['tpath'])
            else:
                QtWidgets.QMessageBox.critical(None, 'Knossos',
                    self.tr('The mod installation was aborted before it could finish. ' +
                        'Uninstall the partially installed mod %s or verify the file integrity.' % title))
        elif self._error:
            msg = self.tr(
                'An error occured during the installation of %s. It might be partially installed.\n' % title +
                'Please run a file integrity check or reinstall (uninstall + install) it.'
            )
            QtWidgets.QMessageBox.critical(None, 'Knossos', msg)
        # UpdateTask triggers its own rescan in its finish() override.
        if not isinstance(self, UpdateTask) and self.check_after:
            run_task(LoadLocalModsTask())
    def init1(self):
        if center.settings['neb_user']:
            # Mods the logged-in user can edit stay in dev mode after install.
            for mod in self._mods:
                if mod.mid in self._editable:
                    mod.dev_mode = True
        self._threads = 3
        self.add_work(self._mods)
    def work1(self, mod):
        """Determine missing/broken files for one mod and fetch its images."""
        modpath = mod.folder
        mfiles = mod.get_files()
        mnames = [f['filename'] for f in mfiles] + ['knossos.bmp', 'mod.json']
        self._local.slot = id(mod)
        self._slot_prog[id(mod)] = (mod.title, 0, '')
        archives = set()
        progress.start_task(0, 0.9, '%s')
        progress.update(0, 'Checking %s...' % mod.title)
        kpath = os.path.join(modpath, 'mod.json')
        if os.path.isfile(kpath):
            try:
                with open(kpath, 'r') as stream:
                    info = json.load(stream)
            except Exception:
                logging.exception('Failed to parse mod.json!')
                info = None
            if info is not None and info['version'] != str(mod.version):
                logging.error('Overwriting "%s" (%s) with version %s.' % (mod.mid, info['version'], mod.version))
        if os.path.isdir(modpath):
            # TODO: Figure out if we want to handle these files (i.e. remove them)
            for path, dirs, files in os.walk(modpath):
                relpath = path[len(modpath):].lstrip('/\\')
                for item in files:
                    itempath = util.pjoin(relpath, item)
                    # 'kn_' prefixed files are Knossos-managed images.
                    if not itempath.startswith('kn_') and itempath not in mnames:
                        logging.info('File "%s" is left over.', itempath)
        else:
            logging.debug('Folder %s for %s does not yet exist.', mod, modpath)
            os.makedirs(modpath)
        amount = float(len(mfiles))
        copies = []
        pkg_folders = {}
        # query_all is a generator so the exception will be thrown when looping over the result
        try:
            inst_mods = list(center.installed.query_all(mod.mid))
            # Dev-mode versions keep files in per-package subfolders; map them.
            for mv in inst_mods:
                if mv.dev_mode:
                    pf = pkg_folders.setdefault(mv, {})
                    for pkg in mv.packages:
                        pf[pkg.name] = pkg.folder
        except repo.ModNotFound as exc:
            if exc.mid != mod.mid:
                logging.exception('Dependency error during mod installation! Tried to install %s.' % mod)
            inst_mods = []
        for i, info in enumerate(mfiles):
            if (mod.mid, info['package']) not in self._pkg_names:
                continue
            progress.update(i / amount, 'Checking %s: %s...' % (mod.title, info['filename']))
            # Check if we already have this file
            if mod.dev_mode and mod in pkg_folders:
                dest_path = util.ipath(os.path.join(mod.folder, pkg_folders[mod][info['package']], info['filename']))
            else:
                dest_path = util.ipath(os.path.join(mod.folder, info['filename']))
            found = os.path.isfile(dest_path) and util.check_hash(info['checksum'], dest_path)
            if not found:
                # Look for an identical file in other installed versions so
                # it can be copied instead of downloaded (handled in step 3).
                for mv in inst_mods:
                    if mv.dev_mode:
                        try:
                            itempath = util.ipath(os.path.join(mv.folder, pkg_folders[mv][info['package']], info['filename']))
                        except KeyError:
                            itempath = util.ipath(os.path.join(mv.folder, info['filename']))
                    else:
                        itempath = util.ipath(os.path.join(mv.folder, info['filename']))
                    if os.path.isfile(itempath) and util.check_hash(info['checksum'], itempath):
                        copies.append((mod, info['package'], info['filename'], itempath))
                        found = True
                        break
            if not found:
                archives.add((mod.mid, info['package'], info['archive']))
                logging.debug('%s: %s is missing/broken for %s.', info['package'], info['filename'], mod)
        self.post((archives, copies))
        progress.finish_task()
        progress.start_task(0.9, 0, 'Downloading logos...')
        # Make sure the images are in the mod folder.
        for prop in ('logo', 'tile', 'banner'):
            img_path = getattr(mod, prop)
            if img_path:
                ext = os.path.splitext(img_path)[1]
                dest = os.path.join(mod.folder, 'kn_' + prop + ext)
                # Remove the image if it's just an empty file
                if os.path.isfile(dest) and os.stat(dest).st_size == 0:
                    os.unlink(dest)
                if '://' in img_path and not os.path.isfile(dest):
                    # That's a URL
                    util.safe_download(img_path, dest)
                setattr(mod, prop, dest)
        for prop in ('screenshots', 'attachments'):
            im_paths = getattr(mod, prop)
            for i, path in enumerate(im_paths):
                ext = os.path.splitext(path)[1]
                dest = os.path.join(mod.folder, 'kn_' + prop + '_' + str(i) + ext)
                # Remove the image if it's just an empty file
                if os.path.isfile(dest) and os.stat(dest).st_size == 0:
                    os.unlink(dest)
                if '://' in path and not os.path.isfile(dest):
                    util.safe_download(path, dest)
                im_paths[i] = dest
        progress.finish_task()
        progress.update(1, 'Done preparing')
    def init2(self):
        """Collect step-1 results and queue the archives that must be downloaded."""
        archives = set()
        copies = []
        downloads = []
        for a, c in self.get_results():
            archives |= a
            copies.extend(c)
        self._copies = copies
        for pkg in self._pkgs:
            mod = pkg.get_mod()
            for oitem in pkg.files.values():
                if (mod.mid, pkg.name, oitem['filename']) in archives:
                    item = oitem.copy()
                    item['mod'] = mod
                    item['pkg'] = pkg
                    item['_id'] = id(oitem)
                    downloads.append(item)
                else:
                    # Archive already satisfied; drop its progress slot.
                    del self._slot_prog[id(oitem)]
        if len(archives) == 0:
            logging.info('Nothing to do for this InstallTask!')
        elif len(downloads) == 0:
            logging.error('Somehow we didn\'t find any downloads for this InstallTask!')
            self._error = True
        self._threads = 0
        self.add_work(downloads)
    def work2(self, archive):
        """Download, verify and extract one archive into its mod folder."""
        self._local.slot = archive['_id']
        with tempfile.TemporaryDirectory() as tpath:
            arpath = os.path.join(tpath, archive['filename'])
            modpath = archive['mod'].folder
            retries = 10
            done = False
            urls = list(archive['urls'])
            # Shuffle to spread load across mirrors.
            random.shuffle(urls)
            stream = open(arpath, 'wb')
            while retries > 0:
                retries -= 1
                for url in urls:
                    progress.start_task(0, 0.97, '%s')
                    progress.update(0, 'Ready')
                    if not util.download(url, stream, continue_=True):
                        if self.aborted:
                            return
                        logging.error('Download of "%s" failed!', url)
                        time.sleep(0.3)
                        continue
                    progress.finish_task()
                    progress.update(0.97, 'Checking "%s"...' % archive['filename'])
                    stream.close()
                    if util.check_hash(archive['checksum'], arpath):
                        done = True
                        retries = 0
                        break
                    else:
                        logging.error('File "%s" is corrupted!', url)
                        stream = open(arpath, 'wb')
                time.sleep(2)
            if not done:
                logging.error('Missing file "%s"!', archive['filename'])
                self._error = True
                return
            if self.aborted:
                return
            cpath = os.path.join(tpath, 'content')
            os.mkdir(cpath)
            needed_files = filter(lambda item: item['archive'] == archive['filename'], archive['pkg'].filelist)
            done = False
            if sys.platform == 'win32':
                # Apparently I can't run multiple 7z instances on Windows. If I do, I always get the error
                # "The archive can't be opened because it is still in use by another process."
                # I have no idea why. It works fine on Linux and Mac OS.
                # TODO: Is there a better solution?
                progress.update(0.98, 'Waiting...')
                self._7z_lock.acquire()
            progress.update(0.98, 'Extracting...')
            logging.debug('Extracting %s into %s', archive['filename'], modpath)
            if util.extract_archive(arpath, cpath):
                done = True
                # Look for missing files
                for item in needed_files:
                    src_path = util.ipath(os.path.join(cpath, item['orig_name']))
                    if not os.path.isfile(src_path):
                        logging.warning('Missing file "%s" from archive "%s" for package "%s" (%s)!',
                            item['orig_name'], archive['filename'], archive['pkg'].name, archive['mod'].title)
                        done = False
                        break
            if sys.platform == 'win32':
                self._7z_lock.release()
            if not done:
                logging.error('Failed to unpack archive "%s" for package "%s" (%s)!',
                    archive['filename'], archive['pkg'].name, archive['mod'].title)
                shutil.rmtree(cpath, ignore_errors=True)
                self._error = True
                return
            dev_mode = archive['pkg'].get_mod().dev_mode
            for item in archive['pkg'].filelist:
                if item['archive'] != archive['filename']:
                    continue
                src_path = util.ipath(os.path.join(cpath, item['orig_name']))
                if dev_mode:
                    dest_path = util.ipath(os.path.join(modpath, archive['pkg'].folder, item['filename']))
                else:
                    dest_path = util.ipath(os.path.join(modpath, item['filename']))
                try:
                    dparent = os.path.dirname(dest_path)
                    if not os.path.isdir(dparent):
                        os.makedirs(dparent)
                    if dev_mode and archive['pkg'].is_vp:
                        progress.start_task(0.98, 0.02, '%s')
                        util.extract_vp_file(src_path, os.path.join(modpath, archive['pkg'].folder))
                        progress.finish_task()
                        # Avoid confusing CheckTask with a missing VP file.
                        archive['pkg'].filelist = []
                    else:
                        # This move might fail on Windows with Permission Denied errors.
                        # "[WinError 32] The process cannot access the file because it is being used by another process"
                        # Just try it again several times to account of AV scanning and similar problems.
                        tries = 5
                        while tries > 0:
                            try:
                                shutil.move(src_path, dest_path)
                                break
                            except Exception as e:
                                logging.warning('Initial move for "%s" failed (%s)!' % (src_path, str(e)))
                                tries -= 1
                                if tries == 0:
                                    raise
                                else:
                                    time.sleep(1)
                except Exception:
                    logging.exception('Failed to move file "%s" from archive "%s" for package "%s" (%s) to its destination %s!',
                        src_path, archive['filename'], archive['pkg'].name, archive['mod'].title, dest_path)
                    self._error = True
            # Copy the remaining empty dirs and symlinks.
            for path, dirs, files in os.walk(cpath):
                path = os.path.relpath(path, cpath)
                for name in dirs:
                    src_path = os.path.join(cpath, path, name)
                    dest_path = util.ipath(os.path.join(modpath, archive.get('dest', ''), path, name))
                    if os.path.islink(src_path):
                        if not os.path.lexists(dest_path):
                            linkto = os.readlink(src_path)
                            os.symlink(linkto, dest_path)
                    elif not os.path.exists(dest_path):
                        os.makedirs(dest_path)
                for name in files:
                    src_path = os.path.join(cpath, path, name)
                    if os.path.islink(src_path):
                        dest_path = util.ipath(os.path.join(modpath, archive.get('dest', ''), path, name))
                        if not os.path.lexists(dest_path):
                            linkto = os.readlink(src_path)
                            os.symlink(linkto, dest_path)
            progress.update(1, 'Done.')
    def init3(self):
        self.add_work((None,))
    def work3(self, _):
        """Copy files that step 1 found in other installed versions."""
        self._slot_prog['copies'] = ('Copy old files', 0, 'Waiting...')
        self._local.slot = 'copies'
        pkg_folders = {}
        count = float(len(self._copies))
        try:
            for i, info in enumerate(self._copies):
                mod, pkg_name, fn, src = info
                progress.update(i / count, fn)
                if mod.dev_mode:
                    # Lazily build the package-name -> folder map per mod.
                    if mod not in pkg_folders:
                        pkg_folders[mod] = {}
                        for pkg in mod.packages:
                            pkg_folders[mod][pkg.name] = pkg.folder
                    dest = os.path.join(mod.folder, pkg_folders[mod][pkg_name], fn)
                else:
                    dest = os.path.join(mod.folder, fn)
                if not os.path.isfile(dest):
                    dest_parent = os.path.dirname(dest)
                    if not os.path.isdir(dest_parent):
                        os.makedirs(dest_parent)
                    logging.debug('Copying %s to %s', src, dest)
                    util.safe_copy(src, dest)
        except Exception:
            logging.exception('Failed to copy an old file!')
            self._error = True
            progress.update(1, 'Error!')
        else:
            progress.update(1, 'Done')
    def init4(self):
        self.add_work((None,))
    def work4(self, _):
        """Write mod.json files and report install statistics (best effort)."""
        first = True
        # Generate mod.json files.
        for mod in self._mods:
            try:
                mod.save()
            except Exception:
                logging.exception('Failed to generate mod.json file for %s!' % mod.mid)
            try:
                util.post(center.API + 'track', data={
                    'counter': 'install_mod',
                    'mid': mod.mid,
                    'version': str(mod.version),
                    'dependency': 'false' if first else 'true'
                })
            except Exception:
                # Tracking is best effort; never fail the install over it.
                pass
            first = False
# TODO: make sure all paths are relative (no mod should be able to install to C:\evil)
class UninstallTask(progress.MultistepTask):
    """Remove the given packages (and optionally whole mods) from disk.

    Step 1 deletes each package's files; step 2 removes the mods'
    metadata, images and empty directories once no packages remain.
    """
    _pkgs = None
    _mods = None
    _steps = 2
    check_after = True
    def __init__(self, pkgs, mods=[]):
        # NOTE(review): `mods` uses a mutable default; it is only iterated
        # here, but callers should still pass their own list.
        super(UninstallTask, self).__init__()
        self._pkgs = []
        self._mods = []
        if len(pkgs) > 0:
            for pkg in pkgs:
                try:
                    self._pkgs.append(center.installed.query(pkg))
                except repo.ModNotFound:
                    logging.exception('Someone tried to uninstall a non-existant package (%s, %s)! Skipping it...', pkg.get_mod().mid, pkg.name)
        for mod in mods:
            try:
                self._mods.append(center.installed.query(mod))
            except repo.ModNotFound:
                logging.exception('Someone tried to uninstall a non-existant %s!', mod)
        self.done.connect(self.finish)
        self.title = 'Uninstalling mods...'
    def init1(self):
        self.add_work(self._pkgs)
    def work1(self, pkg):
        """Delete all files belonging to one package."""
        mod = pkg.get_mod()
        for item in pkg.filelist:
            path = util.ipath(os.path.join(mod.folder, item['filename']))
            if not os.path.isfile(path):
                logging.warning('File "%s" for mod "%s" (%s) is missing during uninstall!', item['filename'], mod.title, mod.mid)
            else:
                os.unlink(path)
    def init2(self):
        mods = set(self._mods)
        # Unregister uninstalled pkgs.
        for pkg in self._pkgs:
            mods.add(pkg.get_mod())
            center.installed.del_pkg(pkg)
        self.add_work(mods)
    def work2(self, mod):
        """Clean up one mod's folder; drop it entirely if no packages remain."""
        modpath = mod.folder
        try:
            if isinstance(mod, repo.IniMod):
                shutil.rmtree(modpath)
            elif len(mod.packages) == 0:
                # Remove our files
                my_files = [os.path.join(modpath, 'mod.json'), mod.logo, mod.tile, mod.banner]
                my_files += mod.screenshots + mod.attachments
                for path in my_files:
                    if path and os.path.isfile(path):
                        os.unlink(path)
                libs = os.path.join(modpath, '__k_plibs')
                if os.path.isdir(libs):
                    # Delete any symlinks before running shutil.rmtree().
                    for link in os.listdir(libs):
                        item = os.path.join(libs, link)
                        if os.path.islink(item):
                            os.unlink(item)
                    shutil.rmtree(libs)
                center.installed.del_mod(mod)
                try:
                    util.post(center.API + 'track', data={
                        'counter': 'uninstall_mod',
                        'mid': mod.mid,
                        'version': str(mod.version),
                    })
                except Exception:
                    # Tracking is best effort; ignore failures.
                    pass
            elif not os.path.isdir(modpath):
                logging.error('Mod %s still has packages but mod folder "%s" is gone!' % (mod, modpath))
            else:
                # Some packages remain; just refresh the metadata on disk.
                mod.save()
        except Exception:
            logging.exception('Failed to uninstall mod from "%s"!' % modpath)
            # NOTE(review): _error is set but never read by this class; the
            # base class presumably consumes it — confirm.
            self._error = True
        # Remove empty directories.
        for path, dirs, files in os.walk(modpath, topdown=False):
            if len(dirs) == 0 and len(files) == 0:
                os.rmdir(path)
    def finish(self):
        # Update the local mod list which will remove the uninstalled mod
        run_task(LoadLocalModsTask())
class RemoveModFolder(progress.Task):
    """Delete a mod's folder from disk file by file with progress updates."""
    _error = None
    _success = False
    def __init__(self, mod):
        super(RemoveModFolder, self).__init__()
        self._mod = mod
        self.title = 'Deleting %s...' % mod.folder
        self.done.connect(self.finish)
        self.add_work(('',))
    def work(self, dummy):
        """Collect and delete every file, then remove the directory tree."""
        items = []
        path = self._mod.folder
        for sub, dirs, files in os.walk(path):
            for name in files:
                # Fix: os.walk() already yields the full directory path in
                # `sub`; prepending `path` again corrupted relative paths.
                items.append(os.path.join(sub, name))
        count = float(len(items))
        try:
            for i, name in enumerate(items):
                progress.update(i / count, 'Deleting files...')
                util.safe_unlink(name)
            # Delete the remaining empty directories and other stuff
            shutil.rmtree(path)
        except Exception as exc:
            logging.exception('Failed to delete mod folder for %s!' % self._mod.mid)
            self._error = str(exc)
        else:
            progress.update(1, 'Done')
            self._success = True
    def finish(self):
        """Report the outcome and refresh the installed-mod list."""
        if self._success:
            QtWidgets.QMessageBox.information(None, 'Knossos', 'Successfully deleted folder for %s %s.' % (self._mod.title, self._mod.version))
        elif self._error:
            QtWidgets.QMessageBox.critical(None, 'Knossos', 'Failed to delete %s. Reason:\n%s' % (self._mod.folder, self._error))
        else:
            QtWidgets.QMessageBox.critical(None, 'Knossos', 'Failed to delete %s.' % self._mod.folder)
        # Update the local mod list which will remove the uninstalled mod
        run_task(LoadLocalModsTask())
class UpdateTask(InstallTask):
    """Install a newer version of an already-installed mod, carry over its
    FSO profile data, and remove the old version afterwards.

    The package selection defaults to the packages the old version had
    installed plus any newly required packages.
    """
    _old_mod = None
    _new_mod = None
    __check_after = True
    def __init__(self, mod, pkgs=None, check_after=True):
        self.__check_after = check_after
        self._new_mod = center.mods.query(mod.mid)
        if not pkgs:
            old_pkgs = [pkg.name for pkg in mod.packages]
            pkgs = []
            for pkg in self._new_mod.packages:
                if pkg.name in old_pkgs or pkg.status == 'required':
                    pkgs.append(pkg)
        # carry the dev_mode setting over to the new version
        # Fix: this must be a set (InstallTask only tests membership);
        # the previous dict literal made editable.add() raise AttributeError.
        editable = set()
        if isinstance(mod, repo.InstalledMod) and mod.dev_mode:
            editable.add(mod.mid)
        self._old_mod = mod
        super(UpdateTask, self).__init__(pkgs, self._new_mod, check_after=False, editable=editable)
    def work4(self, _):
        super(UpdateTask, self).work4(_)
        # We can't use _new_mod here since it's a Mod but we need an InstalledMod here.
        new_mod = None
        for mod in self._mods:
            if mod.mid == self._old_mod.mid:
                new_mod = mod
                break
        if new_mod:
            fso_path = settings.get_fso_profile_path()
            old_settings = os.path.join(fso_path, os.path.basename(self._old_mod.folder))
            new_settings = os.path.join(fso_path, os.path.basename(new_mod.folder))
            # If we have generated files for the old mod copy them over to the new one (i.e. checkpoints and other script generated stuff).
            if os.path.isdir(old_settings) and not os.path.isdir(new_settings):
                shutil.copytree(old_settings, new_settings)
        else:
            logging.error('Failed to find new modpath during update of %s!' % self._old_mod)
    def finish(self):
        super(UpdateTask, self).finish()
        if not self.aborted and not self._error:
            # The new version has been succesfully installed, remove the old version.
            if len(self._old_mod.get_dependents()) == 0:
                if self._old_mod.version != self._new_mod.version:
                    run_task(UninstallTask(self._old_mod.packages))
            else:
                logging.debug('Not uninstalling %s after update because it still has dependents.', self._old_mod)
                run_task(LoadLocalModsTask())
class RewriteModMetadata(progress.Task):
    """Replace the local mod.json metadata of installed mods with fresh
    metadata from the Nebula, keeping only the installed packages and
    re-fetching the mods' images. Failures are collected and reported
    to the user in finish().
    """
    _threads = 1
    def __init__(self, mods):
        super(RewriteModMetadata, self).__init__()
        # Accumulates (mod, reason) pairs for the final report.
        self._reasons = []
        self.title = 'Rewriting local metadata...'
        self.done.connect(self.finish)
        self.add_work(mods)
    def work(self, mod):
        """Rewrite one mod's metadata from its Nebula record."""
        if mod.dev_mode:
            # Skip mods in dev mode to avoid overwriting local changes.
            return
        if mod.mid == 'FS2':
            # Retail FS2 has no Nebula record; regenerate it locally.
            create_retail_mod(mod.folder)
            return
        try:
            rmod = center.mods.query(mod)
        except repo.ModNotFound:
            self._reasons.append((mod, 'not found'))
            return
        # Keep only the packages that are actually installed locally.
        installed_pkgs = [pkg.name for pkg in mod.packages]
        new_mod = repo.InstalledMod.convert(rmod)
        for pkg in rmod.packages:
            if pkg.name in installed_pkgs:
                new_mod.add_pkg(pkg)
        try:
            new_mod.save()
        except Exception:
            self._reasons.append((mod, 'save failed'))
            return
        # We have to load the user settings again
        try:
            new_mod = repo.InstalledMod.load(os.path.join(new_mod.folder, 'mod.json'))
        except Exception:
            self._reasons.append((mod, 'reload failed'))
            return
        # Make sure the images are in the mod folder.
        for prop in ('logo', 'tile', 'banner'):
            img_path = getattr(new_mod, prop)
            if img_path:
                ext = os.path.splitext(img_path)[1]
                dest = os.path.join(new_mod.folder, 'kn_' + prop + ext)
                # Remove the image if it's just an empty file
                if os.path.isfile(dest) and os.stat(dest).st_size == 0:
                    os.unlink(dest)
                if '://' in img_path and not os.path.isfile(dest):
                    # That's a URL
                    util.safe_download(img_path, dest)
                setattr(new_mod, prop, dest)
        for prop in ('screenshots', 'attachments'):
            im_paths = getattr(new_mod, prop)
            for i, path in enumerate(im_paths):
                ext = os.path.splitext(path)[1]
                dest = os.path.join(new_mod.folder, 'kn_' + prop + '_' + str(i) + ext)
                # Remove the image if it's just an empty file
                if os.path.isfile(dest) and os.stat(dest).st_size == 0:
                    os.unlink(dest)
                if '://' in path and not os.path.isfile(dest):
                    util.safe_download(path, dest)
                im_paths[i] = dest
        center.installed.add_mod(new_mod)
    def finish(self):
        """Show a summary dialog and refresh the mod list."""
        if self._reasons:
            msg = 'Some mod metadata could not be saved.\n\n'
            for mod, r in self._reasons:
                msg += mod.title + ': '
                if r == 'not found':
                    msg += 'Not found on Nebula\n'
                elif r == 'save failed':
                    msg += 'mod.json could not be written\n'
                elif r == 'reload failed':
                    msg += 'mod.json could not be read\n'
            QtWidgets.QMessageBox.critical(None, 'Knossos', msg)
        else:
            QtWidgets.QMessageBox.information(None, 'Knossos', 'Done.')
        center.main_win.update_mod_list()
class UploadTask(progress.MultistepTask):
can_abort = True
_steps = 2
_client = None
_mod = None
_private = False
_dir = None
_login_failed = False
_duplicate = False
_reason = None
_msg = None
_success = False
_msg_table = {
'invalid version': 'the version number specified for this release is invalid!',
'outdated version': 'there is already a release with the same or newer version on the nebula.',
'unsupported archive checksum': 'your client sent an invalid checksum. You probably need to update.',
'archive missing': 'one of your archives failed to upload.'
}
_question = QtCore.Signal()
_question_result = False
_question_cond = None
def __init__(self, mod, private=False):
super(UploadTask, self).__init__()
self.title = 'Uploading mod...'
self.mods = [mod]
self._mod = mod.copy()
self._private = private
self._threads = 2
self._slot_prog = {
'total': ('Status', 0, 'Waiting...')
}
self.done.connect(self.finish)
self._question.connect(self.show_question)
self._question_cond = threading.Condition()
def abort(self, user=False):
self._local.slot = 'total'
progress.update(1, 'Aborted')
if self._client:
self._client.abort_uploads()
if user:
self._reason = 'aborted'
super(UploadTask, self).abort()
def show_question(self):
res = QtWidgets.QMessageBox.question(None, 'Knossos', 'This mod has already been uploaded. If you continue, ' +
'your metadata changes will be uploaded but the files will not be updated. Continue?')
self._question_result = res == QtWidgets.QMessageBox.Yes
with self._question_cond:
self._question_cond.notify()
def init1(self):
self._local.slot = 'total'
try:
progress.update(0, 'Performing sanity checks...')
if self._mod.mtype == 'mod' and self._mod.parent != 'FS2':
# Make sure TC mods depend on their parents
found = False
for pkg in self._mod.packages:
for dep in pkg.dependencies:
if dep['id'] == self._mod.parent:
found = True
break
if found:
break
if not found:
self._mod.packages[0].dependencies.append({
'id': self._mod.parent,
'version': '*',
'packages': []
})
# TODO: Verify dependencies against the online repo, not against the local one.
try:
self._mod.resolve_deps(recursive=False)
except repo.ModNotFound:
self._reason = 'broken deps'
self.abort()
return
if self._mod.mtype in ('mod', 'tc'):
if self._mod.custom_build:
# TODO: This should be clarified in the dev tab UI.
self._reason = 'custom build'
self.abort()
return
try:
exes = self._mod.get_executables()
except Exception:
exes = []
if len(exes) == 0:
self._reason = 'no exes'
self.abort()
return
progress.update(0.1, 'Logging in...')
self._dir = tempfile.TemporaryDirectory()
self._client = client = nebula.NebulaClient()
try:
editable = client.is_editable(self._mod.mid)
except nebula.InvalidLoginException:
self._login_failed = True
self.abort()
progress.update(0.1, 'Failed to login!')
return
if not editable['result']:
self._reason = 'unauthorized'
self.abort()
return
progress.update(0.11, 'Updating metadata...')
if editable['missing']:
client.create_mod(self._mod)
else:
client.update_mod(self._mod)
progress.update(0.13, 'Performing pre-flight checks...')
try:
client.preflight_release(self._mod, self._private)
except nebula.RequestFailedException as exc:
if exc.args[0] == 'duplicated version':
with self._question_cond:
self._question.emit()
self._question_cond.wait()
if not self._question_result:
self._reason = 'aborted'
self.abort()
return
self._duplicate = True
else:
raise
if not self._duplicate:
progress.update(0.15, 'Scanning files...')
archives = []
fnames = {}
conflicts = {}
for pkg in self._mod.packages:
ar_name = pkg.name + '.7z'
pkg_path = os.path.join(self._mod.folder, pkg.folder)
pkg.filelist = []
for sub, dirs, files in os.walk(pkg_path):
relsub = os.path.relpath(sub, pkg_path)
for fn in files:
if pkg.is_vp and fn.lower().endswith('.vp'):
self._reason = 'vp inception'
self.abort()
return
relpath = os.path.join(relsub, fn).replace('\\', '/')
pkg.filelist.append({
'filename': relpath,
'archive': ar_name,
'orig_name': relpath,
'checksum': None
})
if not pkg.is_vp:
# VP conflicts don't cause problems and are most likely intentional
# which is why we ignore them.
if relpath in fnames:
l = conflicts.setdefault(relpath, [fnames[relpath].name])
l.append(pkg.name)
fnames[relpath] = pkg
if len(pkg.filelist) == 0:
self._reason = 'empty pkg'
self._msg = pkg.name
self.abort()
return
archives.append(pkg)
self._slot_prog[pkg.name] = (pkg.name + '.7z', 0, 'Waiting...')
if conflicts:
msg = ''
for name in sorted(conflicts.keys()):
msg += '\n%s is in %s' % (name, util.human_list(conflicts[name]))
self._reason = 'conflict'
self._msg = msg
self.abort()
return
self._slot_prog['#checksums'] = ('Checksums', 0, '')
self._local.slot = '#checksums'
fc = float(sum([len(pkg.filelist) for pkg in self._mod.packages]))
done = 0
for pkg in self._mod.packages:
pkg_path = os.path.join(self._mod.folder, pkg.folder)
for fn in pkg.filelist:
progress.update(done / fc, fn['filename'])
try:
fn['checksum'] = util.gen_hash(os.path.join(pkg_path, fn['filename']))
except Exception:
logging.exception('Failed to generate checksum for file %s in package %s!' % (fn['filename'], pkg.name))
self._reason = 'file unreadable'
self._msg = (fn['filename'], pkg.name)
self.abort()
return
done += 1
progress.update(1, 'Done')
self._local.slot = 'total'
progress.update(0.2, 'Uploading...')
self.add_work(archives)
except nebula.AccessDeniedException:
self._reason = 'unauthorized'
self.abort()
except nebula.RequestFailedException as exc:
if exc.args[0] not in self._msg_table:
logging.exception('Failed request to nebula during upload!')
self._reason = exc.args[0]
self.abort()
except Exception:
logging.exception('Error during upload initalisation!')
self._reason = 'unknown'
self.abort()
def work1(self, pkg):
    """Pack a single package into a .7z archive and upload it to Nebula.

    Runs once per package (slot keyed by the package name). The package
    content is fingerprinted so that an unchanged package reuses either a
    locally cached archive (kn_upload-<name>.7z / .json next to the mod) or
    an archive Nebula already knows about. VP packages are first assembled
    into a .vp file which is then wrapped in the .7z archive.

    On failure this sets ``self._reason`` (and sometimes ``self._msg``) and
    aborts the whole task; finish() turns the reason into a user message.
    """
    self._local.slot = pkg.name
    ar_name = pkg.name + '.7z'
    ar_path = os.path.join(self._dir.name, ar_name)
    vp_checksum = None
    try:
        progress.update(0, 'Comparing...')

        # Fingerprint the package content: VP-ness plus the sorted list of
        # (filename, checksum) pairs. Identical content yields an identical
        # digest regardless of file order on disk.
        hasher = hashlib.new('sha512')
        if pkg.is_vp:
            hasher.update(b'ISVP')
        else:
            hasher.update(b'NOVP')

        for item in sorted(pkg.filelist, key=lambda a: a['filename']):
            line = '%s#%s\n' % (item['filename'], item['checksum'])
            hasher.update(line.encode('utf8'))

        content_ck = hasher.hexdigest()
        store_name = os.path.join(self._mod.folder, 'kn_upload-%s' % pkg.name)

        if self.aborted:
            return

        # Check the local archive cache: if the stored content hash matches,
        # the cached .7z can be reused and we skip packing entirely.
        create_ar = True
        if os.path.isfile(store_name + '.7z') and os.path.isfile(store_name + '.json'):
            try:
                with open(store_name + '.json', 'r') as stream:
                    data = json.load(stream)

                if data and data.get('hash') == content_ck:
                    create_ar = False

                del data
            except Exception:
                logging.exception('Failed to parse metadata for cached archive %s!' % store_name)

        if create_ar:
            # No usable local cache; ask Nebula whether it already has an
            # archive with this exact content.
            is_uploaded, meta = self._client.is_uploaded(content_checksum=content_ck)
            if is_uploaded:
                # The file is already uploaded
                pkg.files[ar_name] = {
                    'filename': ar_name,
                    'dest': '',
                    'checksum': ('sha256', meta['checksum']),
                    'filesize': meta['filesize']
                }

                is_done = True
                if pkg.is_vp:
                    if meta['vp_checksum']:
                        vp_name = pkg.name + '.vp'
                        pkg.filelist = [{
                            'filename': vp_name,
                            'archive': ar_name,
                            'orig_name': vp_name,
                            'checksum': ('sha256', meta['vp_checksum'])
                        }]
                    else:
                        # The existing upload is identical but not a VP.
                        is_done = False
                        is_uploaded = False

                if is_done:
                    progress.update(1, 'Done!')
                    return

            progress.update(0, 'Packing...')

            if pkg.is_vp:
                # Build the .vp container first; the .7z archive will contain
                # only this single VP file.
                vp_name = os.path.basename(pkg.folder) + '.vp'
                vp_path = os.path.join(self._dir.name, vp_name)
                vp = vplib.VpWriter(vp_path)
                pkg_path = os.path.join(self._mod.folder, pkg.folder)

                for item in pkg.filelist:
                    vp.add_file(item['filename'], os.path.join(pkg_path, item['filename']))

                progress.start_task(0.0, 0.1, '%s')
                try:
                    vp.write()
                except vplib.EmptyFileException as exc:
                    # VPs can't contain zero-byte files.
                    self._reason = 'empty file in vp'
                    self._msg = exc.file
                    self.abort()
                    return

                progress.update(1, 'Calculating checksum...')
                progress.finish_task()
                vp_checksum = util.gen_hash(vp_path)

                # The package now exposes only the VP file itself.
                pkg.filelist = [{
                    'filename': vp_name,
                    'archive': ar_name,
                    'orig_name': vp_name,
                    'checksum': vp_checksum
                }]

                if is_uploaded:
                    # Nebula had a matching archive but lacked the VP checksum;
                    # now that we computed it, nothing more to pack or upload.
                    progress.update(1, 'Done!')
                    return

                progress.start_task(0.1, 0.3, '%s')
            else:
                progress.start_task(0.0, 0.4, '%s')

            if self.aborted:
                return

            # Pack via the bundled 7z binary; -bsp1 puts progress percentages
            # on stdout which we parse below.
            _7z_msg = ''
            if pkg.is_vp:
                p = util.Popen([util.SEVEN_PATH, 'a', '-bsp1', ar_path, vp_name],
                    cwd=self._dir.name, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            else:
                p = util.Popen([util.SEVEN_PATH, 'a', '-bsp1', ar_path, '.'],
                    cwd=os.path.join(self._mod.folder, pkg.folder), stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

            line_re = re.compile(r'^\s*([0-9]+)%')
            buf = ''

            # 7z rewrites its progress line with carriage returns, so the
            # stream is split on '\r' instead of '\n'.
            while p.poll() is None:
                if self.aborted:
                    p.terminate()
                    return

                while '\r' not in buf:
                    line = p.stdout.read(10)
                    if not line:
                        break
                    buf += line.decode('utf8', 'replace')

                buf = buf.split('\r')
                line = buf.pop(0)
                buf = '\r'.join(buf)

                m = line_re.match(line)
                if m:
                    progress.update(int(m.group(1)) / 100., 'Compressing...')
                else:
                    # Anything that isn't a progress line is kept for the
                    # error report below.
                    _7z_msg += line

            if p.returncode != 0:
                logging.error('Failed to build %s! (%s)' % (ar_name, _7z_msg))
                self._reason = 'bad archive'
                self._msg = pkg.name
                self.abort()
                return

            if self.aborted:
                return

            # Cache the finished archive next to the mod together with its
            # content hash so a retry can skip the packing step.
            shutil.move(ar_path, store_name + '.7z')
            with open(store_name + '.json', 'w') as stream:
                json.dump({'hash': content_ck}, stream)

            progress.finish_task()

        progress.start_task(0.4, 0.6, '%s')
        progress.update(0, 'Preparing upload...')

        pkg.files[ar_name] = {
            'filename': ar_name,
            'dest': '',
            'checksum': util.gen_hash(store_name + '.7z'),
            'filesize': os.stat(store_name + '.7z').st_size
        }

        # Transient upload failures are retried a few times before the outer
        # handler aborts the task.
        retries = 3
        while retries > 0:
            retries -= 1

            if self.aborted:
                return

            try:
                self._client.multiupload_file(ar_name, store_name + '.7z', content_checksum=content_ck, vp_checksum=vp_checksum)
                break
            except nebula.RequestFailedException:
                logging.exception('Failed upload, retrying...')

        progress.finish_task()
        progress.update(1, 'Done!')
    except nebula.RequestFailedException:
        logging.exception('Failed request to nebula during upload!')
        self._reason = 'archive missing'
        self.abort()
    except Exception:
        logging.exception('Unknown error during package packing!')
        self._reason = 'unknown'
        self.abort()
def init2(self):
    """Publish the release on Nebula after all package uploads finished.

    Calls update_release() for a re-upload of an existing version and
    create_release() for a brand-new one. Records failure reasons for
    finish() to translate into a user message.
    """
    self._local.slot = 'total'
    try:
        progress.update(0.8, 'Finishing...')

        # Re-releases of an already-known version are updates, not creations.
        publish = self._client.update_release if self._duplicate else self._client.create_release
        publish(self._mod, self._private)

        progress.update(1, 'Done')
        self._success = True
    except nebula.AccessDeniedException:
        self._reason = 'unauthorized'
        self.abort()
    except nebula.RequestFailedException as exc:
        # Known reason codes have canned messages; only log the unexpected ones.
        if exc.args[0] not in self._msg_table:
            logging.exception('Failed request to nebula during upload!')

        self._reason = exc.args[0]
        self.abort()
def work2(self):
    # Intentionally a no-op: the release is published once in init2(); there
    # is no per-item work left in this phase.
    pass
def finish(self):
    """Clean up upload artifacts and report the outcome to the user.

    Removes the cached kn_upload-* archives on success, always removes the
    temporary working directory, then maps the recorded abort reason (if
    any) to a human-readable message emitted through the web UI bridge.
    """
    try:
        if self._success:
            # The cached archives only help when retrying a failed upload;
            # drop them once everything went through.
            for item in os.listdir(self._mod.folder):
                if item.startswith('kn_upload-'):
                    logging.debug('Removing %s...' % item)
                    util.safe_unlink(os.path.join(self._mod.folder, item))

        if self._dir:
            logging.debug('Removing temporary directory...')
            util.retry_helper(self._dir.cleanup)
    except OSError:
        # This is not a critical error so we only log it for now
        logging.exception('Failed to remove temporary folder after upload!')

    if self._login_failed:
        message = 'Failed to login!'
    elif self._reason == 'unauthorized':
        message = 'You are not authorized to edit this mod!'
    elif self._success:
        message = 'Successfully uploaded mod!'
    elif self._reason in self._msg_table:
        message = "Your mod couldn't be uploaded because %s" % self._msg_table[self._reason]
    elif self._reason == 'conflict':
        message = "I can't upload this mod because at least one file is contained in multiple packages.\n"
        message += self._msg
    elif self._reason == 'empty pkg':
        message = 'The package %s is empty!' % self._msg
    elif self._reason == 'no exes':
        message = 'The mod has no executables selected!'
    elif self._reason == 'bad archive':
        message = 'Failed to pack %s!' % self._msg
    elif self._reason == 'custom build':
        message = "You can't upload a mod which depends on a local FSO build. Please go to your mod's " + \
            "FSO settings and select a build from the dropdown list."
    elif self._reason == 'broken deps':
        message = "The dependencies specified in your mod could not be resolved!"
    elif self._reason == 'vp inception':
        message = "You're telling me to put a VP into a VP... I don't think that's a good idea. Check your package settings! Aborted."
    elif self._reason == 'empty file in vp':
        message = "An empty file was detected! It's impossible to put empty files into VPs. Please either fill %s with data or remove it." % self._msg
    elif self._reason == 'file unreadable':
        message = "The file %s in package %s could not be read!" % self._msg
    elif self._reason == 'aborted':
        # User-initiated abort; no message needed.
        return
    else:
        # Fixed user-facing typo: "occured" -> "occurred".
        message = 'An unexpected error occurred! Sorry...'

    center.main_win.browser_ctrl.bridge.taskMessage.emit(message)
class GOGExtractTask(progress.Task):
    """Installs retail FS2 data by unpacking a GOG installer with InnoExtract.

    Downloads a platform-appropriate InnoExtract binary, runs it against the
    user-selected installer, verifies the retail VPs and moves the extracted
    data into the Knossos data folder. A failure reason is recorded in
    ``_reason`` and turned into a message box in finish().

    Fix vs. original: corrected the user-facing typo "unkown" -> "unknown".
    """

    can_abort = False
    _reason = None

    def __init__(self, gog_path, dest_path):
        super(GOGExtractTask, self).__init__()

        self.done.connect(self.finish)
        self.add_work([(gog_path, dest_path)])
        self.title = 'Installing FS2 from GOG...'
        self._dest_path = dest_path

        try:
            self._makedirs(dest_path)
        except Exception:
            logging.exception('Failed to create data path!')
            QtWidgets.QMessageBox.critical(None, 'Knossos', 'Failed to create %s!' % dest_path)
            self.abort()
            return

        # Register the retail mod up front so the UI can show progress for it.
        create_retail_mod(self._dest_path)
        self.mods = [center.installed.query('FS2')]

        self._slot_prog = {
            'total': ('Status', 0, 'Waiting...')
        }

    def work(self, paths):
        """Download InnoExtract, run it on the installer and sort the output."""
        gog_path, dest_path = paths
        self._local.slot = 'total'

        try:
            progress.update(0.03, 'Looking for InnoExtract...')

            # The link index maps sys.platform prefixes to (download, exe path).
            data = util.get(center.INNOEXTRACT_LINK)

            try:
                data = json.loads(data)
            except Exception:
                logging.exception('Failed to read JSON data!')
                return

            link = None
            path = None
            for plat, info in data.items():
                if sys.platform.startswith(plat):
                    link, path = info[:2]
                    break

            if link is None:
                logging.error('Couldn\'t find an innoextract download for "%s"!', sys.platform)
                return

            if not os.path.exists(dest_path):
                try:
                    os.makedirs(dest_path)
                except Exception:
                    logging.exception('Failed to create data path!')
                    self._reason = 'dest_path'
                    return

            inno = os.path.join(dest_path, os.path.basename(path))
            with tempfile.TemporaryDirectory() as tempdir:
                archive = os.path.join(tempdir, os.path.basename(link))

                progress.start_task(0.03, 0.10, 'Downloading InnoExtract...')
                if not util.safe_download(link, os.path.join(dest_path, archive)):
                    self._reason = 'download'
                    return

                progress.finish_task()
                progress.update(0.13, 'Extracting InnoExtract...')

                try:
                    util.extract_archive(archive, tempdir)
                    shutil.move(os.path.join(tempdir, path), inno)
                except Exception:
                    logging.exception('Failed to extract innoextract!')
                    self._reason = 'extract'
                    return

            # Make it executable
            mode = os.stat(inno).st_mode
            os.chmod(inno, mode | stat.S_IXUSR)

            progress.start_task(0.15, 0.75, 'Extracting FS2: %s')
            try:
                cmd = [inno, '-L', '-s', '-p', '-e', gog_path]
                logging.info('Running %s...', ' '.join(cmd))

                opts = dict()
                if sys.platform.startswith('win'):
                    # Hide the console window on Windows.
                    si = subprocess.STARTUPINFO()
                    si.dwFlags = subprocess.STARTF_USESHOWWINDOW
                    si.wShowWindow = subprocess.SW_HIDE

                    opts['startupinfo'] = si
                    opts['stdin'] = subprocess.PIPE

                p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=dest_path, **opts)

                if sys.platform.startswith('win'):
                    p.stdin.close()

                # InnoExtract rewrites its progress line with carriage
                # returns; split the stream on '\r'.
                buf = ''
                while p.poll() is None:
                    while '\r' not in buf:
                        line = p.stdout.read(10)
                        if not line:
                            break
                        buf += line.decode('utf8', 'replace')

                    buf = buf.split('\r')
                    line = buf.pop(0)
                    buf = '\r'.join(buf)

                    if 'MiB/s' in line:
                        try:
                            if ']' in line:
                                line = line.split(']')[1]

                            line = line.strip().split('MiB/s')[0] + 'MiB/s'
                            percent = float(line.split('%')[0]) / 100

                            progress.update(percent, line)
                        except Exception:
                            logging.exception('Failed to process InnoExtract output!')
                    else:
                        if line.strip() == 'not a supported Inno Setup installer':
                            # Special result: finish() shows a dedicated message.
                            self.post(-1)
                            return

                        logging.info('InnoExtract: %s', line)
            except Exception:
                logging.exception('InnoExtract failed!')
                self._reason = 'innoextract'
                return

            progress.finish_task()

            if not verify_retail_vps(os.path.join(dest_path, 'app')):
                self._reason = 'vps'
                progress.update(0.95, 'Failed! Cleanup...')

                try:
                    shutil.rmtree(dest_path, ignore_errors=True)
                except Exception:
                    logging.exception('Cleanup failed after missing VPs!')

                return

            progress.update(0.95, 'Moving files...')
            self._makedirs(os.path.join(dest_path, 'data/players'))
            self._makedirs(os.path.join(dest_path, 'data/movies'))

            for item in glob.glob(os.path.join(dest_path, 'app', '*.vp')):
                shutil.move(item, os.path.join(dest_path, os.path.basename(item)))

            for item in glob.glob(os.path.join(dest_path, 'app/data/players', '*.hcf')):
                shutil.move(item, os.path.join(dest_path, 'data/players', os.path.basename(item)))

            for item in glob.glob(os.path.join(dest_path, 'app/data2', '*.mve')):
                shutil.move(item, os.path.join(dest_path, 'data/movies', os.path.basename(item)))

            for item in glob.glob(os.path.join(dest_path, 'app/data3', '*.mve')):
                shutil.move(item, os.path.join(dest_path, 'data/movies', os.path.basename(item)))

            progress.update(0.99, 'Cleanup...')
            os.unlink(inno)
            shutil.rmtree(os.path.join(dest_path, 'app'), ignore_errors=True)
            shutil.rmtree(os.path.join(dest_path, 'tmp'), ignore_errors=True)

            self.post(dest_path)
        except Exception:
            logging.exception('Unknown exception during GOG unpacking!')

    def _makedirs(self, path):
        # Small helper: create the directory tree only if it's missing.
        if not os.path.isdir(path):
            os.makedirs(path)

    def finish(self):
        """Show the outcome and roll back the retail mod entry on failure."""
        results = self.get_results()
        if len(results) < 1:
            if self._reason == 'dest_path':
                msg = 'Failed to create the destination directory!'
            elif self._reason == 'download':
                msg = 'Failed to download InnoExtract! Make sure your AV or firewall isn\'t blocking Knossos!'
            elif self._reason == 'extract':
                msg = 'Failed to extract InnoExtract! Make sure your AV or firewall isn\'t blocking Knossos!'
            elif self._reason == 'innoextract':
                msg = 'Failed to extract the installer with InnoExtract! Make sure you selected the correct installer ' \
                    + 'and your AV isn\'t interfering!'
            elif self._reason == 'vps':
                msg = 'The installer does not contain the retail files! Please make sure you selected the correct installer!'
            else:
                # Fixed user-facing typo: "unkown" -> "unknown".
                msg = 'Unpacking the GOG installer failed for unknown reasons! Make sure Knossos can write to the ' + \
                    'selected data path or contact ngld for more information.'

            QtWidgets.QMessageBox.critical(None, translate('tasks', 'Error'), msg)
        elif results[0] == -1:
            QtWidgets.QMessageBox.critical(None, translate('tasks', 'Error'), self.tr(
                'The selected file wasn\'t a proper Inno Setup installer. Are you sure you selected the right file?'))
        else:
            # Success: refresh the UI and make sure an FSO build gets installed.
            center.main_win.update_mod_list()
            center.main_win.browser_ctrl.bridge.retailInstalled.emit()

            if not center.installed.has('FSO'):
                try:
                    fso = center.mods.query('FSO')
                    run_task(InstallTask(fso.resolve_deps()))
                except repo.ModNotFound:
                    logging.warning('Installing retail files but FSO is missing!')

            return

        # Failure: remove the retail mod entry created in __init__.
        path = os.path.join(self._dest_path, 'mod.json')
        if os.path.isfile(path):
            os.unlink(path)

        if center.installed.has('FS2'):
            center.installed.del_mod(center.installed.query('FS2'))
class GOGCopyTask(progress.Task):
    """Installs retail FS2 data by copying it from an existing GOG install.

    Verifies the retail VPs in the source folder, then copies VPs, pilot
    files and cutscenes into the Knossos data folder. The outcome is stored
    in ``_reason`` ('done' on success) and reported in finish().
    """

    can_abort = False
    _reason = None

    def __init__(self, gog_path, dest_path):
        super(GOGCopyTask, self).__init__()

        self.done.connect(self.finish)
        self.add_work([(gog_path, dest_path)])
        self.title = 'Copying retail files...'
        self._dest_path = dest_path

        self._makedirs(dest_path)
        # Register the retail mod up front so the UI can show progress for it.
        create_retail_mod(self._dest_path)
        self.mods = [center.installed.query('FS2')]

        self._slot_prog = {
            'total': ('Status', 0, 'Waiting...')
        }

    def work(self, paths):
        """Copy VPs, player profiles and cutscenes from gog_path to dest_path."""
        gog_path, dest_path = paths
        self._local.slot = 'total'

        try:
            # Bail out early if the source is not a complete retail install.
            if not verify_retail_vps(gog_path):
                self._reason = 'vps'
                return

            progress.update(0, 'Creating directories...')
            self._makedirs(os.path.join(dest_path, 'data/players'))
            self._makedirs(os.path.join(dest_path, 'data/movies'))

            progress.update(1 / 4., 'Copying VPs...')
            for item in glob.glob(os.path.join(gog_path, '*.vp')):
                shutil.copyfile(item, os.path.join(dest_path, os.path.basename(item)))

            progress.update(2 / 4., 'Copying player profiles...')
            for item in glob.glob(os.path.join(gog_path, 'data/players', '*.hcf')):
                shutil.copyfile(item, os.path.join(dest_path, 'data/players', os.path.basename(item)))

            progress.update(3 / 4., 'Copying cutscenes...')
            # Cutscenes may live in data, data2 or data3 as .mve or .ogg files.
            for ext in ('mve', 'ogg'):
                for sub in ('data', 'data2', 'data3'):
                    for item in glob.glob(os.path.join(gog_path, sub, '*.' + ext)):
                        shutil.copyfile(item, os.path.join(dest_path, 'data/movies', os.path.basename(item)))

            progress.update(1, 'Done')
            self._reason = 'done'
        except Exception:
            logging.exception('Unknown exception during copying of retail files!')

    def _makedirs(self, path):
        # Small helper: create the directory tree only if it's missing.
        if not os.path.isdir(path):
            os.makedirs(path)

    def finish(self):
        """Show the outcome and roll back the retail mod entry on failure."""
        if self._reason == 'done':
            # Success: refresh the UI and make sure an FSO build gets installed.
            center.main_win.update_mod_list()
            center.main_win.browser_ctrl.bridge.retailInstalled.emit()

            if not center.installed.has('FSO'):
                try:
                    fso = center.mods.query('FSO')
                    run_task(InstallTask(fso.resolve_deps()))
                except repo.ModNotFound:
                    logging.warning('Installing retail files but FSO is missing!')

            return
        elif self._reason == 'vps':
            msg = 'The selected directory does not contain the required retail VPs.'
        else:
            msg = 'Copying the retail files failed. Please make sure Knossos can write to the data path.'

        # Failure: remove the retail mod entry created in __init__.
        path = os.path.join(self._dest_path, 'mod.json')
        if os.path.isfile(path):
            os.unlink(path)

        if center.installed.has('FS2'):
            center.installed.del_mod(center.installed.query('FS2'))

        QtWidgets.QMessageBox.critical(None, 'Error', msg)
class CheckUpdateTask(progress.Task):
    """Background task that polls the stable update channel for a newer Knossos.

    Fetches the published version string for the current architecture and
    emits ``center.signals.update_avail`` when it is newer than the running
    version. Failures are logged and silently ignored.
    """

    background = True

    def __init__(self):
        super(CheckUpdateTask, self).__init__()

        self.add_work(('',))
        self.title = 'Checking for updates...'

    def work(self, item):
        progress.update(0, 'Checking for updates...')

        channel_url = util.pjoin(center.UPDATE_LINK, 'stable')
        raw_version = util.get(channel_url + '/version?arch=' + platform.machine())

        if raw_version is None:
            logging.error('Update check failed!')
            return

        try:
            remote = semantic_version.Version(raw_version)
        except Exception:
            logging.exception('Failed to parse remote version!')
            return

        # Only announce strictly newer versions.
        if remote > semantic_version.Version(center.VERSION):
            center.signals.update_avail.emit(remote)
class WindowsUpdateTask(progress.Task):
    """Downloads the Windows updater executable and launches it.

    Posts True when the updater was launched (Knossos then quits so the
    updater can replace it) and False on failure, which finish() reports.
    """

    def __init__(self):
        super(WindowsUpdateTask, self).__init__()

        self.done.connect(self.finish)
        self.add_work(('',))
        self.title = 'Installing update...'

    def work(self, item):
        # Download it.
        update_base = util.pjoin(center.UPDATE_LINK, 'stable')

        dir_name = tempfile.mkdtemp()
        updater = os.path.join(dir_name, 'knossos_updater.exe')

        progress.start_task(0, 0.98, 'Downloading update...')
        with open(updater, 'wb') as stream:
            util.download(update_base + '/updater.exe', stream)

        progress.finish_task()
        progress.update(0.99, 'Launching updater...')

        try:
            # Imported lazily: pywin32 is only available on Windows.
            import win32api
            win32api.ShellExecute(0, 'open', updater, '/D=' + os.getcwd(), os.path.dirname(updater), 1)
        except Exception:
            logging.exception('Failed to launch updater!')
            self.post(False)
        else:
            self.post(True)
            # Quit so the updater can overwrite the running installation.
            center.app.quit()

    def finish(self):
        res = self.get_results()

        if len(res) < 1 or not res[0]:
            QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('Failed to launch the update!'))
class MacUpdateTask(progress.Task):
    """Downloads the macOS update image and opens it with the Finder.

    Posts True when the image was opened (Knossos then quits so the user can
    replace the app bundle) and False on failure, which finish() reports.
    """

    def __init__(self):
        super(MacUpdateTask, self).__init__()

        self.done.connect(self.finish)
        self.add_work(('',))
        self.title = 'Installing update...'

    def work(self, item):
        channel_url = util.pjoin(center.UPDATE_LINK, 'stable')
        dmg_path = os.path.expandvars('$HOME/Downloads/Knossos.dmg')

        progress.start_task(0, 0.98, 'Downloading update...')
        with open(dmg_path, 'wb') as dmg:
            util.download(channel_url + '/Knossos.dmg', dmg)

        progress.finish_task()
        progress.update(0.99, 'Opening update...')

        launched = True
        try:
            # Hand the image to the Finder.
            subprocess.call(['open', dmg_path])
        except Exception:
            logging.exception('Failed to launch updater!')
            launched = False

        self.post(launched)
        # Quit so the user can replace the running app bundle.
        center.app.quit()

    def finish(self):
        outcome = self.get_results()

        if not outcome or not outcome[0]:
            QtWidgets.QMessageBox.critical(None, 'Knossos', self.tr('Failed to launch the update!'))
class CopyFolderTask(progress.Task):
    """Recursively copies ``src_path`` into ``dest_path``.

    Existing files in the destination are never overwritten. Progress is
    reported as the fraction of bytes copied.
    """

    def __init__(self, src_path, dest_path):
        super(CopyFolderTask, self).__init__()

        self.add_work(((src_path, dest_path),))
        self.title = 'Copying folder...'

    def work(self, p):
        """Scan the tree, replicate the directory structure, then copy files."""
        src_path, dest_path = p
        if not os.path.isdir(src_path):
            logging.error('CopyFolderTask(): The src_path "%s" is not a folder!' % src_path)
            return

        progress.update(0, 'Scanning...')

        dest_base = os.path.dirname(dest_path)
        if not os.path.isdir(dest_base):
            os.makedirs(dest_base)

        # First pass: create directories and build the copy plan along with
        # the total byte count for progress reporting.
        plan = []
        total_size = 0.0
        for src_prefix, dirs, files in os.walk(src_path):
            dest_prefix = os.path.join(dest_path, os.path.relpath(src_prefix, src_path))

            for sub in dirs:
                sdest = os.path.join(dest_prefix, sub)
                try:
                    os.mkdir(sdest)
                except OSError:
                    logging.exception('Failed to mkdir %s.' % sdest)

            for sub in files:
                sdest = os.path.join(dest_prefix, sub)
                ssrc = os.path.join(src_prefix, sub)

                plan.append((ssrc, sdest))
                total_size += os.stat(ssrc).st_size

        # Second pass: copy. Guard the division — if every planned file is
        # zero bytes, total_size is 0.0 and the original code raised
        # ZeroDivisionError here.
        bytes_done = 0
        for src, dest in plan:
            fraction = bytes_done / total_size if total_size else 1.0
            progress.update(fraction, os.path.relpath(src, src_path))

            # Don't overwrite anything
            if not os.path.isfile(dest):
                util.safe_copy(src, dest)

            bytes_done += os.stat(src).st_size
class VpExtractionTask(progress.Task):
    """Extracts every .vp archive of an ini mod into the installed mod's folder.

    Each VP is unpacked into a subfolder named after the archive (without
    the extension). Extracted archive paths are posted as results.
    """

    def __init__(self, installed_mod, ini_mod):
        super(VpExtractionTask, self).__init__()

        self.mod = installed_mod
        self.ini_mod = ini_mod
        self.title = 'Extracting VP files...'
        self._threads = 1  # VP extraction does not benefit from multiple threads

        for vp_file in os.listdir(ini_mod.folder):
            # We only look at vp files
            if not vp_file.lower().endswith(".vp"):
                continue

            vp_path = os.path.join(ini_mod.folder, vp_file)
            self.add_work((vp_path,))

    def work(self, vp_file):
        """Extract a single VP archive into <mod folder>/<archive name>."""
        # Use splitext instead of .replace(".vp", ""): the old code left the
        # extension on uppercase names like 'ROOT.VP' (which pass the
        # case-insensitive filter above) and also stripped '.vp' occurring in
        # the middle of a filename.
        base_filename = os.path.splitext(os.path.basename(vp_file))[0]
        dest_folder = os.path.join(self.mod.folder, base_filename)

        progress.start_task(0.0, 1.0, 'Extracting %s')
        util.extract_vp_file(vp_file, dest_folder)
        progress.finish_task()

        # Collect the extracted vp files so we can use that once extraction has finished
        self.post(vp_file)
class ApplyEngineFlagsTask(progress.Task):
    """Applies a set of FSO engine flags to every given engine build.

    For each build the supported flag list is queried, only the flags the
    build actually knows are stored, and the custom flag string is applied
    unconditionally. Settings are persisted once in finish().
    """

    # Limit to one thread because access to flags.lch causes
    # conflicts otherwise.
    # TODO: Will implement a better solution later
    _threads = 1

    def __init__(self, mods, flags, custom_flags):
        super(ApplyEngineFlagsTask, self).__init__()

        self.custom_flags = custom_flags
        self.flags = flags
        self.title = 'Applying engine flags...'
        self.done.connect(self.finish)
        self.add_work(mods)

    def work(self, mod):
        """Fetch the supported flags for one build and post (key, flag_info)."""
        key = '%s#%s' % (mod.mid, mod.version)
        flag_info = None
        try:
            # The executable without a label is the build's main binary.
            exes = mod.get_executables()
            for exe in exes:
                if not exe['label']:
                    flag_info = settings.get_fso_flags(exe['file'])
                    break
        except Exception:
            logging.exception('Failed to retrieve flags for %s!' % mod)
            return
        finally:
            progress.update(1, '')

        if not flag_info:
            # logging.warn is a deprecated alias; use logging.warning.
            logging.warning('Failed to retrieve flags for %s!' % mod)
            return

        self.post((key, flag_info))

    def finish(self):
        """Store the selected flags per build and persist the settings."""
        res = self.get_results()
        for key, flag_info in res:
            # Only keep flags this particular build actually supports.
            known_flags = set()
            for section in flag_info['flags'].values():
                for flag in section:
                    known_flags.add(flag['name'])

            build_flags = center.settings['fso_flags'].setdefault(key, {})
            for flag, val in self.flags.items():
                if flag in known_flags:
                    build_flags[flag] = val

            build_flags['#custom'] = self.custom_flags

        center.save_settings()
        QtWidgets.QMessageBox.information(None, 'Knossos', 'The settings were successfully applied to all builds.')
class FixUserBuildSelectionTask(progress.Task):
    """Clears user-selected FSO builds that no longer satisfy a mod's dependency.

    For each installed mod/tc with a user-chosen build, the engine dependency
    is resolved and the user selection removed when it points at a different
    engine or an incompatible version.
    """

    # Since we're doing next to no I/O, using multiple threads is pointless here.
    _threads = 1

    def __init__(self):
        super(FixUserBuildSelectionTask, self).__init__()

        self.title = 'Fixing build selections...'
        self.done.connect(self.finish)
        self._engine_cache = {}
        self.add_work(center.installed.get_list())

    def work(self, mod):
        """Validate one mod's user_exe against its declared engine dependency."""
        if mod.mtype not in ('mod', 'tc') or not mod.user_exe:
            # Nothing to do here
            return

        # Find the first dependency that resolves to an engine; its version
        # spec determines which builds are acceptable.
        spec = None
        engine_id = None
        for pkg in mod.packages:
            for dep in pkg.dependencies:
                is_engine = self._engine_cache.get(dep['id'])
                if is_engine is None:
                    try:
                        engine = center.installed.query(dep['id'])
                    except repo.ModNotFound:
                        # logging.warn is a deprecated alias; use logging.warning.
                        logging.warning('Build %s not found!' % dep['id'])
                        is_engine = False
                    else:
                        is_engine = engine.mtype == 'engine'

                    self._engine_cache[dep['id']] = is_engine

                if is_engine:
                    if dep['version']:
                        spec = util.Spec(dep['version'])
                    else:
                        spec = util.Spec('*')

                    engine_id = dep['id']
                    break

            if spec:
                break

        if not spec:
            # No build requirement found.
            logging.warning('Engine dependency not found for %s!' % mod)
            return

        # Drop the user selection if it targets the wrong engine or a version
        # outside the accepted range.
        if mod.user_exe[0] != engine_id or not spec.match(semantic_version.Version(mod.user_exe[1])):
            logging.debug('Removed user build from %s.' % mod)
            mod.user_exe = None
            mod.save_user()

    def finish(self):
        QtWidgets.QMessageBox.information(None, 'Knossos', 'Done.')
class FixImagesTask(progress.Task):
    """Repairs missing or empty mod images (logo, tile, banner, screenshots).

    Missing images are re-downloaded from the repository metadata where a
    URL is known; otherwise the stale reference is cleared. The retail FS2
    mod is special-cased since its images ship with Knossos.
    """

    _failed = 0  # number of images whose download failed
    _fixed = 0   # number of images successfully restored

    def __init__(self, do_devs=False):
        super(FixImagesTask, self).__init__()

        self._do_devs = do_devs
        self.title = 'Fixing mod images...'
        self.done.connect(self.finish)
        self.add_work(center.installed.get_list())

    def work(self, mod):
        """Check and repair the image references of a single installed mod."""
        if mod.dev_mode != self._do_devs:
            # Only process dev mods if _do_devs is True (and in that case also ignore all normal mods)
            return

        if mod.mid == 'FS2':
            # Just look for missing images
            missing = False
            for prop in ('tile', 'banner'):
                path = getattr(mod, prop)
                if not path or not os.path.isfile(path):
                    missing = True
                    break

            if not missing:
                for path in mod.screenshots:
                    if not os.path.isfile(path):
                        missing = True
                        break

            if missing:
                # If anything's missing, just write everything again.
                create_retail_mod(mod.folder)

            return

        try:
            rmod = center.mods.query(mod)
        except repo.ModNotFound:
            # No repository entry: fall back to an empty mod so every r_path
            # lookup below yields nothing.
            rmod = repo.InstalledMod()

        changed = False
        done = 0
        count = 0

        # NOTE(review): count only includes set single-image props, but done
        # below is incremented for all three props unconditionally, so the
        # progress fraction done/count can exceed 1 — confirm intended.
        for prop in ('logo', 'tile', 'banner'):
            if getattr(mod, prop):
                count += 1

        for prop in ('screenshots', 'attachments'):
            count += len(getattr(mod, prop))

        # Make sure the images are in the mod folder.
        for prop in ('logo', 'tile', 'banner'):
            img_path = getattr(mod, prop)
            r_path = getattr(rmod, prop)

            # Only process available images
            if img_path or r_path:
                # Remove the image if it's just an empty file
                if img_path and os.path.isfile(img_path) and os.stat(img_path).st_size == 0:
                    os.unlink(img_path)

                # Fix the reference if the file is missing or not recorded in the local metadata.
                if not img_path or not os.path.isfile(img_path):
                    changed = True

                    if r_path and '://' in r_path:
                        # Download the image
                        ext = os.path.splitext(r_path)[1]
                        dest = os.path.join(mod.folder, 'kn_' + prop + ext)

                        progress.start_task(done / count, 1 / count, '%s')
                        if util.safe_download(r_path, dest):
                            setattr(mod, prop, dest)
                            self._fixed += 1
                        else:
                            # Download failed
                            setattr(mod, prop, None)
                            self._failed += 1

                        progress.finish_task()
                    else:
                        # Local image is missing and we can't download it. Just remove the reference.
                        setattr(mod, prop, None)

            done += 1

        for prop in ('screenshots', 'attachments'):
            im_paths = getattr(mod, prop)
            r_paths = getattr(rmod, prop)

            if len(im_paths) != len(r_paths):
                # Somehow the lists don't match. Start from scratch
                im_paths = [None] * len(r_paths)

            for i, path in enumerate(im_paths):
                # Remove the image if it's just an empty file
                if path and os.path.isfile(path) and os.stat(path).st_size == 0:
                    os.unlink(path)

                # Fix the reference if the file is missing or not recorded in the local metadata.
                if not path or not os.path.isfile(path):
                    changed = True

                    if '://' in r_paths[i]:
                        # Download the image
                        ext = os.path.splitext(r_paths[i])[1]
                        dest = os.path.join(mod.folder, 'kn_' + prop + '_' + str(i) + ext)

                        progress.start_task(done / count, 1 / count, '%s')
                        if util.safe_download(r_paths[i], dest):
                            im_paths[i] = dest
                            self._fixed += 1
                        else:
                            # Download failed
                            im_paths[i] = None
                            self._failed += 1

                        progress.finish_task()
                    else:
                        im_paths[i] = None

                done += 1

        if changed:
            mod.save()

    def finish(self):
        QtWidgets.QMessageBox.information(None, 'Knossos',
            'Done. %d images fixed, %d images failed' % (self._fixed, self._failed))
def run_task(task, cb=None):
    """Launch *task* on the global task master and return it.

    When *cb* is given it is invoked with the task's results once the task
    signals completion.
    """
    if cb is not None:
        def forward_results():
            cb(task.get_results())

        task.done.connect(forward_results)

    center.signals.task_launched.emit(task)
    center.pmaster.add_task(task)
    return task
def create_retail_mod(dest_path):
    """Register retail FS2 as an installed mod rooted at *dest_path*.

    Writes the bundled tile/banner/screenshot images from Qt resources into
    the mod folder, adds the mod (with an FSO dependency) to the installed
    repository and saves its metadata. Returns the created InstalledMod.
    """
    # Remember to run tools/common/update_file_list.py if you add new files!
    files = {
        'tile': ':/html/images/retail_data/mod-retail.png',
        'banner': ':/html/images/retail_data/banner-retail.png',
    }
    screenshots = [
        ':/html/images/retail_data/screen01.jpg', ':/html/images/retail_data/screen02.jpg',
        ':/html/images/retail_data/screen03.jpg', ':/html/images/retail_data/screen04.jpg',
        ':/html/images/retail_data/screen05.jpg', ':/html/images/retail_data/screen06.jpg',
        ':/html/images/retail_data/screen07.jpg', ':/html/images/retail_data/screen08.jpg',
        ':/html/images/retail_data/screen09.jpg', ':/html/images/retail_data/screen10.jpg',
        ':/html/images/retail_data/screen11.jpg', ':/html/images/retail_data/screen12.jpg'
    ]

    mod = repo.InstalledMod({
        'title': 'Retail FS2',
        'id': 'FS2',
        'version': '1.20',
        'type': 'tc',
        'description':
            '[b][i]The year is 2367, thirty two years after the Great War. Or at least that is what YOU thought was the Great War. ' +
            'The endless line of Shivan capital ships, bombers and fighters with super advanced technology was nearly overwhelming.\n\n' +
            'As the Terran and Vasudan races finish rebuilding their decimated societies, a disturbance lurks in the not-so-far ' +
            'reaches of the Gamma Draconis system.\n\nYour nemeses have arrived... and they are wondering what happened to ' +
            'their scouting party.[/i][/b]\n\n[hr]FreeSpace 2 is a 1999 space combat simulation computer game developed by Volition as ' +
            'the sequel to Descent: FreeSpace – The Great War. It was completed ahead of schedule in less than a year, and ' +
            'released to very positive reviews.\n\nThe game continues on the story from Descent: FreeSpace, once again ' +
            'thrusting the player into the role of a pilot fighting against the mysterious aliens, the Shivans. While defending ' +
            'the human race and its alien Vasudan allies, the player also gets involved in putting down a rebellion. The game ' +
            'features large numbers of fighters alongside gigantic capital ships in a battlefield fraught with beams, shells and ' +
            'missiles in detailed star systems and nebulae.',
        'release_thread': 'http://www.hard-light.net/forums/index.php',
        'videos': ['https://www.youtube.com/watch?v=ufViyhrXzTE'],
        'first_release': '1999-09-30',
        'last_update': '1999-12-03',
        'folder': dest_path
    })

    mod.add_pkg(repo.InstalledPackage({
        'name': 'Content',
        'status': 'required',
        'folder': '.',
        'dependencies': [{
            'id': 'FSO',
            'version': '>=3.8.0-1'
        }]
    }))

    # Copy the bundled tile/banner images out of the Qt resource bundle.
    for prop, path in files.items():
        ext = os.path.splitext(path)[1]
        im_path = os.path.join(dest_path, 'kn_' + prop + ext)

        with open(im_path, 'wb') as stream:
            stream.write(read_file(path, decode=False))

        setattr(mod, prop, im_path)

    # Same for the screenshots.
    for i, path in enumerate(screenshots):
        ext = os.path.splitext(path)[1]
        im_path = os.path.join(dest_path, 'kn_screen_' + str(i) + ext)

        with open(im_path, 'wb') as stream:
            stream.write(read_file(path, decode=False))

        mod.screenshots.append(im_path)

    center.installed.add_mod(mod)
    mod.save()
    return mod
def verify_retail_vps(path):
    """Check whether *path* contains all retail FS2 .vp archives.

    The comparison is case-insensitive. Returns True only if every required
    VP is present; returns False when *path* is missing, unreadable or not a
    directory, or when any VP is absent.
    """
    retail_vps = {
        'root_fs2.vp', 'smarty_fs2.vp', 'sparky_fs2.vp', 'sparky_hi_fs2.vp', 'stu_fs2.vp',
        'tango1_fs2.vp', 'tango2_fs2.vp', 'tango3_fs2.vp', 'warble_fs2.vp'
    }

    try:
        # Make sure we ignore casing
        filenames = {item.lower() for item in os.listdir(path)}
    except OSError:
        # Catch OSError, not just FileNotFoundError: a user pointing at a
        # regular file raises NotADirectoryError (which is NOT a subclass of
        # FileNotFoundError) and permission problems raise PermissionError.
        return False

    return retail_vps.issubset(filenames)
| 83,107 | 3,870 | 773 |
86754a63d7a6db43ceb6f68fae04ee73681aaf77 | 15,325 | py | Python | sdk/python/pulumi_azure/authorization/assignment.py | suresh198526/pulumi-azure | bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/authorization/assignment.py | suresh198526/pulumi-azure | bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/authorization/assignment.py | suresh198526/pulumi-azure | bf27206a38d7a5c58b3c2c57ec8769fe3d0fc5d7 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['Assignment']
| 57.182836 | 472 | 0.691289 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['Assignment']
class Assignment(pulumi.CustomResource):
    """Assigns a given Principal (User, Group or Service Principal) to a given Role.

    NOTE: this class is generated by the Pulumi Terraform Bridge (tfgen); do not
    edit it by hand (see the file header).
    """

    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 principal_id: Optional[pulumi.Input[str]] = None,
                 role_definition_id: Optional[pulumi.Input[str]] = None,
                 role_definition_name: Optional[pulumi.Input[str]] = None,
                 scope: Optional[pulumi.Input[str]] = None,
                 skip_service_principal_aad_check: Optional[pulumi.Input[bool]] = None,
                 __props__=None,
                 __name__=None,
                 __opts__=None):
        """
        Assigns a given Principal (User or Group) to a given Role.

        ## Example Usage
        ### Using A Built-In Role)

        ```python
        import pulumi
        import pulumi_azure as azure

        primary = azure.core.get_subscription()
        example_client_config = azure.core.get_client_config()
        example_assignment = azure.authorization.Assignment("exampleAssignment",
            scope=primary.id,
            role_definition_name="Reader",
            principal_id=example_client_config.object_id)
        ```

        ### Custom Role & Service Principal)

        ```python
        import pulumi
        import pulumi_azure as azure

        primary = azure.core.get_subscription()
        example_client_config = azure.core.get_client_config()
        example_role_definition = azure.authorization.RoleDefinition("exampleRoleDefinition",
            role_definition_id="00000000-0000-0000-0000-000000000000",
            scope=primary.id,
            permissions=[azure.authorization.RoleDefinitionPermissionArgs(
                actions=["Microsoft.Resources/subscriptions/resourceGroups/read"],
                not_actions=[],
            )],
            assignable_scopes=[primary.id])
        example_assignment = azure.authorization.Assignment("exampleAssignment",
            name="00000000-0000-0000-0000-000000000000",
            scope=primary.id,
            role_definition_id=example_role_definition.role_definition_resource_id,
            principal_id=example_client_config.object_id)
        ```

        ### Custom Role & User)

        ```python
        import pulumi
        import pulumi_azure as azure

        primary = azure.core.get_subscription()
        example_client_config = azure.core.get_client_config()
        example_role_definition = azure.authorization.RoleDefinition("exampleRoleDefinition",
            role_definition_id="00000000-0000-0000-0000-000000000000",
            scope=primary.id,
            permissions=[azure.authorization.RoleDefinitionPermissionArgs(
                actions=["Microsoft.Resources/subscriptions/resourceGroups/read"],
                not_actions=[],
            )],
            assignable_scopes=[primary.id])
        example_assignment = azure.authorization.Assignment("exampleAssignment",
            name="00000000-0000-0000-0000-000000000000",
            scope=primary.id,
            role_definition_id=example_role_definition.role_definition_resource_id,
            principal_id=example_client_config.object_id)
        ```

        ### Custom Role & Management Group)

        ```python
        import pulumi
        import pulumi_azure as azure

        primary = azure.core.get_subscription()
        example_client_config = azure.core.get_client_config()
        example_group = azure.management.get_group()
        example_role_definition = azure.authorization.RoleDefinition("exampleRoleDefinition",
            role_definition_id="00000000-0000-0000-0000-000000000000",
            scope=primary.id,
            permissions=[azure.authorization.RoleDefinitionPermissionArgs(
                actions=["Microsoft.Resources/subscriptions/resourceGroups/read"],
                not_actions=[],
            )],
            assignable_scopes=[primary.id])
        example_assignment = azure.authorization.Assignment("exampleAssignment",
            name="00000000-0000-0000-0000-000000000000",
            scope=data["azurerm_management_group"]["primary"]["id"],
            role_definition_id=example_role_definition.role_definition_resource_id,
            principal_id=example_client_config.object_id)
        ```

        ## Import

        Role Assignments can be imported using the `resource id`, e.g.

        ```sh
        $ pulumi import azure:authorization/assignment:Assignment example /subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Authorization/roleAssignments/00000000-0000-0000-0000-000000000000
        ```

        - for scope `Subscription`, the id format is `/subscriptions/00000000-0000-0000-0000-000000000000/providers/Microsoft.Authorization/roleAssignments/00000000-0000-0000-0000-000000000000` - for scope `Resource Group`, the id format is `/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Authorization/roleAssignments/00000000-0000-0000-0000-000000000000`

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] name: A unique UUID/GUID for this Role Assignment - one will be generated if not specified. Changing this forces a new resource to be created.
        :param pulumi.Input[str] principal_id: The ID of the Principal (User, Group or Service Principal) to assign the Role Definition to. Changing this forces a new resource to be created.
        :param pulumi.Input[str] role_definition_id: The Scoped-ID of the Role Definition. Changing this forces a new resource to be created. Conflicts with `role_definition_name`.
        :param pulumi.Input[str] role_definition_name: The name of a built-in Role. Changing this forces a new resource to be created. Conflicts with `role_definition_id`.
        :param pulumi.Input[str] scope: The scope at which the Role Assignment applies to, such as `/subscriptions/0b1f6471-1bf0-4dda-aec3-111122223333`, `/subscriptions/0b1f6471-1bf0-4dda-aec3-111122223333/resourceGroups/myGroup`, or `/subscriptions/0b1f6471-1bf0-4dda-aec3-111122223333/resourceGroups/myGroup/providers/Microsoft.Compute/virtualMachines/myVM`, or `/providers/Microsoft.Management/managementGroups/myMG`. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] skip_service_principal_aad_check: If the `principal_id` is a newly provisioned `Service Principal` set this value to `true` to skip the `Azure Active Directory` check which may fail due to replication lag. This argument is only valid if the `principal_id` is a `Service Principal` identity. If it is not a `Service Principal` identity it will cause the role assignment to fail. Defaults to `false`.
        """
        # Support the deprecated positional aliases __name__/__opts__.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means the caller is looking up an existing resource via
        # get(); only then may __props__ be passed in pre-built.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()
            __props__['name'] = name
            if principal_id is None:
                raise TypeError("Missing required property 'principal_id'")
            __props__['principal_id'] = principal_id
            __props__['role_definition_id'] = role_definition_id
            __props__['role_definition_name'] = role_definition_name
            if scope is None:
                raise TypeError("Missing required property 'scope'")
            __props__['scope'] = scope
            __props__['skip_service_principal_aad_check'] = skip_service_principal_aad_check
            # principal_type is output-only; the provider fills it in.
            __props__['principal_type'] = None
        # Keep the legacy type token working for resources created under it.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure:role/assignment:Assignment")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Assignment, __self__).__init__(
            'azure:authorization/assignment:Assignment',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            name: Optional[pulumi.Input[str]] = None,
            principal_id: Optional[pulumi.Input[str]] = None,
            principal_type: Optional[pulumi.Input[str]] = None,
            role_definition_id: Optional[pulumi.Input[str]] = None,
            role_definition_name: Optional[pulumi.Input[str]] = None,
            scope: Optional[pulumi.Input[str]] = None,
            skip_service_principal_aad_check: Optional[pulumi.Input[bool]] = None) -> 'Assignment':
        """
        Get an existing Assignment resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] name: A unique UUID/GUID for this Role Assignment - one will be generated if not specified. Changing this forces a new resource to be created.
        :param pulumi.Input[str] principal_id: The ID of the Principal (User, Group or Service Principal) to assign the Role Definition to. Changing this forces a new resource to be created.
        :param pulumi.Input[str] principal_type: The type of the `principal_id`, e.g. User, Group, Service Principal, Application, etc.
        :param pulumi.Input[str] role_definition_id: The Scoped-ID of the Role Definition. Changing this forces a new resource to be created. Conflicts with `role_definition_name`.
        :param pulumi.Input[str] role_definition_name: The name of a built-in Role. Changing this forces a new resource to be created. Conflicts with `role_definition_id`.
        :param pulumi.Input[str] scope: The scope at which the Role Assignment applies to, such as `/subscriptions/0b1f6471-1bf0-4dda-aec3-111122223333`, `/subscriptions/0b1f6471-1bf0-4dda-aec3-111122223333/resourceGroups/myGroup`, or `/subscriptions/0b1f6471-1bf0-4dda-aec3-111122223333/resourceGroups/myGroup/providers/Microsoft.Compute/virtualMachines/myVM`, or `/providers/Microsoft.Management/managementGroups/myMG`. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] skip_service_principal_aad_check: If the `principal_id` is a newly provisioned `Service Principal` set this value to `true` to skip the `Azure Active Directory` check which may fail due to replication lag. This argument is only valid if the `principal_id` is a `Service Principal` identity. If it is not a `Service Principal` identity it will cause the role assignment to fail. Defaults to `false`.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = dict()
        __props__["name"] = name
        __props__["principal_id"] = principal_id
        __props__["principal_type"] = principal_type
        __props__["role_definition_id"] = role_definition_id
        __props__["role_definition_name"] = role_definition_name
        __props__["scope"] = scope
        __props__["skip_service_principal_aad_check"] = skip_service_principal_aad_check
        return Assignment(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        A unique UUID/GUID for this Role Assignment - one will be generated if not specified. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> pulumi.Output[str]:
        """
        The ID of the Principal (User, Group or Service Principal) to assign the Role Definition to. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "principal_id")

    @property
    @pulumi.getter(name="principalType")
    def principal_type(self) -> pulumi.Output[str]:
        """
        The type of the `principal_id`, e.g. User, Group, Service Principal, Application, etc.
        """
        return pulumi.get(self, "principal_type")

    @property
    @pulumi.getter(name="roleDefinitionId")
    def role_definition_id(self) -> pulumi.Output[str]:
        """
        The Scoped-ID of the Role Definition. Changing this forces a new resource to be created. Conflicts with `role_definition_name`.
        """
        return pulumi.get(self, "role_definition_id")

    @property
    @pulumi.getter(name="roleDefinitionName")
    def role_definition_name(self) -> pulumi.Output[str]:
        """
        The name of a built-in Role. Changing this forces a new resource to be created. Conflicts with `role_definition_id`.
        """
        return pulumi.get(self, "role_definition_name")

    @property
    @pulumi.getter
    def scope(self) -> pulumi.Output[str]:
        """
        The scope at which the Role Assignment applies to, such as `/subscriptions/0b1f6471-1bf0-4dda-aec3-111122223333`, `/subscriptions/0b1f6471-1bf0-4dda-aec3-111122223333/resourceGroups/myGroup`, or `/subscriptions/0b1f6471-1bf0-4dda-aec3-111122223333/resourceGroups/myGroup/providers/Microsoft.Compute/virtualMachines/myVM`, or `/providers/Microsoft.Management/managementGroups/myMG`. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "scope")

    @property
    @pulumi.getter(name="skipServicePrincipalAadCheck")
    def skip_service_principal_aad_check(self) -> pulumi.Output[bool]:
        """
        If the `principal_id` is a newly provisioned `Service Principal` set this value to `true` to skip the `Azure Active Directory` check which may fail due to replication lag. This argument is only valid if the `principal_id` is a `Service Principal` identity. If it is not a `Service Principal` identity it will cause the role assignment to fail. Defaults to `false`.
        """
        return pulumi.get(self, "skip_service_principal_aad_check")

    def translate_output_property(self, prop):
        # Map provider camelCase property names back to snake_case.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop

    def translate_input_property(self, prop):
        # Map snake_case property names to the provider's camelCase.
        return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| 175 | 14,768 | 23 |
6dda82884b7d5a59497992a12048cc672dc09402 | 686 | py | Python | solution/divide_and_conquer/1802/main.py | gkgg123/baekjoon | 4ff8a1238a5809e4958258b5f2eeab7b22105ce9 | [
"MIT"
] | 2,236 | 2019-08-05T00:36:59.000Z | 2022-03-31T16:03:53.000Z | solution/divide_and_conquer/1802/main.py | juy4556/baekjoon | bc0b0a0ebaa45a5bbd32751f84c458a9cfdd9f92 | [
"MIT"
] | 225 | 2020-12-17T10:20:45.000Z | 2022-01-05T17:44:16.000Z | solution/divide_and_conquer/1802/main.py | juy4556/baekjoon | bc0b0a0ebaa45a5bbd32751f84c458a9cfdd9f92 | [
"MIT"
] | 602 | 2019-08-05T00:46:25.000Z | 2022-03-31T13:38:23.000Z | # // Authored by : chj3748
# // Co-authored by : -
# // Link : http://boj.kr/28603d67d3014c79af724768c75865af
import sys
for T in range(int(input())):
status = list(map(int, input()))
if origami(0, len(status) - 1):
answer = 'YES'
else:
answer = 'NO'
print(answer) | 23.655172 | 64 | 0.555394 | # // Authored by : chj3748
# // Co-authored by : -
# // Link : http://boj.kr/28603d67d3014c79af724768c75865af
import sys
def input():
    """Read one line from stdin, trailing whitespace stripped.

    Deliberately shadows the builtin for faster competitive-programming I/O.
    """
    line = sys.stdin.readline()
    return line.rstrip()
def origami(start, end):
    """Return True if the crease pattern ``status[start:end+1]`` is foldable.

    A single crease is always foldable; otherwise the two values paired
    around the centre must differ, and both halves must fold recursively.
    """
    if start == end:
        return True
    mid = (start + end) // 2
    # Any matching pair makes this level unfoldable (short-circuits like
    # the original loop-and-break).
    if any(status[i] == status[end - i] for i in range(start, mid)):
        return False
    return origami(start, mid - 1) and origami(mid + 1, end)
for T in range(int(input())):
status = list(map(int, input()))
if origami(0, len(status) - 1):
answer = 'YES'
else:
answer = 'NO'
print(answer) | 343 | 0 | 46 |
124e5695acfe7d7fea2ad7e70e5484e2eac62392 | 1,164 | py | Python | SNIP.py | MiskovicMilica/Data-science-portfolio | 93041b066c550e083897a5e83601d32b1dd962e7 | [
"MIT"
] | null | null | null | SNIP.py | MiskovicMilica/Data-science-portfolio | 93041b066c550e083897a5e83601d32b1dd962e7 | [
"MIT"
] | null | null | null | SNIP.py | MiskovicMilica/Data-science-portfolio | 93041b066c550e083897a5e83601d32b1dd962e7 | [
"MIT"
] | 1 | 2022-03-08T08:48:44.000Z | 2022-03-08T08:48:44.000Z | import PIL.Image as pilimg
import os
import tkinter.messagebox as msg
# Using Pillow and EasyTkinter
# making a new folder in the working directory
current_directory = os.getcwd()
folder_name = os.path.join(current_directory, r'cropped_images')
if not os.path.exists(folder_name):
os.makedirs(folder_name)
else:
msg.showinfo("ERROR", "The folder 'cropped_images' already exists. Delete it and start this script again.")
assert ()
# cropping every image
for file in os.listdir(current_directory):
file_name = 'cropped_' + file
print(file_name)
if file.endswith('.jpg') or file.endswith('.JPG') or file.endswith('.PNG') or file.endswith('.png') or file.endswith('.jpeg') or file.endswith('.JPEG'):
# img = Image.open(file).convert('RGB')
img = pilimg.open(file).convert('RGB')
w, h = img.size
img_crop = img.crop((7, 170, w-10, h-35))
# making a new folder with cropped files
current_directory = os.path.join(folder_name, file_name)
img_crop.save(current_directory)
else:
msg.showinfo("ERROR", "The %s file is not image file." % file_name)
| 37.548387 | 157 | 0.668385 | import PIL.Image as pilimg
import os
import tkinter.messagebox as msg
# Using Pillow and EasyTkinter
# making a new folder in the working directory
current_directory = os.getcwd()
folder_name = os.path.join(current_directory, r'cropped_images')
if not os.path.exists(folder_name):
os.makedirs(folder_name)
else:
msg.showinfo("ERROR", "The folder 'cropped_images' already exists. Delete it and start this script again.")
assert ()
# cropping every image
for file in os.listdir(current_directory):
file_name = 'cropped_' + file
print(file_name)
if file.endswith('.jpg') or file.endswith('.JPG') or file.endswith('.PNG') or file.endswith('.png') or file.endswith('.jpeg') or file.endswith('.JPEG'):
# img = Image.open(file).convert('RGB')
img = pilimg.open(file).convert('RGB')
w, h = img.size
img_crop = img.crop((7, 170, w-10, h-35))
# making a new folder with cropped files
current_directory = os.path.join(folder_name, file_name)
img_crop.save(current_directory)
else:
msg.showinfo("ERROR", "The %s file is not image file." % file_name)
| 0 | 0 | 0 |
844c9a64c8bdc9f971454558ca10b67d18785a86 | 109 | py | Python | tests/test_sum.py | AxelPhi/pyhon-skeleton-project | 0b7415153c2cc98fed00b238129329001f20f8b5 | [
"MIT"
] | null | null | null | tests/test_sum.py | AxelPhi/pyhon-skeleton-project | 0b7415153c2cc98fed00b238129329001f20f8b5 | [
"MIT"
] | null | null | null | tests/test_sum.py | AxelPhi/pyhon-skeleton-project | 0b7415153c2cc98fed00b238129329001f20f8b5 | [
"MIT"
] | null | null | null | from skeleton import sum
| 13.625 | 27 | 0.623853 | from skeleton import sum
def test_sum():
    """skeleton.sum should add two integers."""
    result = sum(4, 5)
    assert result == 9
| 60 | 0 | 23 |
edba35679948474a3d96ba9902a53391a6b2105b | 1,180 | py | Python | example.py | bieniu/nettigo | dbbcb086290157469d196887a7950623b047f550 | [
"Apache-2.0"
] | null | null | null | example.py | bieniu/nettigo | dbbcb086290157469d196887a7950623b047f550 | [
"Apache-2.0"
] | 2 | 2022-03-21T07:21:02.000Z | 2022-03-21T07:21:09.000Z | example.py | bieniu/nettigo | dbbcb086290157469d196887a7950623b047f550 | [
"Apache-2.0"
] | null | null | null | """An example of using Nettigo Air Monitor package."""
import asyncio
import logging
import async_timeout
from aiohttp import ClientConnectorError, ClientError, ClientSession
from nettigo_air_monitor import (
ApiError,
AuthFailed,
ConnectionOptions,
InvalidSensorData,
NettigoAirMonitor,
)
logging.basicConfig(level=logging.DEBUG)
async def main():
    """Query a Nettigo Air Monitor device once and print the results."""
    websession = ClientSession()
    # NOTE(review): host/credentials are hard-coded example values.
    options = ConnectionOptions(host="nam", username="user", password="password")
    try:
        nam = await NettigoAirMonitor.create(websession, options)
        # Give the device at most 30 seconds for both requests combined.
        async with async_timeout.timeout(30):
            data = await nam.async_update()
            mac = await nam.async_get_mac_address()
    except (
        ApiError,
        AuthFailed,
        ClientConnectorError,
        ClientError,
        InvalidSensorData,
        asyncio.exceptions.TimeoutError,
    ) as error:
        print(f"Error: {error}")
    else:
        # Success path only: every value below was fetched without error.
        print(f"Firmware: {nam.software_version}")
        print(f"MAC address: {mac}")
        print(f"Data: {data}")
    # Release the HTTP session whether or not the queries succeeded.
    await websession.close()
# asyncio.get_event_loop() + manual run/close is deprecated boilerplate
# (DeprecationWarning since Python 3.10); asyncio.run() creates, runs and
# closes the event loop in one call.
asyncio.run(main())
| 24.081633 | 81 | 0.668644 | """An example of using Nettigo Air Monitor package."""
import asyncio
import logging
import async_timeout
from aiohttp import ClientConnectorError, ClientError, ClientSession
from nettigo_air_monitor import (
ApiError,
AuthFailed,
ConnectionOptions,
InvalidSensorData,
NettigoAirMonitor,
)
logging.basicConfig(level=logging.DEBUG)
async def main():
"""Main."""
websession = ClientSession()
options = ConnectionOptions(host="nam", username="user", password="password")
try:
nam = await NettigoAirMonitor.create(websession, options)
async with async_timeout.timeout(30):
data = await nam.async_update()
mac = await nam.async_get_mac_address()
except (
ApiError,
AuthFailed,
ClientConnectorError,
ClientError,
InvalidSensorData,
asyncio.exceptions.TimeoutError,
) as error:
print(f"Error: {error}")
else:
print(f"Firmware: {nam.software_version}")
print(f"MAC address: {mac}")
print(f"Data: {data}")
await websession.close()
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
| 0 | 0 | 0 |
4db26a2c6ba50db6cdd2a506721f80ec77a66d3c | 775 | py | Python | 14B-088/Continuum/imaging.py | e-koch/VLA_Lband | 8fca7b2de0b88ce5c5011b34bf3936c69338d0b0 | [
"MIT"
] | 1 | 2021-03-08T23:19:12.000Z | 2021-03-08T23:19:12.000Z | 14B-088/Continuum/imaging.py | e-koch/VLA_Lband | 8fca7b2de0b88ce5c5011b34bf3936c69338d0b0 | [
"MIT"
] | null | null | null | 14B-088/Continuum/imaging.py | e-koch/VLA_Lband | 8fca7b2de0b88ce5c5011b34bf3936c69338d0b0 | [
"MIT"
] | null | null | null |
'''
Imaging tests for the 14B-088 continuum (I) data.
'''

import os

from tasks import tclean  # CASA imaging task

vis = "14B-088_continuum_I.ms"
output_path = "imaging_nosub"
if not os.path.exists(output_path):
    os.mkdir(output_path)

# Mosaic MFS image of the M33 fields in spw 1, natural weighting,
# cleaned interactively down to 0.1 mJy/beam.
tclean(vis=vis,
       datacolumn='data',
       imagename=os.path.join(output_path, 'M33_14B-088_continuum.dirty'),
       field='M33*',
       spw="1",
       imsize=[2560, 2560],
       cell='3arcsec',
       specmode='mfs',  # multi-frequency synthesis (continuum)
       startmodel=None,
       gridder='mosaic',
       weighting='natural',
       niter=10000,
       threshold='0.1mJy/beam',
       phasecenter='J2000 01h33m50.904 +30d39m35.79',
       pblimit=-1,  # negative value: no primary-beam cutoff applied
       usemask='pb',
       pbmask=0.2,  # presumably masks where PB response > 0.2 -- confirm vs CASA docs
       deconvolver='hogbom',
       pbcor=False,
       interactive=True
       )
| 20.394737 | 74 | 0.59871 |
'''
Imaging tests for the 14B-088 continuum (I) data.
'''
import os
from tasks import tclean
vis = "14B-088_continuum_I.ms"
output_path = "imaging_nosub"
if not os.path.exists(output_path):
os.mkdir(output_path)
tclean(vis=vis,
datacolumn='data',
imagename=os.path.join(output_path, 'M33_14B-088_continuum.dirty'),
field='M33*',
spw="1",
imsize=[2560, 2560],
cell='3arcsec',
specmode='mfs',
startmodel=None,
gridder='mosaic',
weighting='natural',
niter=10000,
threshold='0.1mJy/beam',
phasecenter='J2000 01h33m50.904 +30d39m35.79',
pblimit=-1,
usemask='pb',
pbmask=0.2,
deconvolver='hogbom',
pbcor=False,
interactive=True
)
| 0 | 0 | 0 |
26a0adc919b98a736e69161ed05d46dca7bcccdc | 136 | py | Python | src/pdupes/__init__.py | n8henrie/pdupes | a03fedb5cf125560ba0c549a95d2179ef843e3a6 | [
"MIT"
] | null | null | null | src/pdupes/__init__.py | n8henrie/pdupes | a03fedb5cf125560ba0c549a95d2179ef843e3a6 | [
"MIT"
] | null | null | null | src/pdupes/__init__.py | n8henrie/pdupes | a03fedb5cf125560ba0c549a95d2179ef843e3a6 | [
"MIT"
] | null | null | null | __version__ = 'v0.2.0'
# Package metadata.
__author__ = 'Nathan Henrie'
__email__ = 'nate@n8henrie.com'

# Re-export the main class so `from pdupes import DuplicateFinder` works.
from pdupes.duplicatefinder import DuplicateFinder
| 22.666667 | 50 | 0.786765 | __version__ = 'v0.2.0'
__author__ = 'Nathan Henrie'
__email__ = 'nate@n8henrie.com'
from pdupes.duplicatefinder import DuplicateFinder
| 0 | 0 | 0 |
d0633de5b9d298d6c06e133da4c0e0461ea9fc73 | 821 | py | Python | backend/phonebook/employees/migrations/0004_auto_20160226_1739.py | unmade/phonebook | 121b2e5bb2eb217f5e183aa0c39a6d12f227d5e3 | [
"MIT"
] | null | null | null | backend/phonebook/employees/migrations/0004_auto_20160226_1739.py | unmade/phonebook | 121b2e5bb2eb217f5e183aa0c39a6d12f227d5e3 | [
"MIT"
] | null | null | null | backend/phonebook/employees/migrations/0004_auto_20160226_1739.py | unmade/phonebook | 121b2e5bb2eb217f5e183aa0c39a6d12f227d5e3 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-26 17:39
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
| 30.407407 | 194 | 0.651644 | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-02-26 17:39
from __future__ import unicode_literals
import django.db.models.deletion
from django.db import migrations, models
class Migration(migrations.Migration):
    # Generated migration: adds Employee.is_retired and relaxes
    # Employee.boss to a nullable self-referential FK (SET_NULL) exposed
    # as the 'secretary' reverse relation. The verbose_name values are
    # user-facing Russian labels and are left untouched.

    dependencies = [
        ('employees', '0003_auto_20160102_1338'),
    ]

    operations = [
        migrations.AddField(
            model_name='employee',
            name='is_retired',
            field=models.BooleanField(default=False, verbose_name='Числится уволеным'),
        ),
        migrations.AlterField(
            model_name='employee',
            name='boss',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='secretary', to='employees.Employee', verbose_name='Является секретарем у'),
        ),
    ]
| 0 | 647 | 23 |
6168ca2a87dd3f851f217b56cdfdb372a2954cbb | 1,438 | py | Python | baseline.py | chenchongthu/cnn-text | c3d872a10d9ba647c8048d6d42b9396374a6f181 | [
"Apache-2.0"
] | 4 | 2018-09-06T02:54:54.000Z | 2020-10-23T03:45:20.000Z | baseline.py | chenchongthu/cnn-text | c3d872a10d9ba647c8048d6d42b9396374a6f181 | [
"Apache-2.0"
] | null | null | null | baseline.py | chenchongthu/cnn-text | c3d872a10d9ba647c8048d6d42b9396374a6f181 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
import tensorflow as tf

import data_helpers
from sklearn.feature_extraction.text import TfidfVectorizer
from xgboost import XGBClassifier
from sklearn import metrics
# NOTE(review): sklearn.cross_validation is the legacy module name; newer
# scikit-learn versions moved train_test_split to sklearn.model_selection.
from sklearn.cross_validation import train_test_split

# TF-IDF + XGBoost baseline for the rt-polarity sentiment data set.

# Parameters
# ==================================================

# Data loading params
tf.flags.DEFINE_float("dev_sample_percentage", .1, "Percentage of the training data to use for validation")
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
# Help string fixed: it previously said "positive" for the negative file.
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the negative data.")

FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()

# Data Preparation
# ==================================================

# Load data
print("Loading data...")
x_text, y = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)

# Collapse the one-hot labels into a single 0/1 class id
# (first column == 0 maps to class 1, first column == 1 maps to class 0,
# exactly as the original append loop did).
yy = [int(label[0] == 0) for label in y]

# The original script also split on the raw one-hot labels first and threw
# the result away; that dead split has been removed.
X_train_raw, X_test_raw, y_train, y_test = train_test_split(x_text, yy)

vectorizer = TfidfVectorizer()
X_train = vectorizer.fit_transform(X_train_raw)
X_test = vectorizer.transform(X_test_raw)

xgbc = XGBClassifier()
xgbc.fit(X_train, y_train)
pres = xgbc.predict(X_test)

# print statement -> function call so the script also runs on Python 3.
print(metrics.accuracy_score(y_test, pres))
| 31.26087 | 124 | 0.721836 | #! /usr/bin/env python
import tensorflow as tf
import data_helpers
from sklearn.feature_extraction.text import TfidfVectorizer
from xgboost import XGBClassifier
from sklearn import metrics
# Parameters
from sklearn.cross_validation import train_test_split
# ==================================================
# Data loading params
tf.flags.DEFINE_float("dev_sample_percentage", .1, "Percentage of the training data to use for validation")
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the positive data.")
FLAGS = tf.flags.FLAGS
FLAGS._parse_flags()
# Data Preparatopn
# ==================================================
# Load data
print("Loading data...")
x_text, y = data_helpers.load_data_and_labels(FLAGS.positive_data_file, FLAGS.negative_data_file)
X_train_raw, X_test_raw, y_train, y_test = train_test_split(x_text,
y)
# Build vocabulary
yy=[]
for i in y:
if i[0]==0:
yy.append(1)
if i[0]==1:
yy.append(0)
X_train_raw, X_test_raw, y_train, y_test = train_test_split(x_text,
yy)
vectorizer = TfidfVectorizer()
X_train = vectorizer.fit_transform(X_train_raw)
X_test = vectorizer.transform(X_test_raw)
xgbc=XGBClassifier()
xgbc.fit(X_train,y_train)
pres=xgbc.predict(X_test)
print metrics.accuracy_score(y_test, pres)
| 0 | 0 | 0 |
0ce3ce04a7a6c7616409bd7c068261877bb29eda | 1,900 | py | Python | runtrack/controllers/auth.py | horeilly1101/runtrack | a02f4d449102c73d95b1348ac069f790f7281747 | [
"MIT"
] | 2 | 2018-10-08T01:51:39.000Z | 2019-03-15T20:15:46.000Z | runtrack/controllers/auth.py | horeilly1101/runtrack | a02f4d449102c73d95b1348ac069f790f7281747 | [
"MIT"
] | 7 | 2018-08-29T20:48:43.000Z | 2019-07-08T06:40:15.000Z | runtrack/controllers/auth.py | horeilly1101/runtrack | a02f4d449102c73d95b1348ac069f790f7281747 | [
"MIT"
] | null | null | null | """Contains controllers that deal with user accounts"""
from flask_login import logout_user
from runtrack.views.forms import LoginForm
from runtrack.models import db
from flask import render_template, url_for, flash, redirect, Blueprint
from flask_login import login_user, current_user
from runtrack.views.forms import RegistrationForm
from runtrack.models.tables import User
# blue print to handle authentication
auth = Blueprint("accounts", __name__)
@auth.route('/logout')
def logout():
    """Log the current user out and send them back to the login page."""
    logout_user()
    login_page = url_for('login')
    return redirect(login_page)
@auth.route("/login", methods=["GET", "POST"])
def login():
"""route for the login page"""
if current_user.is_authenticated:
return redirect(url_for('accounts.index'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None or not user.check_password(form.password.data):
flash('Invalid email or password')
return redirect(url_for('login'))
login_user(user, remember=form.remember_me.data)
return redirect(url_for('accounts.index'))
return render_template("auth/login.html", form=form)
@auth.route("/register", methods=["GET", "POST"])
def register():
"""route for the register page"""
if current_user.is_authenticated:
return redirect(url_for('accounts.index'))
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data, name=form.name.data)
user.set_password(form.password.data)
db.session.add(user)
db.session.commit()
login_user(user, remember=form.remember_me.data)
flash("Welcome to runtrack!")
return redirect(url_for('accounts.index'))
return render_template("auth/register.html", form=form)
| 31.666667 | 71 | 0.699474 | """Contains controllers that deal with user accounts"""
from flask_login import logout_user
from runtrack.views.forms import LoginForm
from runtrack.models import db
from flask import render_template, url_for, flash, redirect, Blueprint
from flask_login import login_user, current_user
from runtrack.views.forms import RegistrationForm
from runtrack.models.tables import User
# blue print to handle authentication
auth = Blueprint("accounts", __name__)
@auth.route('/logout')
def logout():
"""route for the logout page. Logs a user out of their account."""
logout_user()
return redirect(url_for('login'))
@auth.route("/login", methods=["GET", "POST"])
def login():
"""route for the login page"""
if current_user.is_authenticated:
return redirect(url_for('accounts.index'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None or not user.check_password(form.password.data):
flash('Invalid email or password')
return redirect(url_for('login'))
login_user(user, remember=form.remember_me.data)
return redirect(url_for('accounts.index'))
return render_template("auth/login.html", form=form)
@auth.route("/register", methods=["GET", "POST"])
def register():
"""route for the register page"""
if current_user.is_authenticated:
return redirect(url_for('accounts.index'))
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data, name=form.name.data)
user.set_password(form.password.data)
db.session.add(user)
db.session.commit()
login_user(user, remember=form.remember_me.data)
flash("Welcome to runtrack!")
return redirect(url_for('accounts.index'))
return render_template("auth/register.html", form=form)
| 0 | 0 | 0 |
d2432de3708aa10905adf6b22394d7649afcbe5e | 6,134 | py | Python | django_eel/__init__.py | seLain/Eel | ed46436040724315ae5e2d67b4bd867cef68620d | [
"MIT"
] | 21 | 2018-07-16T03:59:11.000Z | 2021-08-02T07:23:02.000Z | django_eel/__init__.py | seLain/Eel | ed46436040724315ae5e2d67b4bd867cef68620d | [
"MIT"
] | 3 | 2018-07-16T04:06:41.000Z | 2021-07-18T23:29:47.000Z | django_eel/__init__.py | seLain/Eel | ed46436040724315ae5e2d67b4bd867cef68620d | [
"MIT"
] | 7 | 2018-07-16T03:59:25.000Z | 2021-08-25T07:11:47.000Z | from django.http import HttpResponse
import sys, os
import re as rgx
import random as rnd
import pkg_resources as pkg
import json as jsn
import gevent as gvt
import django_eel.browsers as brw
_js_root_dir = os.sep.join(['django_eel', 'static', 'eel', 'js'])
_eel_js_file = pkg.resource_filename(pkg.Requirement.parse('django-eel'), 'django_eel/static/eel/js/eel.js')
#_eel_js = open(os.sep.join([_js_root_dir, _eel_js_file]), encoding='utf-8').read()
_eel_js = open(_eel_js_file, encoding='utf-8').read()
root_path = ''
_websockets = []
_exposed_functions = {}
_js_functions = []
_call_number = 0
_start_geometry = {}
_mock_queue = []
_mock_queue_done = set()
_on_close_callback = None
_call_return_values = {}
_call_return_callbacks = {}
_default_options = {
'mode': 'chrome-app',
'host': 'localhost',
'port': 8000,
'chromeFlags': []
}
# Public functions
# start localhost browsing
# Routes : eel/urls.py
# intercepts request of `eel.js`,
# replaces /** _py_functions **/ and /** _start_geometry **/
# Private functions | 32.62766 | 108 | 0.620965 | from django.http import HttpResponse
import sys, os
import re as rgx
import random as rnd
import pkg_resources as pkg
import json as jsn
import gevent as gvt
import django_eel.browsers as brw

# Locate the bundled eel.js (served to the browser) and read it once at
# import time.
_js_root_dir = os.sep.join(['django_eel', 'static', 'eel', 'js'])
_eel_js_file = pkg.resource_filename(pkg.Requirement.parse('django-eel'), 'django_eel/static/eel/js/eel.js')
#_eel_js = open(os.sep.join([_js_root_dir, _eel_js_file]), encoding='utf-8').read()
_eel_js = open(_eel_js_file, encoding='utf-8').read()

# Mutable module-level state shared by the expose/init machinery below.
root_path = ''              # client-side root directory, set by init()
_websockets = []
_exposed_functions = {}     # name -> Python callable registered via expose()
_js_functions = []          # JS-side function names discovered by init()
_call_number = 0
_start_geometry = {}
_mock_queue = []
_mock_queue_done = set()
_on_close_callback = None
_call_return_values = {}
_call_return_callbacks = {}

# Defaults used when starting the browser; 'chrome-app' launches Chrome
# in app mode against host:port.
_default_options = {
    'mode': 'chrome-app',
    'host': 'localhost',
    'port': 8000,
    'chromeFlags': []
}
# Public functions
def expose(name_or_function=None):
    """Expose a Python function to JavaScript.

    Supports three call forms: ``@eel.expose``, ``@eel.expose()`` and
    ``@eel.expose("alias")``.
    """
    # Bare parentheses: '@eel.expose()' behaves exactly like '@eel.expose'.
    if name_or_function is None:
        return expose
    # '@eel.expose("alias")': register the decorated function under the alias.
    if isinstance(name_or_function, str):
        alias = name_or_function
        def decorator(function):
            _expose(alias, function)
            return function
        return decorator
    # Plain '@eel.expose': register under the function's own name.
    target = name_or_function
    _expose(target.__name__, target)
    return target
def init(path):
    """Scan *path* for web files and pre-register the JS functions they expose.

    Sets the module-global ``root_path`` and, for every ``eel.expose(name)``
    call found in .js/.html/.txt/.htm/.xhtml files under it, installs a
    Python-side mock so the JS function can be called before any websocket
    connects.  Raises AssertionError for malformed expose calls.
    """
    global root_path, _js_functions
    root_path = _get_real_path(path)
    js_functions = set()
    for root, _, files in os.walk(root_path):
        for name in files:
            allowed_extensions = '.js .html .txt .htm .xhtml'.split()
            if not any(name.endswith(ext) for ext in allowed_extensions):
                continue
            try:
                with open(os.path.join(root, name), encoding='utf-8') as file:
                    contents = file.read()
                    expose_calls = set()
                    finder = rgx.findall(r'eel\.expose\((.*)\)', contents)
                    for expose_call in finder:
                        expose_call = expose_call.strip()
                        msg = "eel.expose() call contains '(' or '='"
                        # Reject anything that is not a bare identifier: a '(' or '='
                        # means the regex captured a call/assignment expression.
                        if rgx.findall(r'[\(=]', expose_call) != []:
                            raise AssertionError(msg)
                        expose_calls.add(expose_call)
                    js_functions.update(expose_calls)
            except UnicodeDecodeError:
                pass # Malformed file probably
    _js_functions = list(js_functions)
    for js_function in _js_functions:
        _mock_js_function(js_function)
# start localhost browsing
def start(*start_urls, **kwargs):
    """Open the configured browser on *start_urls*.

    Recognised keyword arguments: ``options`` (merged over the module's
    ``_default_options``), ``size``, ``position``, ``geometry`` (per-page
    window geometry) and ``callback`` (stored as the module-global close
    callback).
    """
    import socket  # stdlib; needed for the ephemeral-port lookup below

    global _on_close_callback
    options = kwargs.pop('options', {})
    size = kwargs.pop('size', None)
    position = kwargs.pop('position', None)
    geometry = kwargs.pop('geometry', {})
    _on_close_callback = kwargs.pop('callback', None)
    # Fill in any option the caller did not override.
    for k, v in list(_default_options.items()):
        if k not in options:
            options[k] = v
    _start_geometry['default'] = {'size': size, 'position': position}
    _start_geometry['pages'] = geometry
    if options['port'] == 0:
        # Bug fix: 'socket' was never imported at module level, so asking the
        # OS for a free port (port == 0) raised NameError before this change.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.bind(('localhost', 0))
        options['port'] = sock.getsockname()[1]
        sock.close()
    brw.open(start_urls, options)
def sleep(seconds):
    """Cooperative sleep: yields to the gevent event loop for *seconds*."""
    gvt.sleep(seconds)
def spawn(function, *args, **kwargs):
    """Run *function(*args, **kwargs)* in a new gevent greenlet (fire-and-forget)."""
    gvt.spawn(function, *args, **kwargs)
# Routes : eel/urls.py
# intercepts request of `eel.js`,
# replaces /** _py_functions **/ and /** _start_geometry **/
def _eel(request):
    """Django view serving eel.js with Python-side metadata injected.

    Replaces the two placeholder comments in the bundled eel.js template
    with the list of exposed Python function names and the serialized
    window-geometry settings, and returns it as JavaScript.
    """
    funcs = list(_exposed_functions.keys())
    page = _eel_js.replace('/** _py_functions **/',
                '_py_functions: %s,' % funcs)
    page = page.replace('/** _start_geometry **/',
                '_start_geometry: %s,' % jsn.dumps(_start_geometry))
    response = HttpResponse(content=page)
    response['Content-Type'] = 'application/javascript'
    return response
# Private functions
def _expose(name, function):
    """Register *function* under *name*, rejecting duplicate registrations."""
    if name in _exposed_functions:
        raise AssertionError('Already exposed function with name "%s"' % name)
    _exposed_functions[name] = function
def _get_real_path(path):
if getattr(sys, 'frozen', False):
return os.path.join(sys._MEIPASS, path)
else:
return os.path.abspath(path)
def _mock_js_function(f):
    """Install a module-global stand-in named *f* that queues calls via _mock_call.

    Lets Python code call a JS-exposed function before any websocket is
    connected; the queued calls are replayed once a client attaches.
    *f* must be a bare identifier (exec builds the assignment from it).
    """
    exec('%s = lambda *args: _mock_call("%s", args)' % (f, f), globals())
def _mock_call(name, args):
    """Queue a JS call made before any websocket exists; return its retriever.

    The call descriptor is appended to the module-global _mock_queue for
    later replay; the returned value is the same retrieval function a live
    call would get (see _call_return).
    """
    call_object = _call_object(name, args)
    global _mock_queue
    _mock_queue += [call_object]
    return _call_return(call_object)
def _call_object(name, args):
    """Build a uniquely-identified call descriptor for the JS function *name*."""
    global _call_number
    _call_number += 1
    # The random fractional part keeps ids unique even if counters collide.
    return {'call': _call_number + rnd.random(), 'name': name, 'args': args}
def _call_return(call):
    """Return a retrieval function for the JS call described by *call*.

    The returned function either registers *callback* to receive the JS
    return value asynchronously, or (with no callback) busy-waits for it.
    NOTE(review): the poll loop caps at 10000 * 1 ms (~10 s); on timeout it
    falls off the loop and implicitly returns None, which is
    indistinguishable from a genuine None result.
    """
    call_id = call['call']
    def return_func(callback=None):
        if callback is not None:
            _call_return_callbacks[call_id] = callback
        else:
            # Poll in 1 ms steps; `sleep` is the gevent-cooperative wrapper above.
            for _ in range(10000):
                if call_id in _call_return_values:
                    return _call_return_values.pop(call_id)
                sleep(0.001)
    return return_func
def _import_js_function(f):
    """Install a module-global proxy named *f* that forwards calls to JS via _js_call."""
    exec('%s = lambda *args: _js_call("%s", args)' % (f, f), globals())
def _process_message(message, ws):
    """Dispatch one decoded websocket *message*.

    'call' messages invoke the named exposed Python function and send its
    result back over *ws*; 'return' messages deliver a JS return value either
    to a registered callback or into the store polled by _call_return.
    Anything else is logged as invalid.
    """
    if 'call' in message:
        return_val = _exposed_functions[message['name']](*message['args'])
        ws._repeated_send(jsn.dumps({ 'return': message['call'],
                    'value': return_val }))
    elif 'return' in message:
        call_id = message['return']
        if call_id in _call_return_callbacks:
            # A callback was registered: consume it and deliver the value.
            callback = _call_return_callbacks.pop(call_id)
            callback(message['value'])
        else:
            # No callback: park the value for a polling retriever.
            _call_return_values[call_id] = message['value']
    else:
        print('Invalid message received: ', message)
def _js_call(name, args):
call_object = _call_object(name, args)
for _, ws in _websockets:
ws._repeated_send(jsn.dumps(call_object))
return _call_return(call_object) | 4,751 | 0 | 341 |
589ab6a47b4a4acf5663dc0ec40ac653648b9bb0 | 562 | py | Python | errors.py | Xideron/LootNanny | cfbf5127c25c5af63fa2414615ff848cfe436b0f | [
"MIT"
] | 9 | 2021-11-23T04:11:00.000Z | 2022-03-28T17:36:27.000Z | errors.py | Xideron/LootNanny | cfbf5127c25c5af63fa2414615ff848cfe436b0f | [
"MIT"
] | 2 | 2021-12-09T13:53:29.000Z | 2021-12-11T03:38:49.000Z | errors.py | Xideron/LootNanny | cfbf5127c25c5af63fa2414615ff848cfe436b0f | [
"MIT"
] | 3 | 2022-01-04T00:03:28.000Z | 2022-02-26T05:08:00.000Z | import traceback
from helpers import resource_path
import sys
import time
| 26.761905 | 69 | 0.649466 | import traceback
from helpers import resource_path
import sys
import time
def log_crash(e: Exception):
    """Write a one-off crash report file containing the active traceback and *e*."""
    report_path = resource_path(f"crash_report_{time.time()}.log")
    frames = traceback.format_tb(sys.exc_info()[2])
    with open(report_path, 'w') as report:
        report.write("\n".join(frames) + "\n")
        report.write(str(e))
def log_error(e: Exception):
    """Append the active traceback and *e* to the shared crash_logs.log file."""
    # Plain string literal: the previous f-string had no placeholders (flake8 F541).
    error_filepath = resource_path("crash_logs.log")
    tb = traceback.format_tb(sys.exc_info()[2])
    with open(error_filepath, 'a') as f:
        f.write("\n".join(tb) + "\n")
        f.write(str(e))
| 440 | 0 | 46 |
34b1fd795d401264b6e36fa0cd4e08325149fb8c | 3,365 | py | Python | SchemDraw/custom_elements.py | AdriaanRol/SchemDraw | 74e60217c82c2942c4ca95aa64dc440928f61189 | [
"MIT"
] | 3 | 2019-01-24T14:49:32.000Z | 2021-03-29T11:28:55.000Z | SchemDraw/custom_elements.py | AdriaanRol/SchemDraw | 74e60217c82c2942c4ca95aa64dc440928f61189 | [
"MIT"
] | 1 | 2020-09-09T14:36:29.000Z | 2020-09-09T15:02:53.000Z | SchemDraw/custom_elements.py | AdriaanRol/SchemDraw | 74e60217c82c2942c4ca95aa64dc440928f61189 | [
"MIT"
] | 1 | 2019-06-07T14:12:16.000Z | 2019-06-07T14:12:16.000Z | """
This file contains custom elements defined by Adriaan Rol
The intention is that these get merged into SchemDraw.elements after cleaning
up so as to merge them into the master of CDelker
"""
import numpy as np
import SchemDraw.elements as e
# Low-pass filter symbol: a short polyline (flat then dropping) drawn on an
# RBOX base element.
LOW_PASS = {
    'name': 'LOW_PASS',
    'base': e.RBOX,
    'paths': [[[0.15, 0.05],
               [0.6, 0.05],
               [0.8, -.15]]]
}
# Single port amplifier
# Triangle outline (unfilled poly shape); the np.nan pair in 'paths' breaks
# the polyline between the two lead points so they are not joined.
AMP = {'name': 'AMP',
       'paths': [[[0, 0],
                  [np.nan, np.nan],
                  [0.7, 0]]],
       'anchors': {'center': [2, 0]},
       'shapes': [{'shape': 'poly', 'xy': np.array([[0., 0.5],
                                                    [0.7, 0.],
                                                    [0., -0.5]]), 'fill': False}]}
dircoup_w = 2
dircoup_h = .5
h_offset = 0.01
dx = .07
dy = .07
# Directional coupler
DIR_COUP = {
'name': 'DIR_COUP',
'paths': [[[0, h_offset], [0, dircoup_h], [dircoup_w, dircoup_h], [dircoup_w, -dircoup_h],
[0, -dircoup_h], [0, h_offset], [dircoup_w, h_offset]
]],
'shapes': [{'shape': 'arc',
'center': [dircoup_w*.9, -dircoup_h],
'theta1':90, 'theta2':180,
'width':1, 'height':1, # 'angle':0,
},
{'shape': 'arc',
'center': [dircoup_w*.1, -dircoup_h],
'theta1':0, 'theta2':90,
'width':1, 'height':1, # 'angle':0,
},
{'shape': 'poly',
'xy': [[dircoup_w*.333-dx, -dircoup_h-dy],
[dircoup_w*.333+dx, -dircoup_h-dy],
[dircoup_w*.333+dx, -dircoup_h+dy],
[dircoup_w*.333-dx, -dircoup_h+dy]],
'fill': True,
'fillcolor':'black'
},
{'shape': 'poly',
'xy': [[dircoup_w*.666-dx, -dircoup_h-dy],
[dircoup_w*.666+dx, -dircoup_h-dy],
[dircoup_w*.666+dx, -dircoup_h+dy],
[dircoup_w*.666-dx, -dircoup_h+dy]],
'fill': True,
'fillcolor':'black'
},
{'shape': 'poly',
'xy': [[0-dx, h_offset-dy], [0+dx, h_offset-dy],
[0+dx, h_offset+dy], [0-dx, h_offset+dy]],
'fill': True,
'fillcolor':'black'
},
{'shape': 'poly',
'xy': [[dircoup_w-dx, h_offset-dy],
[dircoup_w+dx, h_offset-dy],
[dircoup_w+dx, h_offset+dy],
[dircoup_w-dx, h_offset+dy]],
'fill': True,
'fillcolor':'black'
},
]
}
IQMIXER = {
'name': 'IQMIXER',
'base': e.SOURCE,
'paths': [[[-.35+dx, -.35], [.35+dx, .35],
[np.nan, np.nan],
[.35+dx, -.35], [-.35+dx, .35],
[np.nan, np.nan],
[0.5, -1], [0.5, -.50],
[np.nan, np.nan],
[0.5, .5], [0.5, 1],
]]
}
# Arc width/height for the circulator's rotation symbol.
# NOTE(review): module-level name 'h' is very generic; renaming would change
# the module namespace, so it is only documented here.
h=.65
CIRCULATOR = {
    'name' : 'CIRCULATOR',
    'base' : e.SOURCE,
    # Counter-clockwise arrowed arc from 130 to 320 degrees inside the circle.
    'shapes':[{'shape':'arc', 'center':[.5,0],
               'width':h, 'height':h, 'theta1':130, 'theta2':320, 'arrow':'ccw'}],# 'arrow':'cw'}
}
| 31.448598 | 95 | 0.398217 | """
This file contains custom elements defined by Adriaan Rol
The intention is that these get merged into SchemDraw.elements after cleaning
up so as to merge them into the master of CDelker
"""
import numpy as np
import SchemDraw.elements as e
LOW_PASS = {
'name': 'LOW_PASS',
'base': e.RBOX,
'paths': [[[0.15, 0.05],
[0.6, 0.05],
[0.8, -.15]]]
}
# Single port amplifier
AMP = {'name': 'AMP',
'paths': [[[0, 0],
[np.nan, np.nan],
[0.7, 0]]],
'anchors': {'center': [2, 0]},
'shapes': [{'shape': 'poly', 'xy': np.array([[0., 0.5],
[0.7, 0.],
[0., -0.5]]), 'fill': False}]}
dircoup_w = 2
dircoup_h = .5
h_offset = 0.01
dx = .07
dy = .07
# Directional coupler
DIR_COUP = {
'name': 'DIR_COUP',
'paths': [[[0, h_offset], [0, dircoup_h], [dircoup_w, dircoup_h], [dircoup_w, -dircoup_h],
[0, -dircoup_h], [0, h_offset], [dircoup_w, h_offset]
]],
'shapes': [{'shape': 'arc',
'center': [dircoup_w*.9, -dircoup_h],
'theta1':90, 'theta2':180,
'width':1, 'height':1, # 'angle':0,
},
{'shape': 'arc',
'center': [dircoup_w*.1, -dircoup_h],
'theta1':0, 'theta2':90,
'width':1, 'height':1, # 'angle':0,
},
{'shape': 'poly',
'xy': [[dircoup_w*.333-dx, -dircoup_h-dy],
[dircoup_w*.333+dx, -dircoup_h-dy],
[dircoup_w*.333+dx, -dircoup_h+dy],
[dircoup_w*.333-dx, -dircoup_h+dy]],
'fill': True,
'fillcolor':'black'
},
{'shape': 'poly',
'xy': [[dircoup_w*.666-dx, -dircoup_h-dy],
[dircoup_w*.666+dx, -dircoup_h-dy],
[dircoup_w*.666+dx, -dircoup_h+dy],
[dircoup_w*.666-dx, -dircoup_h+dy]],
'fill': True,
'fillcolor':'black'
},
{'shape': 'poly',
'xy': [[0-dx, h_offset-dy], [0+dx, h_offset-dy],
[0+dx, h_offset+dy], [0-dx, h_offset+dy]],
'fill': True,
'fillcolor':'black'
},
{'shape': 'poly',
'xy': [[dircoup_w-dx, h_offset-dy],
[dircoup_w+dx, h_offset-dy],
[dircoup_w+dx, h_offset+dy],
[dircoup_w-dx, h_offset+dy]],
'fill': True,
'fillcolor':'black'
},
]
}
IQMIXER = {
'name': 'IQMIXER',
'base': e.SOURCE,
'paths': [[[-.35+dx, -.35], [.35+dx, .35],
[np.nan, np.nan],
[.35+dx, -.35], [-.35+dx, .35],
[np.nan, np.nan],
[0.5, -1], [0.5, -.50],
[np.nan, np.nan],
[0.5, .5], [0.5, 1],
]]
}
h=.65
CIRCULATOR = {
'name' : 'CIRCULATOR',
'base' : e.SOURCE,
'shapes':[{'shape':'arc', 'center':[.5,0],
'width':h, 'height':h, 'theta1':130, 'theta2':320, 'arrow':'ccw'}],# 'arrow':'cw'}
}
| 0 | 0 | 0 |
51f49ad05b6b5bdc28b4a0f013723a485979a205 | 15,726 | py | Python | python/util/conll_scorer/conll/reader.py | UKPLab/cdcr-beyond-corpus-tailored | 52bf98692c7464f25628baea24addd1a988f9a1f | [
"Apache-2.0"
] | 10 | 2020-11-28T05:01:04.000Z | 2021-12-21T19:34:00.000Z | python/util/conll_scorer/conll/reader.py | UKPLab/cdcr-beyond-corpus-tailored | 52bf98692c7464f25628baea24addd1a988f9a1f | [
"Apache-2.0"
] | 1 | 2022-03-12T07:20:39.000Z | 2022-03-16T05:11:38.000Z | python/util/conll_scorer/conll/reader.py | UKPLab/cdcr-beyond-corpus-tailored | 52bf98692c7464f25628baea24addd1a988f9a1f | [
"Apache-2.0"
] | 1 | 2021-12-21T19:34:08.000Z | 2021-12-21T19:34:08.000Z | import sys
from python.util.conll_scorer.conll import mention
"""
Extracting gold parse annotation according to the CoNLL format
"""
"""
Extracting automatic parse annotation
"""
| 36.151724 | 147 | 0.56041 | import sys
from python.util.conll_scorer.conll import mention
def get_doc_mentions(doc_name, doc_lines, keep_singletons, print_debug=False, word_column=3):
    """Extract coreference clusters for one document from its CoNLL lines.

    *doc_lines* is a list of sentences, each a list of raw token lines
    (as produced by get_doc_lines).  Returns (clusters, singletons_num):
    a list of mention lists, plus how many singleton clusters were dropped
    when *keep_singletons* is False.  Chains that share a single-token
    mention are merged at the end.
    """
    clusters = {}
    open_mentions = {}
    to_be_merged = []
    singletons_num = 0
    for sent_num, sent_line in enumerate(doc_lines):
        sent_words=[]
        for word_index, line in enumerate(sent_line):
            # Empty string when the line has too few columns for a word.
            sent_words.append(line.split()[word_column] if len(line.split()) > word_column+1 else "")
            single_token_coref, open_corefs, end_corefs = \
                extract_coref_annotation(line)
            if single_token_coref:
                # '(n)' on one token: a complete single-token mention.
                m = mention.Mention(doc_name, sent_num, word_index, word_index, [sent_words[word_index]])
                for c in single_token_coref:
                    if c not in clusters:
                        clusters[c] = []
                    clusters[c].append(m)
                if len(single_token_coref) > 1 :
                    to_be_merged.append(single_token_coref)
            for c in open_corefs:
                if c in open_mentions:
                    if print_debug:
                        print('Nested coreferring mentions.\n'+str(line))
                    open_mentions[c].append([sent_num, word_index])
                else:
                    open_mentions[c]=[[sent_num, word_index]]
            for c in end_corefs:
                if c not in clusters:
                    clusters[c] = []
                if c not in open_mentions:
                    print('Problem in the coreference annotation:\n', line)
                else:
                    if open_mentions[c][0][0] != sent_num:
                        print('A mention span should be in a single sentence:\n', line)
                    # Close the innermost open mention of chain c.
                    m = mention.Mention(doc_name, sent_num,
                        open_mentions[c][-1][1], word_index, sent_words[open_mentions[c][-1][1]: word_index+1])
                    clusters[c].append(m)
                    if len(open_mentions[c]) == 1:
                        open_mentions.pop(c)
                    else:
                        open_mentions[c].pop()
    if not keep_singletons:
        singletons=[]
        for c in clusters:
            if len(clusters[c]) == 1:
                singletons.append(c)
        singletons_num += len(singletons)
        for c in sorted(singletons, reverse=True):
            clusters.pop(c)
    # NOTE(review): if a chain listed in to_be_merged was removed above as a
    # singleton, clusters[c] raises KeyError here — TODO confirm intended.
    for l in to_be_merged:
        print('Merging ' + str(l) + ' clusters')
        merged = []
        first = l[0]
        for c in l:
            merged.extend(clusters[c])
            clusters.pop(c)
        clusters[first] = merged
    return [clusters[c] for c in clusters], singletons_num
def mask_unseen_mentions(clusters, seen_mentions, keep_singletons):
    """Drop mentions absent from *seen_mentions* and prune the fallout.

    Clusters that lose mentions are filtered in place in *clusters*; a
    filtered cluster is omitted from the result when it becomes empty, or
    when it becomes a singleton and *keep_singletons* is False.
    """
    # Index -> set of mentions to mask, only for clusters that have any.
    touched = {
        idx: {m for m in cluster if m not in seen_mentions}
        for idx, cluster in enumerate(clusters)
        if any(m not in seen_mentions for m in cluster)
    }
    dropped = set()
    for idx, masked in touched.items():
        kept = [m for m in clusters[idx] if m not in masked]
        clusters[idx] = kept
        if not kept or (len(kept) == 1 and not keep_singletons):
            dropped.add(idx)
    return [c for idx, c in enumerate(clusters) if idx not in dropped]
def extract_coref_annotation(line):
    """Parse the coreference column (last whitespace-separated field) of a CoNLL line.

    Returns (single_token_coref, open_corefs, ending_corefs): chain ids whose
    mention both starts and ends on this token, ids whose mention opens here,
    and ids whose mention closes here.  Exits the process on a malformed
    annotation.
    """
    single_token_coref = []
    open_corefs = []
    ending_corefs = []
    last_num = []           # digits of the chain id currently being read
    coref_opened = False    # True between a '(' and its matching ')' or '|'
    coref_column = line.split()[-1]
    for i, c in enumerate(coref_column):
        if c.isdigit():
            last_num.append(c)
        elif c == '(':
            last_num = []
            coref_opened = True
        elif c == ')':
            if coref_opened:
                # Coreference annotations that are marked without specifying
                # the chain number will be skipped.
                if len(last_num) > 0:
                    single_token_coref.append(int(''.join(last_num)))
                coref_opened = False
                last_num = []
            else:
                if len(last_num) > 0:
                    ending_corefs.append(int(''.join(last_num)))
                    last_num = []
        elif c == '|':
            if coref_opened:
                open_corefs.append(int(''.join(last_num)))
                coref_opened = False
                last_num = []
            elif len(last_num) > 0:
                # Bug fix: sys.exit() accepts at most one argument; the old
                # two-argument call raised TypeError instead of exiting.
                sys.exit("Incorrect coreference annotation: " + coref_column)
        if i == len(coref_column) - 1:
            # A '(' left open at the end of the column starts a multi-token mention.
            if coref_opened and len(last_num) > 0:
                open_corefs.append(int(''.join(last_num)))
    if len(single_token_coref) > 1:
        print('A single mention cannot be assigned to more than one cluster\n'
              + 'The following clusters will be merged: ' + str(single_token_coref))
    return single_token_coref, open_corefs, ending_corefs
"""
Extracting gold parse annotation according to the CoNLL format
"""
def extract_annotated_parse(mention_lines, start_index, parse_column=5, word_column=3, POS_column=4):
    """Build a parse TreeNode for a mention span from its CoNLL parse-bit column.

    *mention_lines* are the raw token lines of one mention (all in one
    sentence); *start_index* is the span's first token index.  Phrase tags
    open at '(', words accumulate at '*', and ')' pops the open node.  If
    several complete subtrees are produced, they are grouped under a
    synthetic 'NPS' root.  May return None when the column yields no tree.
    """
    open_nodes = []
    tag_started = False
    tag_name=[]
    terminal_nodes=[]
    root=None
    roots = []
    for i, line in enumerate(mention_lines):
        parse = line.split()[parse_column]
        for j, c in enumerate(parse):
            if c == '(':
                # A new phrase opens: first flush any pending tag and words.
                if tag_started:
                    node = mention.TreeNode(''.join(tag_name), start_index + i, False)
                    if open_nodes:
                        if open_nodes[-1].children:
                            open_nodes[-1].children.append(node)
                        else:
                            open_nodes[-1].children = [node]
                    open_nodes.append(node)
                    tag_name = []
                if terminal_nodes:
                    # skipping words like commas, quotations and parantheses
                    if any(c.isalpha() for c in terminal_nodes) or \
                        any(c.isdigit() for c in terminal_nodes):
                        node = mention.TreeNode(' '.join(terminal_nodes), start_index + i, True)
                        if open_nodes:
                            if open_nodes[-1].children:
                                open_nodes[-1].children.append(node)
                            else:
                                open_nodes[-1].children = [node]
                        else:
                            open_nodes.append(node)
                    terminal_nodes = []
                tag_started = True
            elif c == '*':
                # '*' stands for this line's word inside the current phrase.
                terminal_nodes.append(line.split()[word_column])
                if tag_started:
                    node = mention.TreeNode(''.join(tag_name), start_index + i, False)
                    if open_nodes:
                        if open_nodes[-1].children:
                            open_nodes[-1].children.append(node)
                        else:
                            open_nodes[-1].children = [node]
                    open_nodes.append(node)
                    tag_name = []
                    tag_started = False
            elif c == ')':
                # Close the innermost phrase, attaching any pending words first.
                if terminal_nodes:
                    node = mention.TreeNode(' '.join(terminal_nodes), start_index + i, True)
                    if open_nodes:
                        if open_nodes[-1].children:
                            open_nodes[-1].children.append(node)
                        else:
                            open_nodes[-1].children = [node]
                    else:
                        open_nodes.append(node)
                    terminal_nodes = []
                if open_nodes:
                    root = open_nodes.pop()
                    if not open_nodes:
                        roots.append(root)
                tag_started = False
            elif c.isalpha():
                tag_name.append(c)
            if i == len(mention_lines)-1 and j == len(parse)-1 and terminal_nodes:
                # End of the span: attach trailing words that never saw a ')'.
                node = mention.TreeNode(' '.join(terminal_nodes), start_index + i, True)
                if open_nodes:
                    if open_nodes[-1].children:
                        open_nodes[-1].children.append(node)
                    else:
                        open_nodes[-1].children = [node]
                else:
                    open_nodes.append(node)
                terminal_nodes = []
    if open_nodes:
        root = open_nodes.pop()
        roots.append(root)
    #If there is parsing errors in which starting phrasea are not ended at the end of detected mention boundaries
    while root and open_nodes and root.index != start_index:
        node = open_nodes.pop()
        if not open_nodes or node.index == start_index:
            root = node
    if len(roots) > 1:
        new_root = mention.TreeNode('NPS', start_index, False)
        for node in roots:
            new_root.children.append(node)
        return new_root
    return root
"""
Extracting automatic parse annotation
"""
def extract_automatic_parse(mention_lines, start_index):
    """Parse an auto-parsed span: same reader as the gold one, but the parse bits sit in the last column."""
    return extract_annotated_parse(mention_lines, start_index, -1)
def set_annotated_parse_trees(clusters, key_doc_lines, NP_only, min_span=False, autoparse=False, partial_vp_chain_pruning=True, print_debug=False):
    """Attach a parse tree to every mention and optionally prune VP mentions.

    Each mention's tree is read from *key_doc_lines* (gold or automatic
    parses depending on *autoparse*) and stored via set_gold_parse.  When
    *NP_only* is set, VP-rooted mentions are removed; with
    *partial_vp_chain_pruning* a cluster survives if at least two mentions
    remain, otherwise the whole cluster is dropped.  Returns the pruned
    cluster list.
    """
    pruned_cluster_indices = set()
    pruned_clusters = {}
    for i, c in enumerate(clusters):
        pruned_cluster = list(c)
        for m in c:
            if autoparse:
                tree = extract_automatic_parse(key_doc_lines[m.sent_num][m.start: m.end+1], m.start)
            else:
                try:
                    tree = extract_annotated_parse(key_doc_lines[m.sent_num][m.start: m.end+1], m.start)
                except IndexError as err:
                    # NOTE(review): 'tree' is unbound if this fires on the first
                    # mention — the next line would raise NameError. TODO confirm.
                    print(len(key_doc_lines), m.sent_num)
            m.set_gold_parse(tree)
            if min_span:
                m.set_min_span()
            if tree and tree.tag == 'VP' and NP_only:
                pruned_cluster.remove(m)
                pruned_cluster_indices.add(i)
        pruned_clusters[i]=pruned_cluster
    if NP_only and pruned_cluster_indices:
        for i in sorted(pruned_cluster_indices, reverse=True):
            if len(pruned_clusters[i]) > 1 and partial_vp_chain_pruning:
                if print_debug:
                    print('VP partial pruning: ', [str(m) for m in clusters[i]], '->', [str(m) for m in pruned_clusters[i]])
            else:
                if print_debug:
                    print('VP full pruning, cluster size: ', len(clusters[i]), ' cluster: ' , [str(m) for m in clusters[i]])
                pruned_clusters.pop(i)
    return [pruned_clusters[k] for k in pruned_clusters]
def get_doc_lines(file_name):
    """Read a CoNLL file and group its lines per document and per sentence.

    Returns {document header: [sentence, ...]} where each sentence is the
    list of raw token lines (newlines preserved).  Blank lines separate
    sentences and are not stored; lines outside a #begin/#end document
    block are ignored.
    """
    docs = {}
    current = None
    with open(file_name) as handle:
        at_sentence_break = True
        for raw in handle:
            if raw.startswith("#begin document"):
                # Everything after the marker (incl. the newline) keys the doc.
                current = raw[len("#begin document "):]
                continue
            if raw.startswith("#end document"):
                current = None
                continue
            if not current:
                continue
            sentences = docs.setdefault(current, [])
            # Start a sentence on the first content line of a doc, or right
            # after a blank-line break.
            if (not raw.strip() and not at_sentence_break) or not sentences:
                sentences.append([])
            if raw.strip():
                at_sentence_break = False
                sentences[-1].append(raw)
            else:
                at_sentence_break = True
    return docs
def remove_nested_coref_mentions(clusters, keep_singletons, print_debug=False):
    """Remove mentions nested inside another mention of the same cluster.

    Relies on Mention.are_nested (0: m1 inside m2, 1: m2 inside m1).
    Returns (all_removed_mentions, all_removed_clusters).  Modifies
    *clusters* in place; clusters reduced to a singleton are dropped when
    *keep_singletons* is False.
    NOTE(review): the print(m1, m2)/print(m2) calls below are unconditional
    debug output (not gated on print_debug) — presumably leftovers.
    """
    to_be_removed_mentions={}
    to_be_removed_clusters=[]
    all_removed_mentions = 0
    all_removed_clusters = 0
    for c_index, c in enumerate(clusters):
        to_be_removed_mentions[c_index]=[]
        for i, m1 in enumerate(c):
            for m2 in c[i+1:]:
                nested = m1.are_nested(m2)
                #m1 is nested in m2
                if nested == 0:
                    to_be_removed_mentions[c_index].append(m1)
                    print(m1, m2)
                    print('=========================')
                #m2 is nested in m1
                elif nested == 1:
                    to_be_removed_mentions[c_index].append(m2)
                    print(m2)
    for c_index in to_be_removed_mentions:
        all_removed_mentions += len(to_be_removed_mentions[c_index])
        # Cluster would shrink to exactly one mention after removal.
        if len(clusters[c_index]) != 1 and len(clusters[c_index])-len(to_be_removed_mentions[c_index]) == 1:
            all_removed_clusters +=1
            if print_debug:
                print(clusters[c_index][0])
            if not keep_singletons:
                to_be_removed_clusters.append(c_index)
        else:
            # NOTE(review): only this branch actually filters the mentions;
            # a kept would-be-singleton cluster retains its nested mentions.
            clusters[c_index] = [m for m in clusters[c_index] if m not in to_be_removed_mentions[c_index]]
    for c_index in sorted(to_be_removed_clusters, reverse=True):
        clusters.pop(c_index)
    return all_removed_mentions, all_removed_clusters
def get_coref_infos(key_file, sys_file, NP_only, remove_nested, keep_singletons, print_debug=False):
    """Read key and system CoNLL files and align their clusters per document.

    Returns {doc: (key_clusters, sys_clusters, key_mention_sys_cluster,
    sys_mention_key_cluster)} where the last two map each mention to the
    index of the opposite side's cluster containing it.  Optionally prunes
    non-NP mentions, nested mentions, and singleton clusters on both sides.
    """
    key_doc_lines = get_doc_lines(key_file)
    sys_doc_lines = get_doc_lines(sys_file)
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    for doc in key_doc_lines:
        key_clusters, singletons_num = get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
        key_singletons_num += singletons_num
        if NP_only:
            key_clusters = set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only)
        sys_clusters, singletons_num = get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
        sys_singletons_num += singletons_num
        if NP_only:
            # Note: system mentions are pruned using the KEY file's parses.
            sys_clusters = set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only)
        if remove_nested:
            nested_mentions, removed_clusters = remove_nested_coref_mentions(key_clusters, keep_singletons)
            key_nested_coref_num += nested_mentions
            key_removed_nested_clusters += removed_clusters
            nested_mentions, removed_clusters = remove_nested_coref_mentions(sys_clusters, keep_singletons)
            sys_nested_coref_num += nested_mentions
            sys_removed_nested_clusters += removed_clusters
        sys_mention_key_cluster= get_mention_assignments(sys_clusters, key_clusters)
        key_mention_sys_cluster = get_mention_assignments(key_clusters, sys_clusters)
        doc_coref_infos[doc] = (key_clusters, sys_clusters, \
                key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested and print_debug:
        print('Number of removed nested coreferring mentions in the key annotation: ' , key_nested_coref_num,
                ' and system annotation: ', sys_nested_coref_num)
        print('Number of resulting singleton clusters in the key annotation: ' , key_removed_nested_clusters,
                ' and system annotation: ', sys_removed_nested_clusters)
    if not keep_singletons and print_debug:
        print(key_singletons_num, ' and ', sys_singletons_num, ' singletons are removed from the key and system files, respectively')
    return doc_coref_infos
def get_mention_assignments(inp_clusters, out_clusters):
    """Map each input mention to the index of the output cluster containing it.

    Mentions with no matching output cluster are simply omitted from the
    returned dict.
    """
    cluster_of = {m: idx for idx, cluster in enumerate(out_clusters) for m in cluster}
    return {m: cluster_of[m]
            for cluster in inp_clusters
            for m in cluster
            if m in cluster_of}
| 15,311 | 0 | 228 |
c6cf93cd1b333728606ad22908679f0a83095b63 | 1,812 | py | Python | sample/basic/basic_account_information_example.py | opendxl/opendxl-domaintools-client-python | 426ec661018b868d9810b9b0bf7b6ef1561999b2 | [
"Apache-2.0"
] | 2 | 2018-03-01T14:55:17.000Z | 2019-06-06T07:03:48.000Z | sample/basic/basic_account_information_example.py | opendxl/opendxl-domaintools-client-python | 426ec661018b868d9810b9b0bf7b6ef1561999b2 | [
"Apache-2.0"
] | 1 | 2018-03-27T20:19:48.000Z | 2018-03-27T20:19:48.000Z | sample/basic/basic_account_information_example.py | opendxl/opendxl-domaintools-client-python | 426ec661018b868d9810b9b0bf7b6ef1561999b2 | [
"Apache-2.0"
] | 2 | 2017-10-18T17:19:58.000Z | 2018-08-13T21:53:24.000Z | from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import sys
from dxlbootstrap.util import MessageUtils
from dxlclient.client import DxlClient
from dxlclient.client_config import DxlClientConfig
root_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(root_dir + "/../..")
sys.path.append(root_dir + "/..")
from dxldomaintoolsclient.client import DomainToolsApiClient
# Import common logging and configuration
from common import *
# Configure local logger
logging.getLogger().setLevel(logging.ERROR)
logger = logging.getLogger(__name__)
# Create DXL configuration from file
config = DxlClientConfig.create_dxl_config_from_file(CONFIG_FILE)
# Create the client
with DxlClient(config) as dxl_client:
# Connect to the fabric
dxl_client.connect()
logger.info("Connected to DXL fabric.")
# Create client wrapper
client = DomainToolsApiClient(dxl_client)
# Invoke 'account_information' method on service, in default (dict) output
# format
resp_dict = client.account_information()
# Print out the response
print("Response in default output format:\n{0}".format(
MessageUtils.dict_to_json(resp_dict, pretty_print=True)))
# Invoke 'account_information' method on service, in 'json' output
resp_json = client.account_information(out_format="json")
# Print out the response
print("Response in json output format:\n{0}".format(
MessageUtils.dict_to_json(MessageUtils.json_to_dict(resp_json),
pretty_print=True)))
# Invoke 'account_information' method on service, in 'xml' output
resp_xml = client.account_information(out_format="xml")
# Print out the response
print("Response in xml output format:\n{}".format(resp_xml))
| 30.711864 | 78 | 0.745585 | from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import sys
from dxlbootstrap.util import MessageUtils
from dxlclient.client import DxlClient
from dxlclient.client_config import DxlClientConfig
root_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(root_dir + "/../..")
sys.path.append(root_dir + "/..")
from dxldomaintoolsclient.client import DomainToolsApiClient
# Import common logging and configuration
from common import *
# Configure local logger
logging.getLogger().setLevel(logging.ERROR)
logger = logging.getLogger(__name__)
# Create DXL configuration from file
config = DxlClientConfig.create_dxl_config_from_file(CONFIG_FILE)
# Create the client
with DxlClient(config) as dxl_client:
# Connect to the fabric
dxl_client.connect()
logger.info("Connected to DXL fabric.")
# Create client wrapper
client = DomainToolsApiClient(dxl_client)
# Invoke 'account_information' method on service, in default (dict) output
# format
resp_dict = client.account_information()
# Print out the response
print("Response in default output format:\n{0}".format(
MessageUtils.dict_to_json(resp_dict, pretty_print=True)))
# Invoke 'account_information' method on service, in 'json' output
resp_json = client.account_information(out_format="json")
# Print out the response
print("Response in json output format:\n{0}".format(
MessageUtils.dict_to_json(MessageUtils.json_to_dict(resp_json),
pretty_print=True)))
# Invoke 'account_information' method on service, in 'xml' output
resp_xml = client.account_information(out_format="xml")
# Print out the response
print("Response in xml output format:\n{}".format(resp_xml))
| 0 | 0 | 0 |
c612552b3a25a4da68f5fab7652805cd1944d813 | 100 | py | Python | python/gurobi/notebooks/CuttingCases/Sorrentino.py | ampl/ampls-api | 9c9de155c42b496d1400504685d193829b9454ca | [
"BSD-3-Clause"
] | 2 | 2021-05-28T15:50:32.000Z | 2022-03-23T18:12:01.000Z | python/gurobi/notebooks/CuttingCases/Sorrentino.py | ampl/ampls-api | 9c9de155c42b496d1400504685d193829b9454ca | [
"BSD-3-Clause"
] | 1 | 2021-07-01T17:04:41.000Z | 2021-07-01T17:04:41.000Z | python/xpress/notebooks/CuttingCases/Sorrentino.py | ampl/ampls-api | 9c9de155c42b496d1400504685d193829b9454ca | [
"BSD-3-Clause"
] | null | null | null | roll_width = 64.5
# Cutting-stock demo instance (AMPL cutting cases, Sorrentino data).
# Allowed overrun above demand.  TODO confirm exact interpretation (extra
# rolls vs. extra pieces) against the model that consumes this file.
overrun = 3
# Demand: ordered piece width -> number of pieces required.  Widths are
# presumably in the same units as roll_width (64.5) defined above — verify.
orders = {
    6.77: 10,
    7.56: 40,
    17.46: 33,
    18.76: 10
}
| 11.111111 | 17 | 0.49 | roll_width = 64.5
overrun = 3
orders = {
6.77: 10,
7.56: 40,
17.46: 33,
18.76: 10
}
| 0 | 0 | 0 |
9c6b22885ff4179dd1850ae64b955867ef592b84 | 19,097 | py | Python | nssrc/com/citrix/netscaler/nitro/resource/config/ns/nslimitidentifier.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/ns/nslimitidentifier.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/ns/nslimitidentifier.py | guardicore/nitro-python | 5346a5086134aead80968f15a41ff527adaa0ec1 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class nslimitidentifier(base_resource) :
""" Configuration for limit Indetifier resource. """
	@property
	def limitidentifier(self) :
		r"""Name for a rate limit identifier. Must begin with an ASCII letter or underscore (_) character, and must consist only of ASCII alphanumeric or underscore characters. Reserved words must not be used.
		"""
		# Generated NITRO accessor: the try/except merely re-raises and is
		# kept only for uniformity with the rest of the generated SDK.
		try :
			return self._limitidentifier
		except Exception as e:
			raise e
	@limitidentifier.setter
	def limitidentifier(self, limitidentifier) :
		r"""Name for a rate limit identifier. Must begin with an ASCII letter or underscore (_) character, and must consist only of ASCII alphanumeric or underscore characters. Reserved words must not be used.
		"""
		# No validation here: constraints above are enforced server-side.
		try :
			self._limitidentifier = limitidentifier
		except Exception as e:
			raise e
	@property
	def threshold(self) :
		r"""Maximum number of requests that are allowed in the given timeslice when requests (mode is set as REQUEST_RATE) are tracked per timeslice.
		When connections (mode is set as CONNECTION) are tracked, it is the total number of connections that would be let through.<br/>Default value: 1<br/>Minimum length = 1.
		"""
		# Generated NITRO accessor; try/except re-raises for SDK uniformity.
		try :
			return self._threshold
		except Exception as e:
			raise e
	@threshold.setter
	def threshold(self, threshold) :
		r"""Maximum number of requests that are allowed in the given timeslice when requests (mode is set as REQUEST_RATE) are tracked per timeslice.
		When connections (mode is set as CONNECTION) are tracked, it is the total number of connections that would be let through.<br/>Default value: 1<br/>Minimum length = 1
		"""
		try :
			self._threshold = threshold
		except Exception as e:
			raise e
	@property
	def timeslice(self) :
		r"""Time interval, in milliseconds, specified in multiples of 10, during which requests are tracked to check if they cross the threshold. This argument is needed only when the mode is set to REQUEST_RATE.<br/>Default value: 1000<br/>Minimum length = 10.
		"""
		# Generated NITRO accessor; try/except re-raises for SDK uniformity.
		try :
			return self._timeslice
		except Exception as e:
			raise e
	@timeslice.setter
	def timeslice(self, timeslice) :
		r"""Time interval, in milliseconds, specified in multiples of 10, during which requests are tracked to check if they cross the threshold. This argument is needed only when the mode is set to REQUEST_RATE.<br/>Default value: 1000<br/>Minimum length = 10
		"""
		try :
			self._timeslice = timeslice
		except Exception as e:
			raise e
@property
def mode(self) :
r"""Defines the type of traffic to be tracked.
* REQUEST_RATE - Tracks requests/timeslice.
* CONNECTION - Tracks active transactions.
Examples
1. To permit 20 requests in 10 ms and 2 traps in 10 ms:
add limitidentifier limit_req -mode request_rate -limitType smooth -timeslice 1000 -Threshold 2000 -trapsInTimeSlice 200
2. To permit 50 requests in 10 ms:
set limitidentifier limit_req -mode request_rate -timeslice 1000 -Threshold 5000 -limitType smooth
3. To permit 1 request in 40 ms:
set limitidentifier limit_req -mode request_rate -timeslice 2000 -Threshold 50 -limitType smooth
4. To permit 1 request in 200 ms and 1 trap in 130 ms:
set limitidentifier limit_req -mode request_rate -timeslice 1000 -Threshold 5 -limitType smooth -trapsInTimeSlice 8
5. To permit 5000 requests in 1000 ms and 200 traps in 1000 ms:
set limitidentifier limit_req -mode request_rate -timeslice 1000 -Threshold 5000 -limitType BURSTY.<br/>Default value: REQUEST_RATE<br/>Possible values = CONNECTION, REQUEST_RATE, NONE.
"""
try :
return self._mode
except Exception as e:
raise e
@mode.setter
def mode(self, mode) :
r"""Defines the type of traffic to be tracked.
* REQUEST_RATE - Tracks requests/timeslice.
* CONNECTION - Tracks active transactions.
Examples
1. To permit 20 requests in 10 ms and 2 traps in 10 ms:
add limitidentifier limit_req -mode request_rate -limitType smooth -timeslice 1000 -Threshold 2000 -trapsInTimeSlice 200
2. To permit 50 requests in 10 ms:
set limitidentifier limit_req -mode request_rate -timeslice 1000 -Threshold 5000 -limitType smooth
3. To permit 1 request in 40 ms:
set limitidentifier limit_req -mode request_rate -timeslice 2000 -Threshold 50 -limitType smooth
4. To permit 1 request in 200 ms and 1 trap in 130 ms:
set limitidentifier limit_req -mode request_rate -timeslice 1000 -Threshold 5 -limitType smooth -trapsInTimeSlice 8
5. To permit 5000 requests in 1000 ms and 200 traps in 1000 ms:
set limitidentifier limit_req -mode request_rate -timeslice 1000 -Threshold 5000 -limitType BURSTY.<br/>Default value: REQUEST_RATE<br/>Possible values = CONNECTION, REQUEST_RATE, NONE
"""
try :
self._mode = mode
except Exception as e:
raise e
@property
def limittype(self) :
r"""Smooth or bursty request type.
* SMOOTH - When you want the permitted number of requests in a given interval of time to be spread evenly across the timeslice
* BURSTY - When you want the permitted number of requests to exhaust the quota anytime within the timeslice.
This argument is needed only when the mode is set to REQUEST_RATE.<br/>Default value: BURSTY<br/>Possible values = BURSTY, SMOOTH.
"""
try :
return self._limittype
except Exception as e:
raise e
@limittype.setter
def limittype(self, limittype) :
r"""Smooth or bursty request type.
* SMOOTH - When you want the permitted number of requests in a given interval of time to be spread evenly across the timeslice
* BURSTY - When you want the permitted number of requests to exhaust the quota anytime within the timeslice.
This argument is needed only when the mode is set to REQUEST_RATE.<br/>Default value: BURSTY<br/>Possible values = BURSTY, SMOOTH
"""
try :
self._limittype = limittype
except Exception as e:
raise e
@property
def selectorname(self) :
r"""Name of the rate limit selector. If this argument is NULL, rate limiting will be applied on all traffic received by the virtual server or the Citrix ADC (depending on whether the limit identifier is bound to a virtual server or globally) without any filtering.<br/>Minimum length = 1.
"""
try :
return self._selectorname
except Exception as e:
raise e
@selectorname.setter
def selectorname(self, selectorname) :
r"""Name of the rate limit selector. If this argument is NULL, rate limiting will be applied on all traffic received by the virtual server or the Citrix ADC (depending on whether the limit identifier is bound to a virtual server or globally) without any filtering.<br/>Minimum length = 1
"""
try :
self._selectorname = selectorname
except Exception as e:
raise e
@property
def maxbandwidth(self) :
r"""Maximum bandwidth permitted, in kbps.<br/>Maximum length = 4294967287.
"""
try :
return self._maxbandwidth
except Exception as e:
raise e
@maxbandwidth.setter
def maxbandwidth(self, maxbandwidth) :
r"""Maximum bandwidth permitted, in kbps.<br/>Maximum length = 4294967287
"""
try :
self._maxbandwidth = maxbandwidth
except Exception as e:
raise e
@property
def trapsintimeslice(self) :
r"""Number of traps to be sent in the timeslice configured. A value of 0 indicates that traps are disabled.<br/>Maximum length = 65535.
"""
try :
return self._trapsintimeslice
except Exception as e:
raise e
@trapsintimeslice.setter
def trapsintimeslice(self, trapsintimeslice) :
r"""Number of traps to be sent in the timeslice configured. A value of 0 indicates that traps are disabled.<br/>Maximum length = 65535
"""
try :
self._trapsintimeslice = trapsintimeslice
except Exception as e:
raise e
@property
def ngname(self) :
r"""Nodegroup name to which this identifier belongs to.
"""
try :
return self._ngname
except Exception as e:
raise e
@property
def hits(self) :
r"""The number of times this identifier was evaluated.
"""
try :
return self._hits
except Exception as e:
raise e
@property
def drop(self) :
r"""The number of times action was taken.
"""
try :
return self._drop
except Exception as e:
raise e
@property
def rule(self) :
r"""Rule.
"""
try :
return self._rule
except Exception as e:
raise e
@property
def time(self) :
r"""Time interval considered for rate limiting.
"""
try :
return self._time
except Exception as e:
raise e
@property
def total(self) :
r"""Maximum number of requests permitted in the computed timeslice.
"""
try :
return self._total
except Exception as e:
raise e
@property
def trapscomputedintimeslice(self) :
r"""The number of traps that would be sent in the timeslice configured. .
"""
try :
return self._trapscomputedintimeslice
except Exception as e:
raise e
@property
def computedtraptimeslice(self) :
r"""The time interval computed for sending traps.
"""
try :
return self._computedtraptimeslice
except Exception as e:
raise e
@property
def referencecount(self) :
r"""Total number of transactions pointing to this entry.
"""
try :
return self._referencecount
except Exception as e:
raise e
	def _get_nitro_response(self, service, response) :
		r""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			result = service.payload_formatter.string_to_resource(nslimitidentifier_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# Error code 444 means the session expired; clear it so the next call re-authenticates.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					# When a severity is reported, only "ERROR" is fatal; other severities
					# (e.g. warnings) fall through and the payload is still returned.
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					# No severity reported: treat any non-zero errorcode as fatal.
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.nslimitidentifier
		except Exception as e :
			raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.limitidentifier is not None :
return str(self.limitidentifier)
return None
except Exception as e :
raise e
@classmethod
def filter_add_parameters(cls, resource) :
r""" Use this function to create a resource with only add operation specific parameters.
"""
addresource = nslimitidentifier()
addresource.limitidentifier = resource.limitidentifier
addresource.threshold = resource.threshold
addresource.timeslice = resource.timeslice
addresource.mode = resource.mode
addresource.limittype = resource.limittype
addresource.selectorname = resource.selectorname
addresource.maxbandwidth = resource.maxbandwidth
addresource.trapsintimeslice = resource.trapsintimeslice
return addresource
@classmethod
def add(cls, client, resource) :
r""" Use this API to add nslimitidentifier.
"""
try :
if type(resource) is not list :
addresource = cls.filter_add_parameters(resource)
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ nslimitidentifier() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i] = cls.filter_add_parameters(resource[i])
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def filter_delete_parameters(cls, resource) :
r""" Use this function to create a resource with only delete operation specific parameters.
"""
deleteresource = nslimitidentifier()
deleteresource.limitidentifier = resource.limitidentifier
return deleteresource
@classmethod
def delete(cls, client, resource) :
r""" Use this API to delete nslimitidentifier.
"""
try :
if type(resource) is not list :
deleteresource = nslimitidentifier()
if type(resource) != type(deleteresource):
deleteresource.limitidentifier = resource
else :
deleteresource = cls.filter_delete_parameters(resource)
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ nslimitidentifier() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].limitidentifier = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ nslimitidentifier() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i] = cls.filter_delete_parameters(resource)
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def filter_update_parameters(cls, resource) :
r""" Use this function to create a resource with only update operation specific parameters.
"""
updateresource = nslimitidentifier()
updateresource.limitidentifier = resource.limitidentifier
updateresource.threshold = resource.threshold
updateresource.timeslice = resource.timeslice
updateresource.mode = resource.mode
updateresource.limittype = resource.limittype
updateresource.selectorname = resource.selectorname
updateresource.maxbandwidth = resource.maxbandwidth
updateresource.trapsintimeslice = resource.trapsintimeslice
return updateresource
@classmethod
def update(cls, client, resource) :
r""" Use this API to update nslimitidentifier.
"""
try :
if type(resource) is not list :
updateresource = cls.filter_update_parameters(resource)
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ nslimitidentifier() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i] = cls.filter_update_parameters(resource[i])
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
	@classmethod
	def unset(cls, client, resource, args) :
		r""" Use this API to unset the properties of nslimitidentifier resource.
		Properties that need to be unset are specified in args array.
		"""
		try :
			if type(resource) is not list :
				unsetresource = nslimitidentifier()
				if type(resource) != type(unsetresource):
					# Caller passed the identifier name as a plain value (e.g. a string).
					unsetresource.limitidentifier = resource
				else :
					# Caller passed a resource object; copy just its identifying key.
					unsetresource.limitidentifier = resource.limitidentifier
				return unsetresource.unset_resource(client, args)
			else :
				if type(resource[0]) != cls :
					# List of names: copy each name onto a fresh resource object.
					if (resource and len(resource) > 0) :
						unsetresources = [ nslimitidentifier() for _ in range(len(resource))]
						for i in range(len(resource)) :
							unsetresources[i].limitidentifier = resource[i]
				else :
					# List of resource objects: copy the identifying key from each.
					if (resource and len(resource) > 0) :
						unsetresources = [ nslimitidentifier() for _ in range(len(resource))]
						for i in range(len(resource)) :
							unsetresources[i].limitidentifier = resource[i].limitidentifier
				result = cls.unset_bulk_request(client, unsetresources, args)
			return result
		except Exception as e :
			raise e
	@classmethod
	def get(cls, client, name="", option_="") :
		r""" Use this API to fetch all the nslimitidentifier resources that are configured on netscaler.
		"""
		try :
			if not name :
				# No name given: fetch every configured limit identifier.
				obj = nslimitidentifier()
				response = obj.get_resources(client, option_)
			else :
				if type(name) is not list :
					# A resource object is not a valid lookup key; only names are accepted.
					if type(name) == cls :
						raise Exception('Invalid parameter name:{0}'.format(type(name)))
					obj = nslimitidentifier()
					obj.limitidentifier = name
					response = obj.get_resource(client, option_)
				else :
					if name and len(name) > 0 :
						if type(name[0]) == cls :
							raise Exception('Invalid parameter name:{0}'.format(type(name[0])))
						# One GET per name; responses are returned in the same order as `name`.
						response = [nslimitidentifier() for _ in range(len(name))]
						obj = [nslimitidentifier() for _ in range(len(name))]
						for i in range(len(name)) :
							obj[i] = nslimitidentifier()
							obj[i].limitidentifier = name[i]
							response[i] = obj[i].get_resource(client, option_)
			return response
		except Exception as e :
			raise e
@classmethod
def get_filtered(cls, client, filter_) :
r""" Use this API to fetch filtered set of nslimitidentifier resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = nslimitidentifier()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
r""" Use this API to count the nslimitidentifier resources configured on NetScaler.
"""
try :
obj = nslimitidentifier()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
r""" Use this API to count filtered the set of nslimitidentifier resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = nslimitidentifier()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
| 34.040998 | 291 | 0.726659 | #
# Copyright (c) 2021 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class nslimitidentifier(base_resource) :
""" Configuration for limit Indetifier resource. """
def __init__(self) :
self._limitidentifier = None
self._threshold = None
self._timeslice = None
self._mode = None
self._limittype = None
self._selectorname = None
self._maxbandwidth = None
self._trapsintimeslice = None
self._ngname = None
self._hits = None
self._drop = None
self._rule = None
self._time = None
self._total = None
self._trapscomputedintimeslice = None
self._computedtraptimeslice = None
self._referencecount = None
self.___count = None
@property
def limitidentifier(self) :
r"""Name for a rate limit identifier. Must begin with an ASCII letter or underscore (_) character, and must consist only of ASCII alphanumeric or underscore characters. Reserved words must not be used.
"""
try :
return self._limitidentifier
except Exception as e:
raise e
@limitidentifier.setter
def limitidentifier(self, limitidentifier) :
r"""Name for a rate limit identifier. Must begin with an ASCII letter or underscore (_) character, and must consist only of ASCII alphanumeric or underscore characters. Reserved words must not be used.
"""
try :
self._limitidentifier = limitidentifier
except Exception as e:
raise e
@property
def threshold(self) :
r"""Maximum number of requests that are allowed in the given timeslice when requests (mode is set as REQUEST_RATE) are tracked per timeslice.
When connections (mode is set as CONNECTION) are tracked, it is the total number of connections that would be let through.<br/>Default value: 1<br/>Minimum length = 1.
"""
try :
return self._threshold
except Exception as e:
raise e
@threshold.setter
def threshold(self, threshold) :
r"""Maximum number of requests that are allowed in the given timeslice when requests (mode is set as REQUEST_RATE) are tracked per timeslice.
When connections (mode is set as CONNECTION) are tracked, it is the total number of connections that would be let through.<br/>Default value: 1<br/>Minimum length = 1
"""
try :
self._threshold = threshold
except Exception as e:
raise e
@property
def timeslice(self) :
r"""Time interval, in milliseconds, specified in multiples of 10, during which requests are tracked to check if they cross the threshold. This argument is needed only when the mode is set to REQUEST_RATE.<br/>Default value: 1000<br/>Minimum length = 10.
"""
try :
return self._timeslice
except Exception as e:
raise e
@timeslice.setter
def timeslice(self, timeslice) :
r"""Time interval, in milliseconds, specified in multiples of 10, during which requests are tracked to check if they cross the threshold. This argument is needed only when the mode is set to REQUEST_RATE.<br/>Default value: 1000<br/>Minimum length = 10
"""
try :
self._timeslice = timeslice
except Exception as e:
raise e
@property
def mode(self) :
r"""Defines the type of traffic to be tracked.
* REQUEST_RATE - Tracks requests/timeslice.
* CONNECTION - Tracks active transactions.
Examples
1. To permit 20 requests in 10 ms and 2 traps in 10 ms:
add limitidentifier limit_req -mode request_rate -limitType smooth -timeslice 1000 -Threshold 2000 -trapsInTimeSlice 200
2. To permit 50 requests in 10 ms:
set limitidentifier limit_req -mode request_rate -timeslice 1000 -Threshold 5000 -limitType smooth
3. To permit 1 request in 40 ms:
set limitidentifier limit_req -mode request_rate -timeslice 2000 -Threshold 50 -limitType smooth
4. To permit 1 request in 200 ms and 1 trap in 130 ms:
set limitidentifier limit_req -mode request_rate -timeslice 1000 -Threshold 5 -limitType smooth -trapsInTimeSlice 8
5. To permit 5000 requests in 1000 ms and 200 traps in 1000 ms:
set limitidentifier limit_req -mode request_rate -timeslice 1000 -Threshold 5000 -limitType BURSTY.<br/>Default value: REQUEST_RATE<br/>Possible values = CONNECTION, REQUEST_RATE, NONE.
"""
try :
return self._mode
except Exception as e:
raise e
@mode.setter
def mode(self, mode) :
r"""Defines the type of traffic to be tracked.
* REQUEST_RATE - Tracks requests/timeslice.
* CONNECTION - Tracks active transactions.
Examples
1. To permit 20 requests in 10 ms and 2 traps in 10 ms:
add limitidentifier limit_req -mode request_rate -limitType smooth -timeslice 1000 -Threshold 2000 -trapsInTimeSlice 200
2. To permit 50 requests in 10 ms:
set limitidentifier limit_req -mode request_rate -timeslice 1000 -Threshold 5000 -limitType smooth
3. To permit 1 request in 40 ms:
set limitidentifier limit_req -mode request_rate -timeslice 2000 -Threshold 50 -limitType smooth
4. To permit 1 request in 200 ms and 1 trap in 130 ms:
set limitidentifier limit_req -mode request_rate -timeslice 1000 -Threshold 5 -limitType smooth -trapsInTimeSlice 8
5. To permit 5000 requests in 1000 ms and 200 traps in 1000 ms:
set limitidentifier limit_req -mode request_rate -timeslice 1000 -Threshold 5000 -limitType BURSTY.<br/>Default value: REQUEST_RATE<br/>Possible values = CONNECTION, REQUEST_RATE, NONE
"""
try :
self._mode = mode
except Exception as e:
raise e
@property
def limittype(self) :
r"""Smooth or bursty request type.
* SMOOTH - When you want the permitted number of requests in a given interval of time to be spread evenly across the timeslice
* BURSTY - When you want the permitted number of requests to exhaust the quota anytime within the timeslice.
This argument is needed only when the mode is set to REQUEST_RATE.<br/>Default value: BURSTY<br/>Possible values = BURSTY, SMOOTH.
"""
try :
return self._limittype
except Exception as e:
raise e
@limittype.setter
def limittype(self, limittype) :
r"""Smooth or bursty request type.
* SMOOTH - When you want the permitted number of requests in a given interval of time to be spread evenly across the timeslice
* BURSTY - When you want the permitted number of requests to exhaust the quota anytime within the timeslice.
This argument is needed only when the mode is set to REQUEST_RATE.<br/>Default value: BURSTY<br/>Possible values = BURSTY, SMOOTH
"""
try :
self._limittype = limittype
except Exception as e:
raise e
@property
def selectorname(self) :
r"""Name of the rate limit selector. If this argument is NULL, rate limiting will be applied on all traffic received by the virtual server or the Citrix ADC (depending on whether the limit identifier is bound to a virtual server or globally) without any filtering.<br/>Minimum length = 1.
"""
try :
return self._selectorname
except Exception as e:
raise e
@selectorname.setter
def selectorname(self, selectorname) :
r"""Name of the rate limit selector. If this argument is NULL, rate limiting will be applied on all traffic received by the virtual server or the Citrix ADC (depending on whether the limit identifier is bound to a virtual server or globally) without any filtering.<br/>Minimum length = 1
"""
try :
self._selectorname = selectorname
except Exception as e:
raise e
@property
def maxbandwidth(self) :
r"""Maximum bandwidth permitted, in kbps.<br/>Maximum length = 4294967287.
"""
try :
return self._maxbandwidth
except Exception as e:
raise e
@maxbandwidth.setter
def maxbandwidth(self, maxbandwidth) :
r"""Maximum bandwidth permitted, in kbps.<br/>Maximum length = 4294967287
"""
try :
self._maxbandwidth = maxbandwidth
except Exception as e:
raise e
@property
def trapsintimeslice(self) :
r"""Number of traps to be sent in the timeslice configured. A value of 0 indicates that traps are disabled.<br/>Maximum length = 65535.
"""
try :
return self._trapsintimeslice
except Exception as e:
raise e
@trapsintimeslice.setter
def trapsintimeslice(self, trapsintimeslice) :
r"""Number of traps to be sent in the timeslice configured. A value of 0 indicates that traps are disabled.<br/>Maximum length = 65535
"""
try :
self._trapsintimeslice = trapsintimeslice
except Exception as e:
raise e
@property
def ngname(self) :
r"""Nodegroup name to which this identifier belongs to.
"""
try :
return self._ngname
except Exception as e:
raise e
@property
def hits(self) :
r"""The number of times this identifier was evaluated.
"""
try :
return self._hits
except Exception as e:
raise e
@property
def drop(self) :
r"""The number of times action was taken.
"""
try :
return self._drop
except Exception as e:
raise e
@property
def rule(self) :
r"""Rule.
"""
try :
return self._rule
except Exception as e:
raise e
@property
def time(self) :
r"""Time interval considered for rate limiting.
"""
try :
return self._time
except Exception as e:
raise e
@property
def total(self) :
r"""Maximum number of requests permitted in the computed timeslice.
"""
try :
return self._total
except Exception as e:
raise e
@property
def trapscomputedintimeslice(self) :
r"""The number of traps that would be sent in the timeslice configured. .
"""
try :
return self._trapscomputedintimeslice
except Exception as e:
raise e
@property
def computedtraptimeslice(self) :
r"""The time interval computed for sending traps.
"""
try :
return self._computedtraptimeslice
except Exception as e:
raise e
@property
def referencecount(self) :
r"""Total number of transactions pointing to this entry.
"""
try :
return self._referencecount
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(nslimitidentifier_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.nslimitidentifier
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.limitidentifier is not None :
return str(self.limitidentifier)
return None
except Exception as e :
raise e
@classmethod
def filter_add_parameters(cls, resource) :
r""" Use this function to create a resource with only add operation specific parameters.
"""
addresource = nslimitidentifier()
addresource.limitidentifier = resource.limitidentifier
addresource.threshold = resource.threshold
addresource.timeslice = resource.timeslice
addresource.mode = resource.mode
addresource.limittype = resource.limittype
addresource.selectorname = resource.selectorname
addresource.maxbandwidth = resource.maxbandwidth
addresource.trapsintimeslice = resource.trapsintimeslice
return addresource
@classmethod
def add(cls, client, resource) :
r""" Use this API to add nslimitidentifier.
"""
try :
if type(resource) is not list :
addresource = cls.filter_add_parameters(resource)
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ nslimitidentifier() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i] = cls.filter_add_parameters(resource[i])
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def filter_delete_parameters(cls, resource) :
r""" Use this function to create a resource with only delete operation specific parameters.
"""
deleteresource = nslimitidentifier()
deleteresource.limitidentifier = resource.limitidentifier
return deleteresource
@classmethod
def delete(cls, client, resource) :
r""" Use this API to delete nslimitidentifier.
"""
try :
if type(resource) is not list :
deleteresource = nslimitidentifier()
if type(resource) != type(deleteresource):
deleteresource.limitidentifier = resource
else :
deleteresource = cls.filter_delete_parameters(resource)
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ nslimitidentifier() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].limitidentifier = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ nslimitidentifier() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i] = cls.filter_delete_parameters(resource)
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def filter_update_parameters(cls, resource) :
r""" Use this function to create a resource with only update operation specific parameters.
"""
updateresource = nslimitidentifier()
updateresource.limitidentifier = resource.limitidentifier
updateresource.threshold = resource.threshold
updateresource.timeslice = resource.timeslice
updateresource.mode = resource.mode
updateresource.limittype = resource.limittype
updateresource.selectorname = resource.selectorname
updateresource.maxbandwidth = resource.maxbandwidth
updateresource.trapsintimeslice = resource.trapsintimeslice
return updateresource
@classmethod
def update(cls, client, resource) :
r""" Use this API to update nslimitidentifier.
"""
try :
if type(resource) is not list :
updateresource = cls.filter_update_parameters(resource)
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ nslimitidentifier() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i] = cls.filter_update_parameters(resource[i])
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
r""" Use this API to unset the properties of nslimitidentifier resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = nslimitidentifier()
if type(resource) != type(unsetresource):
unsetresource.limitidentifier = resource
else :
unsetresource.limitidentifier = resource.limitidentifier
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ nslimitidentifier() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].limitidentifier = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ nslimitidentifier() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].limitidentifier = resource[i].limitidentifier
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
r""" Use this API to fetch all the nslimitidentifier resources that are configured on netscaler.
"""
try :
if not name :
obj = nslimitidentifier()
response = obj.get_resources(client, option_)
else :
if type(name) is not list :
if type(name) == cls :
raise Exception('Invalid parameter name:{0}'.format(type(name)))
obj = nslimitidentifier()
obj.limitidentifier = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
if type(name[0]) == cls :
raise Exception('Invalid parameter name:{0}'.format(type(name[0])))
response = [nslimitidentifier() for _ in range(len(name))]
obj = [nslimitidentifier() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = nslimitidentifier()
obj[i].limitidentifier = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
r""" Use this API to fetch filtered set of nslimitidentifier resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = nslimitidentifier()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
r""" Use this API to count the nslimitidentifier resources configured on NetScaler.
"""
try :
obj = nslimitidentifier()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
r""" Use this API to count filtered the set of nslimitidentifier resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = nslimitidentifier()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Mode:
CONNECTION = "CONNECTION"
REQUEST_RATE = "REQUEST_RATE"
NONE = "NONE"
class Limittype:
BURSTY = "BURSTY"
SMOOTH = "SMOOTH"
class nslimitidentifier_response(base_response) :
	""" Response envelope returned by nslimitidentifier API calls. """
	def __init__(self, length=1) :
		self.nslimitidentifier = []
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# Pre-allocate one resource object per expected result row.
		self.nslimitidentifier = [nslimitidentifier() for _ in range(length)]
| 661 | 129 | 117 |
f6347dd0544898f0b56a1bffd7b36ab3a17b143c | 7,846 | py | Python | choice/models.py | ppp314/multiple | d74c789b552ebe20211b0d327341a99bd6ee1368 | [
"Apache-2.0"
] | null | null | null | choice/models.py | ppp314/multiple | d74c789b552ebe20211b0d327341a99bd6ee1368 | [
"Apache-2.0"
] | 6 | 2019-02-14T12:16:33.000Z | 2020-04-11T09:21:26.000Z | choice/models.py | ppp314/multiple | d74c789b552ebe20211b0d327341a99bd6ee1368 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2019 Acacia Shop
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db import models
from django.db.models import Sum, F, Q, Count
from django.urls import reverse
from django.utils import timezone
# Multiple-choice mark options: (stored DB value, human-readable label).
CHOICE_MARK_ONE = 'MARK1'
CHOICE_MARK_TWO = 'MARK2'
CHOICE_MARK_THREE = 'MARK3'
CHOICE_MARK_FOUR = 'MARK4'
CHOICE_MARK_FIVE = 'MARK5'
CHOICE_MARK_CHOICES = (
    (CHOICE_MARK_ONE, 'Mark 1'),
    (CHOICE_MARK_TWO, 'Mark 2'),
    (CHOICE_MARK_THREE, 'Mark 3'),
    (CHOICE_MARK_FOUR, 'Mark 4'),
    (CHOICE_MARK_FIVE, 'Mark 5'),
)
class Answer(models.Model):
    """ The class which contains correct answers."""
    # Exam this answer key belongs to.
    exam = models.ForeignKey('Exam', on_delete=models.CASCADE)
    # verbose_name '作成日' = creation date.
    created = models.DateTimeField(verbose_name='作成日',
                                   blank=True,
                                   default=None,
                                   null=True)
    # '大問' = major question number, '小問' = sub-question number, '配点' = allotted points.
    no = models.IntegerField(verbose_name='大問', default=0)
    sub_no = models.PositiveIntegerField(verbose_name='小問', default=0)
    point = models.PositiveIntegerField(verbose_name='配点', default=0)
    # The correct mark for this question; one of CHOICE_MARK_CHOICES.
    correct = models.CharField(
        max_length=30,
        choices=CHOICE_MARK_CHOICES,
        blank=True,
    )
class DrillManager(models.Manager):
    """Manager used as Drill class manager."""
    # NOTE(review): score() is an unimplemented stub — it documents an intended
    # annotation (`mark_point_sum`) but currently does nothing.
    def score(self):
        """Each drill queryset with a score of correct answer attribute.
        Each drill with score of the correct answer as
        a `mark_point_sum` attribute.
        Return QuerySet: the drill queryset with `total_score` attribute
        """
        pass
class Drill(models.Model):
    """Hold Drill object for the Exam instance."""
    # Exam this drill is derived from.
    exam = models.ForeignKey('Exam', on_delete=models.CASCADE)
    # verbose_name 'ドリルの説明' = drill description; '作成日' = creation date.
    description = models.CharField(verbose_name='ドリルの説明', max_length=200)
    created = models.DateTimeField(verbose_name='作成日',
                                   blank=True,
                                   default=None,
                                   null=True)
    objects = DrillManager()
    def save(self, *args, **kwargs):
        """Save the drill instance as well as create the Mark objects.
        Create the Mark objects as many as the answer objects.
        Todo:
            Work around when there is no answer object.
        """
        super().save(*args, **kwargs)
        # One Mark per Answer of the parent exam.
        answers = self.exam.answer_set.all()
        for an in answers:
            Mark.objects.create(drill=self, answer=an)
    def point_full_mark(self):
        """ Return the dictionary of the sum of the allocated point.
        Returns:
            the dictionary of total: {'total': 100}
        """
        p = self.exam.answer_set.all()
        # NOTE(review): local name 'dict' shadows the builtin.
        dict = p.aggregate(total=Sum('point'))
        return dict # {'total': 100}
    def point_earned(self):
        """ Return the sum of point earned."""
        qs = Mark.objects.filter(drill=self)
        # Only sum the points of answers whose stored mark matches the correct one.
        dict = qs.aggregate(total=Sum(
            'answer__point', filter=Q(answer__correct=F('your_choice'))))
        return dict # {'total': 100}
    def register_grade(self):
        """Register the result of this drill."""
        dict = self.point_earned()
        Grade.objects.create(
            exam=self.exam,
            point=dict['total'],
            created=timezone.now(),
        )
class MarkManager(models.Manager):
    """Mark Manager."""
    def create_mark(self, drill, answer, your_choice=''):
        """Create mark method.
        Create and return mark object with drill and answer.
        """
        # your_choice defaults to '' (unanswered); both FKs are required.
        mark = self.create(
            drill=drill,
            answer=answer,
            your_choice=your_choice,
        )
        return mark
class Mark(models.Model):
    """The class contains submitted answers."""
    # The drill this mark belongs to, and the answer key row it responds to.
    drill = models.ForeignKey('Drill', on_delete=models.CASCADE)
    answer = models.ForeignKey('Answer', on_delete=models.CASCADE)
    # Mark the user submitted; blank until answered.
    your_choice = models.CharField(
        max_length=30,
        choices=CHOICE_MARK_CHOICES,
        blank=True,
    )
    objects = MarkManager()
class Grade(models.Model):
    """Hold the results of drills.
    """
    exam = models.ForeignKey('Exam', on_delete=models.CASCADE)
    # Total points earned for one completed drill.
    point = models.PositiveIntegerField(blank=True)
    created = models.DateTimeField(
        blank=True,
        default=None,
    )
def factorial(n):
    """Return the factorial of n, an exact integer >= 0.
    >>> [factorial(n) for n in range(6)]
    [1, 1, 2, 6, 24, 120]
    >>> factorial(30)
    265252859812191058636308480000000
    >>> factorial(-1)
    Traceback (most recent call last):
        ...
    ValueError: n must be >= 0
    Floats are accepted when they carry an exact integer value:
    >>> factorial(30.0)
    265252859812191058636308480000000
    >>> factorial(30.1)
    Traceback (most recent call last):
        ...
    ValueError: n must be exact integer
    Ridiculously large floats are rejected:
    >>> factorial(1e100)
    Traceback (most recent call last):
        ...
    OverflowError: n too large
    """
    import math
    # Guard clauses: negative, non-integral float, or a float so large
    # that adding 1 is a no-op (e.g. 1e300).
    if not n >= 0:
        raise ValueError("n must be >= 0")
    if math.floor(n) != n:
        raise ValueError("n must be exact integer")
    if n + 1 == n:
        raise OverflowError("n too large")
    product = 1
    for factor in range(2, int(n) + 1):
        product *= factor
    return product
| 27.148789 | 77 | 0.614453 | """
Copyright 2019 Acacia Shop
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db import models
from django.db.models import Sum, F, Q, Count
from django.urls import reverse
from django.utils import timezone
class ExamManeger(models.Manager):
    """Manager that annotates each Exam with its answer count and total points."""
    # NOTE(review): "Maneger" is a typo for "Manager", but renaming would break
    # every existing reference, so it is only flagged here.
    def get_queryset(self):
        # Adds answer__count and answer__point__sum annotations to every Exam.
        return super().get_queryset().annotate(Count('answer'),
                                               Sum('answer__point'))
class Exam(models.Model):
    """A test paper: the root object answers, drills and grades hang off."""
    # verbose_name 'テスト名' = test name; '作成日' = creation date; '問題数' = question count.
    title = models.CharField(verbose_name='テスト名', max_length=200)
    created = models.DateTimeField(verbose_name='作成日', default=timezone.now)
    number_of_question = models.IntegerField(verbose_name='問題数', default=1)
    objects = ExamManeger()
    class Meta:
        # '試験' = examination.
        verbose_name = '試験'
        verbose_name_plural = '試験'
        ordering = ['created']
    def __str__(self):
        return self.title
    def get_absolute_url(self):
        # Canonical detail page for this exam.
        return reverse('choice:exam-detail', kwargs={'pk': self.pk})
# Multiple-choice mark options: (stored DB value, human-readable label).
CHOICE_MARK_ONE = 'MARK1'
CHOICE_MARK_TWO = 'MARK2'
CHOICE_MARK_THREE = 'MARK3'
CHOICE_MARK_FOUR = 'MARK4'
CHOICE_MARK_FIVE = 'MARK5'
CHOICE_MARK_CHOICES = (
    (CHOICE_MARK_ONE, 'Mark 1'),
    (CHOICE_MARK_TWO, 'Mark 2'),
    (CHOICE_MARK_THREE, 'Mark 3'),
    (CHOICE_MARK_FOUR, 'Mark 4'),
    (CHOICE_MARK_FIVE, 'Mark 5'),
)
class Answer(models.Model):
    """ The class which contains correct answers."""
    # Exam this answer key belongs to.
    exam = models.ForeignKey('Exam', on_delete=models.CASCADE)
    # verbose_name '作成日' = creation date.
    created = models.DateTimeField(verbose_name='作成日',
                                   blank=True,
                                   default=None,
                                   null=True)
    # '大問' = major question number, '小問' = sub-question number, '配点' = allotted points.
    no = models.IntegerField(verbose_name='大問', default=0)
    sub_no = models.PositiveIntegerField(verbose_name='小問', default=0)
    point = models.PositiveIntegerField(verbose_name='配点', default=0)
    # The correct mark for this question; one of CHOICE_MARK_CHOICES.
    correct = models.CharField(
        max_length=30,
        choices=CHOICE_MARK_CHOICES,
        blank=True,
    )
    class Meta:
        # '解答' = answer.
        verbose_name = '解答'
        verbose_name_plural = '解答'
        ordering = ['no', 'sub_no']
    def __str__(self):
        return str(self.no) + '-' + str(self.sub_no)
# NOTE(review): this REDEFINES ExamManeger and shadows the earlier class of the
# same name. Exam.objects was already bound to the first definition when the
# Exam class body executed, so this second class only affects later name
# lookups — almost certainly unintended and worth consolidating.
class ExamManeger(models.Manager):
    """Duplicate manager: annotates each Exam with its answer count only."""
    def get_queryset(self):
        return super().get_queryset().annotate(Count('answer'))
class DrillManager(models.Manager):
    """Manager used as Drill class manager."""
    # NOTE(review): score() is an unimplemented stub.
    def score(self):
        """Each drill queryset with a score of correct answer attribute.
        Each drill with score of the correct answer as
        a `mark_point_sum` attribute.
        Return QuerySet: the drill queryset with `total_score` attribute
        """
        pass
    def get_queryset(self):
        # Annotates every Drill with the summed points of all its marks.
        mark_point_sum = Sum('mark__answer__point', )
        # NOTE(review): the triple-quoted string below is a no-op expression —
        # effectively commented-out code for restricting the sum to correct
        # answers only. Kept verbatim; decide whether to enable or delete it.
        """
        filter=Q(
            mark__answer__correct=F('mark__your_choice')
        )
        """
        return super().get_queryset().annotate(mark_point_sum=mark_point_sum)
class Drill(models.Model):
    """Hold Drill object for the Exam instance."""
    exam = models.ForeignKey('Exam', on_delete=models.CASCADE)
    # verbose_name 'ドリルの説明' = drill description; '作成日' = creation date.
    description = models.CharField(verbose_name='ドリルの説明', max_length=200)
    created = models.DateTimeField(verbose_name='作成日',
                                   blank=True,
                                   default=None,
                                   null=True)
    objects = DrillManager()
    def __str__(self):
        return f"is {self.description}."
    def save(self, *args, **kwargs):
        """Save the drill instance as well as create the Mark objects.
        Create the Mark objects as many as the answer objects.
        Todo:
            Work around when there is no answer object.
        """
        super().save(*args, **kwargs)
        # One Mark per Answer of the parent exam.
        answers = self.exam.answer_set.all()
        for an in answers:
            Mark.objects.create(drill=self, answer=an)
    def point_full_mark(self):
        """ Return the dictionary of the sum of the allocated point.
        Returns:
            the dictionary of total: {'total': 100}
        """
        p = self.exam.answer_set.all()
        # NOTE(review): local name 'dict' shadows the builtin.
        dict = p.aggregate(total=Sum('point'))
        return dict # {'total': 100}
    def point_earned(self):
        """ Return the sum of point earned."""
        qs = Mark.objects.filter(drill=self)
        # Only sum the points of answers whose stored mark matches the correct one.
        dict = qs.aggregate(total=Sum(
            'answer__point', filter=Q(answer__correct=F('your_choice'))))
        return dict # {'total': 100}
    def register_grade(self):
        """Register the result of this drill."""
        dict = self.point_earned()
        Grade.objects.create(
            exam=self.exam,
            point=dict['total'],
            created=timezone.now(),
        )
class MarkManager(models.Manager):
    """Mark Manager."""
    def create_mark(self, drill, answer, your_choice=''):
        """Create mark method.
        Create and return mark object with drill and answer.
        """
        # your_choice defaults to '' (unanswered); both FKs are required.
        mark = self.create(
            drill=drill,
            answer=answer,
            your_choice=your_choice,
        )
        return mark
class Mark(models.Model):
    """The class contains submitted answers."""
    # The drill this mark belongs to, and the answer key row it responds to.
    drill = models.ForeignKey('Drill', on_delete=models.CASCADE)
    answer = models.ForeignKey('Answer', on_delete=models.CASCADE)
    # Mark the user submitted; blank until answered.
    your_choice = models.CharField(
        max_length=30,
        choices=CHOICE_MARK_CHOICES,
        blank=True,
    )
    objects = MarkManager()
    def __str__(self):
        return f"is {self.your_choice}."
class Grade(models.Model):
    """Hold the results of drills.
    """
    exam = models.ForeignKey('Exam', on_delete=models.CASCADE)
    # Total points earned for one completed drill.
    point = models.PositiveIntegerField(blank=True)
    created = models.DateTimeField(
        blank=True,
        default=None,
    )
    def __str__(self):
        return f"is {self.point}"
class Publication(models.Model):
    """A publication that articles can be published in (many-to-many demo model)."""
    title = models.CharField(max_length=30)
    class Meta:
        ordering = ('title', )
    def __str__(self):
        return self.title
class Article(models.Model):
    """An article appearing in zero or more publications (many-to-many demo model)."""
    headline = models.CharField(max_length=100)
    publications = models.ManyToManyField(Publication)
    class Meta:
        ordering = ('headline', )
    def __str__(self):
        return self.headline
def factorial(n):
    """Return the factorial of n, an exact integer >= 0.
    >>> [factorial(n) for n in range(6)]
    [1, 1, 2, 6, 24, 120]
    >>> factorial(30)
    265252859812191058636308480000000
    >>> factorial(-1)
    Traceback (most recent call last):
        ...
    ValueError: n must be >= 0
    Floats are accepted when they carry an exact integer value:
    >>> factorial(30.0)
    265252859812191058636308480000000
    >>> factorial(30.1)
    Traceback (most recent call last):
        ...
    ValueError: n must be exact integer
    Ridiculously large floats are rejected:
    >>> factorial(1e100)
    Traceback (most recent call last):
        ...
    OverflowError: n too large
    """
    import math
    # Guard clauses: negative, non-integral float, or a float so large
    # that adding 1 is a no-op (e.g. 1e300).
    if not n >= 0:
        raise ValueError("n must be >= 0")
    if math.floor(n) != n:
        raise ValueError("n must be exact integer")
    if n + 1 == n:
        raise OverflowError("n too large")
    product = 1
    for factor in range(2, int(n) + 1):
        product *= factor
    return product
| 762 | 890 | 329 |
1399b004703bcafe35b47e54193957c9e3e2651a | 1,836 | py | Python | pycycle/elements/test/test_ambient.py | askprash/pyCycle | e0845d7e320b6cb47367734c26ec3410c9fa5bf7 | [
"Apache-2.0"
] | 38 | 2019-08-12T15:27:25.000Z | 2022-01-27T16:34:51.000Z | pycycle/elements/test/test_ambient.py | askprash/pyCycle | e0845d7e320b6cb47367734c26ec3410c9fa5bf7 | [
"Apache-2.0"
] | 16 | 2019-11-07T17:39:54.000Z | 2022-03-01T14:59:48.000Z | pycycle/elements/test/test_ambient.py | askprash/pyCycle | e0845d7e320b6cb47367734c26ec3410c9fa5bf7 | [
"Apache-2.0"
] | 35 | 2019-08-12T15:27:37.000Z | 2022-03-17T16:25:33.000Z | import numpy as np
import unittest
import os
from openmdao.api import Problem
from openmdao.utils.assert_utils import assert_check_partials
from pycycle.elements.ambient import Ambient
# Directory of this test file; used to locate the regression data.
fpath = os.path.dirname(os.path.realpath(__file__))
# NPSS reference results loaded at import time (header row skipped).
ref_data = np.loadtxt(fpath + "/reg_data/ambient.csv",
                      delimiter=",", skiprows=1)
header = ['alt','MN','dTs','Pt','Ps','Tt','Ts']
# Map column name -> column index into each ref_data row.
h_map = dict(((v_name,i) for i,v_name in enumerate(header)))
if __name__ == "__main__":
    unittest.main()
import unittest
import os
from openmdao.api import Problem
from openmdao.utils.assert_utils import assert_check_partials
from pycycle.elements.ambient import Ambient
# Directory of this test file; used to locate the regression data.
fpath = os.path.dirname(os.path.realpath(__file__))
# NPSS reference results loaded at import time (header row skipped).
ref_data = np.loadtxt(fpath + "/reg_data/ambient.csv",
                      delimiter=",", skiprows=1)
header = ['alt','MN','dTs','Pt','Ps','Tt','Ts']
# Map column name -> column index into each ref_data row.
h_map = dict(((v_name,i) for i,v_name in enumerate(header)))
class FlowStartTestCase(unittest.TestCase):
    """Regression test: compare the Ambient component against NPSS reference data."""
    def setUp(self):
        """Build a minimal OpenMDAO problem wrapping a single Ambient component."""
        self.prob = Problem()
        self.prob.model.add_subsystem('amb', Ambient())
        self.prob.model.set_input_defaults('amb.alt', 0, units='ft')
        self.prob.model.set_input_defaults('amb.dTs', 0, units='degR')
        self.prob.setup(check=False, force_alloc_complex=True)
    def test_case1(self):
        """Check Ps/Ts against every reference row, then verify partials via complex step."""
        np.seterr(divide='raise')
        # 6 cases to check against
        for i, data in enumerate(ref_data):
            self.prob['amb.alt'] = data[h_map['alt']]
            self.prob['amb.dTs'] = data[h_map['dTs']]
            self.prob.run_model()
            # check outputs
            tol = 1.0e-2 # seems a little generous
            npss = data[h_map['Ps']]
            pyc = self.prob['amb.Ps']
            rel_err = abs(npss - pyc)/npss
            self.assertLessEqual(rel_err, tol)
            npss = data[h_map['Ts']]
            pyc = self.prob['amb.Ts']
            rel_err = abs(npss - pyc)/npss
            self.assertLessEqual(rel_err, tol)
        # Complex-step check of the analytic partial derivatives.
        partial_data = self.prob.check_partials(out_stream=None, method='cs',
                                                includes=['amb.*'], excludes=['*.base_thermo.*', 'amb.readAtmTable'])
        assert_check_partials(partial_data, atol=1e-8, rtol=1e-8)
if __name__ == "__main__":
    unittest.main()
e6ba67dd133bf02c0b7e121ffba5bad0c094d16b | 169 | py | Python | lib/JumpScale/baselib/admin/__init__.py | jumpscale7/jumpscale_core7 | c3115656214cab1bd32f7a1e092c0bffc84a00cd | [
"Apache-2.0"
] | null | null | null | lib/JumpScale/baselib/admin/__init__.py | jumpscale7/jumpscale_core7 | c3115656214cab1bd32f7a1e092c0bffc84a00cd | [
"Apache-2.0"
] | 4 | 2016-08-25T12:08:39.000Z | 2018-04-12T12:36:01.000Z | lib/JumpScale/baselib/admin/__init__.py | jumpscale7/jumpscale_core7 | c3115656214cab1bd32f7a1e092c0bffc84a00cd | [
"Apache-2.0"
] | 3 | 2016-03-08T07:49:34.000Z | 2018-10-19T13:56:43.000Z | from JumpScale import j
j.base.loader.makeAvailable(j, 'tools')
j.tools._register('admin', cb)
| 18.777778 | 39 | 0.721893 | from JumpScale import j
j.base.loader.makeAvailable(j, 'tools')
def cb():
from .Admin import AdminFactory
return AdminFactory()
j.tools._register('admin', cb)
| 50 | 0 | 23 |
025fa04c3d6f58c6e16463f74df0305706a0b999 | 6,706 | py | Python | dizoo/gfootball/model/conv1d/conv1d.py | LuciusMos/DI-engine | b040b1c36afce038effec9eb483f625131573824 | [
"Apache-2.0"
] | 464 | 2021-07-08T07:26:33.000Z | 2022-03-31T12:35:16.000Z | dizoo/gfootball/model/conv1d/conv1d.py | LuciusMos/DI-engine | b040b1c36afce038effec9eb483f625131573824 | [
"Apache-2.0"
] | 177 | 2021-07-09T08:22:55.000Z | 2022-03-31T07:35:22.000Z | dizoo/gfootball/model/conv1d/conv1d.py | LuciusMos/DI-engine | b040b1c36afce038effec9eb483f625131573824 | [
"Apache-2.0"
] | 92 | 2021-07-08T12:16:37.000Z | 2022-03-31T09:24:41.000Z | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from ding.utils import MODEL_REGISTRY, deep_merge_dicts
from ding.config import read_config
from dizoo.gfootball.model.conv1d.conv1d_default_config import conv1d_default_config
@MODEL_REGISTRY.register('conv1d')
| 48.244604 | 119 | 0.659708 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from ding.utils import MODEL_REGISTRY, deep_merge_dicts
from ding.config import read_config
from dizoo.gfootball.model.conv1d.conv1d_default_config import conv1d_default_config
@MODEL_REGISTRY.register('conv1d')
class GfootballConv1DModel(nn.Module):
    """Conv1d-based policy/value network for Gfootball.

    Embeds per-entity observation groups (player, ball, both teams), pools
    team members with 1x1 Conv1d layers, fuses everything through an LSTM
    and emits an action distribution plus a state value.
    """
    def __init__(
            self,
            cfg: dict = None,
    ) -> None:
        """Build the network; ``cfg`` is deep-merged over ``conv1d_default_config``.

        Fix: the default used to be a mutable ``{}`` shared by every call;
        a ``None`` sentinel with ``cfg or {}`` keeps the old behavior safely.
        """
        super(GfootballConv1DModel, self).__init__()
        self.cfg = deep_merge_dicts(conv1d_default_config, cfg or {})
        self.fc_player = nn.Linear(
            self.cfg.feature_embedding.player.input_dim, self.cfg.feature_embedding.player.output_dim
        )
        self.fc_ball = nn.Linear(self.cfg.feature_embedding.ball.input_dim, self.cfg.feature_embedding.ball.output_dim)
        self.fc_left = nn.Linear(
            self.cfg.feature_embedding.left_team.input_dim, self.cfg.feature_embedding.left_team.output_dim
        )
        self.fc_right = nn.Linear(
            self.cfg.feature_embedding.right_team.input_dim, self.cfg.feature_embedding.right_team.output_dim
        )
        self.fc_left_closest = nn.Linear(
            self.cfg.feature_embedding.left_closest.input_dim, self.cfg.feature_embedding.left_closest.output_dim
        )
        self.fc_right_closest = nn.Linear(
            self.cfg.feature_embedding.right_closest.input_dim, self.cfg.feature_embedding.right_closest.output_dim
        )
        # 1x1 convolutions act as per-team-member shared linear projections.
        self.conv1d_left = nn.Conv1d(
            self.cfg.feature_embedding.left_team.output_dim,
            self.cfg.feature_embedding.left_team.conv1d_output_channel,
            1,
            stride=1
        )
        self.conv1d_right = nn.Conv1d(
            self.cfg.feature_embedding.right_team.output_dim,
            self.cfg.feature_embedding.right_team.conv1d_output_channel,
            1,
            stride=1
        )
        # 10 left-team entries vs 11 right-team entries (opponent includes keeper).
        self.fc_left2 = nn.Linear(
            self.cfg.feature_embedding.left_team.conv1d_output_channel * 10,
            self.cfg.feature_embedding.left_team.fc_output_dim
        )
        self.fc_right2 = nn.Linear(
            self.cfg.feature_embedding.right_team.conv1d_output_channel * 11,
            self.cfg.feature_embedding.right_team.fc_output_dim
        )
        self.fc_cat = nn.Linear(self.cfg.fc_cat.input_dim, self.cfg.lstm_size)
        self.norm_player = nn.LayerNorm(64)
        self.norm_ball = nn.LayerNorm(64)
        self.norm_left = nn.LayerNorm(48)
        self.norm_left2 = nn.LayerNorm(96)
        self.norm_left_closest = nn.LayerNorm(48)
        self.norm_right = nn.LayerNorm(48)
        self.norm_right2 = nn.LayerNorm(96)
        self.norm_right_closest = nn.LayerNorm(48)
        self.norm_cat = nn.LayerNorm(self.cfg.lstm_size)
        self.lstm = nn.LSTM(self.cfg.lstm_size, self.cfg.lstm_size)
        self.fc_pi_a1 = nn.Linear(self.cfg.lstm_size, self.cfg.policy_head.hidden_dim)
        self.fc_pi_a2 = nn.Linear(self.cfg.policy_head.hidden_dim, self.cfg.policy_head.act_shape)
        self.norm_pi_a1 = nn.LayerNorm(164)
        self.fc_pi_m1 = nn.Linear(self.cfg.lstm_size, 164)
        self.fc_pi_m2 = nn.Linear(164, 8)
        self.norm_pi_m1 = nn.LayerNorm(164)
        self.fc_v1 = nn.Linear(self.cfg.lstm_size, self.cfg.value_head.hidden_dim)
        self.norm_v1 = nn.LayerNorm(164)
        self.fc_v2 = nn.Linear(self.cfg.value_head.hidden_dim, self.cfg.value_head.output_dim, bias=False)
    def forward(self, state_dict):
        """One forward step over a batch of observations.

        ``unsqueeze(0)`` adds a leading horizon dimension of 1 for the LSTM.
        NOTE(review): ``state_dict.pop('prev_state', ...)`` mutates the
        caller's dict — confirm callers do not reuse it afterwards.
        """
        player_state = state_dict["player"].unsqueeze(0)
        ball_state = state_dict["ball"].unsqueeze(0)
        left_team_state = state_dict["left_team"].unsqueeze(0)
        left_closest_state = state_dict["left_closest"].unsqueeze(0)
        right_team_state = state_dict["right_team"].unsqueeze(0)
        right_closest_state = state_dict["right_closest"].unsqueeze(0)
        avail = state_dict["avail"].unsqueeze(0)
        player_embed = self.norm_player(self.fc_player(player_state))
        ball_embed = self.norm_ball(self.fc_ball(ball_state))
        left_team_embed = self.norm_left(self.fc_left(left_team_state)) # horizon, batch, n, dim
        left_closest_embed = self.norm_left_closest(self.fc_left_closest(left_closest_state))
        right_team_embed = self.norm_right(self.fc_right(right_team_state))
        right_closest_embed = self.norm_right_closest(self.fc_right_closest(right_closest_state))
        [horizon, batch_size, n_player, dim] = left_team_embed.size()
        left_team_embed = left_team_embed.view(horizon * batch_size, n_player,
                                               dim).permute(0, 2, 1) # horizon * batch, dim1, n
        left_team_embed = F.relu(self.conv1d_left(left_team_embed)).permute(0, 2, 1) # horizon * batch, n, dim2
        left_team_embed = left_team_embed.reshape(horizon * batch_size,
                                                  -1).view(horizon, batch_size, -1) # horizon, batch, n * dim2
        left_team_embed = F.relu(self.norm_left2(self.fc_left2(left_team_embed)))
        right_team_embed = right_team_embed.view(horizon * batch_size, n_player + 1,
                                                 dim).permute(0, 2, 1) # horizon * batch, dim1, n
        right_team_embed = F.relu(self.conv1d_right(right_team_embed)).permute(0, 2, 1) # horizon * batch, n * dim2
        ## Usually we need to call reshape() or contiguous() after permute, transpose, etc to make sure
        # tensor on memory is contiguous
        right_team_embed = right_team_embed.reshape(horizon * batch_size, -1).view(horizon, batch_size, -1)
        ## view() can only be used on contiguous tensor, reshape() don't have this limit.
        right_team_embed = F.relu(self.norm_right2(self.fc_right2(right_team_embed)))
        cat = torch.cat(
            [player_embed, ball_embed, left_team_embed, right_team_embed, left_closest_embed, right_closest_embed], 2
        )
        cat = F.relu(self.norm_cat(self.fc_cat(cat)))
        hidden = state_dict.pop('prev_state', None)
        if hidden is None:
            h_in = (
                torch.zeros([1, batch_size, self.cfg.lstm_size],
                            dtype=torch.float), torch.zeros([1, batch_size, self.cfg.lstm_size], dtype=torch.float)
            )
        else:
            h_in = hidden
        out, h_out = self.lstm(cat, h_in)
        a_out = F.relu(self.norm_pi_a1(self.fc_pi_a1(out)))
        a_out = self.fc_pi_a2(a_out)
        # Mask unavailable actions by pushing their logits to a large negative value.
        logit = a_out + (avail - 1) * 1e7
        prob = F.softmax(logit, dim=2)
        v = F.relu(self.norm_v1(self.fc_v1(out)))
        v = self.fc_v2(v)
        return {'logit': prob.squeeze(0), 'value': v.squeeze(0), 'next_state': h_out}
| 6,285 | 17 | 76 |
87278da15a86b03d091fdffe67c2fbb97e9cfaf7 | 2,611 | py | Python | nosy-bdd/bddtests/test_auth.py | notification-system/play_circle | a141ba6be1b642cc635851b5fc259d56a87d4301 | [
"Apache-2.0"
] | null | null | null | nosy-bdd/bddtests/test_auth.py | notification-system/play_circle | a141ba6be1b642cc635851b5fc259d56a87d4301 | [
"Apache-2.0"
] | 1 | 2022-02-16T00:57:10.000Z | 2022-02-16T00:57:10.000Z | nosy-bdd/bddtests/test_auth.py | notification-system/play_circle | a141ba6be1b642cc635851b5fc259d56a87d4301 | [
"Apache-2.0"
] | null | null | null | import requests
import json
import bddtests.config as c
| 35.283784 | 85 | 0.712371 | import requests
import json
import bddtests.config as c
def test_auth_user_creation():
    """POST a new user and expect 201 with the same email echoed back."""
    print("Get Text response")
    r = requests.post(
        url=c.create_user_url, data=json.dumps(c.api_user_create), headers=c.headers
    )
    json_result = r.json()
    assert json_result.get("email") == c.api_user_create.get("email")
    assert r.status_code == 201
def test_auth_conflict_user_creation():
    """Creating the same user twice must return 409 Conflict."""
    r = requests.post(
        url=c.create_user_url, data=json.dumps(c.api_user_create), headers=c.headers
    )
    assert r.status_code == 409
def test_auth_get_token():
    """Requesting a token with valid credentials returns 200 and a JSON body."""
    r = requests.post(
        url=c.get_token_url, data=json.dumps(c.api_user_create), headers=c.headers
    )
    assert r.status_code == 200
    assert r.json is not None
def test_get_status(auth_get_token):
    """The status endpoint reports 'true' for a freshly issued token."""
    r = requests.post(
        url=c.status_token_url, data=json.dumps(auth_get_token), headers=c.headers
    )
    assert r.text == "true"
def test_auth_get_user_profile(auth_get_token):
    """GET the profile with a Bearer token and check the known user fields."""
    json_bearer = "Bearer " + auth_get_token.get("accessToken")
    headers_auth = {"Content-type": "application/json", "Authorization": json_bearer}
    r = requests.get(url=c.create_user_url, headers=headers_auth)
    user_profile = r.json()
    assert user_profile.get("firstName") == c.api_user_get.get("firstName")
    assert user_profile.get("lastName") == c.api_user_get.get("lastName")
    assert user_profile.get("email") == c.api_user_get.get("email")
def test_auth_logout(auth_get_token):
    """Logging out with a valid Bearer token returns 204 No Content."""
    json_bearer = "Bearer " + auth_get_token.get("accessToken")
    headers_auth = {"Content-type": "application/json", "Authorization": json_bearer}
    r = requests.get(url=c.logout_token_url, headers=headers_auth)
    assert r.status_code == 204
def test_email_admin_inputsystems_create(auth_get_token):
    """Create an input system as an authenticated admin; expect 201 and the name echoed."""
    json_bearer = "Bearer " + auth_get_token.get("accessToken")
    headers_auth = {"Content-type": "application/json", "Authorization": json_bearer}
    r = requests.post(
        url=c.create_inputsystemdto_url,
        data=json.dumps(c.create_inputsystemdto),
        headers=headers_auth,
    )
    created_input_system = r.json()
    assert r.status_code == 201
    assert created_input_system.get("name") == c.create_inputsystemdto.get("name")
def test_email_admin_get_emailproviders(auth_get_token):
    """List e-mail providers as an authenticated admin; expect the fixed provider set."""
    json_bearer = "Bearer " + auth_get_token.get("accessToken")
    headers_auth = {"Content-type": "application/json", "Authorization": json_bearer}
    r = requests.get(url=c.get_emailproviders_url, headers=headers_auth)
    assert r.status_code == 200
    assert r.text == '["DEFAULT","YANDEX","GMAIL"]'
| 2,363 | 0 | 184 |
ab1e107d90b72883319c56872da66b126e65a2f8 | 1,857 | py | Python | scripts/source/knn_model.py | hi-akshat/Emotion-Recogniton-from-EEG-Signals | 3b939dd9557188048d41ca16c02004c4aabbc663 | [
"MIT"
] | 26 | 2020-09-30T01:56:39.000Z | 2022-01-17T11:53:48.000Z | scripts/source/knn_model.py | akshat1706/Emotion-Recogniton-from-DEAP | 3b939dd9557188048d41ca16c02004c4aabbc663 | [
"MIT"
] | 1 | 2020-07-06T13:36:09.000Z | 2020-07-06T13:36:09.000Z | scripts/source/knn_model.py | akshat1706/Emotion-Recogniton-from-DEAP | 3b939dd9557188048d41ca16c02004c4aabbc663 | [
"MIT"
] | 13 | 2019-08-15T02:31:44.000Z | 2020-05-20T10:21:53.000Z | from sklearn.neighbors import KNeighborsClassifier
import numpy as np
from sklearn.model_selection import KFold,train_test_split
kf=KFold(n_splits=10)
train_y = [] #Actual result of the data used in testing of the valence
train_a = [] #Actual result of the data used in testing of the arousal
train_x = np.genfromtxt('traina.csv',delimiter=',',skip_header=0)
train_x = np.array(train_x)
train_x=train_x.astype(np.long)
f = open("labels_0.dat","r")
for i in f:
train_y.append(i) #copying data from the file to the list
train_y = np.array(train_y).astype(np.float)
train_y = train_y.astype(np.int)#changing the list to numpy array and its value type from float to int
clf = KNeighborsClassifier(n_neighbors=3) #knn model for classifying the valence
for train_index,test_index in kf.split(train_x):
X_train,X_test,y_train,y_test=train_x[train_index],train_x[test_index],train_y[train_index],train_y[test_index]
predicted_val=get_score(clf,X_train,X_test,y_train,y_test)
print( predicted_val)
f = open("labels_1.dat","r")
for i in f:
train_a.append(i) #copying data from the file to the list
train_a = np.array(train_a).astype(np.float)
train_a = train_a.astype(np.int) #changing the list to numpy array and its value type from float to int
kf1=KFold(n_splits=10)
clf1 = KNeighborsClassifier(n_neighbors=3) #knn model for classifying the valence
for train_index,test_index in kf1.split(train_x):
X_train1,X_test1,y_train1,y_test1=train_x[train_index],train_x[test_index],train_a[train_index],train_a[test_index]
arousal_val=get_score(clf1,X_train1,X_test1,y_train1,y_test1)
print(arousal_val)
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
from sklearn.model_selection import KFold, train_test_split


def get_score(model, X_train, X_test, y_train, y_test):
    """Fit *model* on the training split and return its accuracy on the test split."""
    model.fit(X_train, y_train)
    return model.score(X_test, y_test)


kf = KFold(n_splits=10)
train_y = []  # Actual result of the data used in testing of the valence
train_a = []  # Actual result of the data used in testing of the arousal
train_x = np.genfromtxt('traina.csv', delimiter=',', skip_header=0)
train_x = np.array(train_x)
# Fix: np.long / np.float / np.int were deprecated in NumPy 1.20 and removed
# in 1.24; use np.int64 and the builtin float/int instead.
train_x = train_x.astype(np.int64)
with open("labels_0.dat", "r") as f:  # close the file deterministically
    for i in f:
        train_y.append(i)  # copying data from the file to the list
train_y = np.array(train_y).astype(float)
train_y = train_y.astype(int)  # changing the list to numpy array and its value type from float to int
clf = KNeighborsClassifier(n_neighbors=3)  # knn model for classifying the valence
for train_index, test_index in kf.split(train_x):
    X_train, X_test, y_train, y_test = train_x[train_index], train_x[test_index], train_y[train_index], train_y[test_index]
    predicted_val = get_score(clf, X_train, X_test, y_train, y_test)
    print(predicted_val)
with open("labels_1.dat", "r") as f:
    for i in f:
        train_a.append(i)  # copying data from the file to the list
train_a = np.array(train_a).astype(float)
train_a = train_a.astype(int)  # changing the list to numpy array and its value type from float to int
kf1 = KFold(n_splits=10)
# Fix: the original comment said "valence" here; this classifier handles arousal.
clf1 = KNeighborsClassifier(n_neighbors=3)  # knn model for classifying the arousal
for train_index, test_index in kf1.split(train_x):
    X_train1, X_test1, y_train1, y_test1 = train_x[train_index], train_x[test_index], train_a[train_index], train_a[test_index]
    arousal_val = get_score(clf1, X_train1, X_test1, y_train1, y_test1)
    print(arousal_val)
| 191 | 0 | 23 |
0ef62b4680c0d4b7a18845e15141971134e0e7a8 | 2,521 | py | Python | simbad/exit.py | hlasimpk/SIMBAD | 684de027f25fe63e8d973e494b0adf74db08cd89 | [
"BSD-3-Clause"
] | null | null | null | simbad/exit.py | hlasimpk/SIMBAD | 684de027f25fe63e8d973e494b0adf74db08cd89 | [
"BSD-3-Clause"
] | null | null | null | simbad/exit.py | hlasimpk/SIMBAD | 684de027f25fe63e8d973e494b0adf74db08cd89 | [
"BSD-3-Clause"
] | null | null | null | """Exit utility for catching errors and printing unified error messages"""
__author__ = "Jens Thomas & Felix Simkovic"
__date__ = "08 May 2017"
__version__ = "1.1"
import logging
import os
import sys
import traceback
try:
import pyrvapi
except ImportError:
pyrvapi = None
def _debug_logfile(logger):
"""Get the debug logfile"""
if logger.handlers:
for d in logger.handlers:
if hasattr(d, 'baseFilename') and d.level == logging.DEBUG:
return getattr(d, 'baseFilename')
return None
def exit_error(exc_type, exc_value, exc_traceback):
    """Exit on error collecting as much information as we can.
    Parameters
    ----------
    exc_type : str
        The exception type
    exc_value : str
        The exception value
    exc_traceback
        The exception traceback
    Warnings
    --------
    This function terminates the program after printing appropriate
    error messages.
    """
    # Get the root logger
    logger = logging.getLogger(__name__)
    # Traceback info
    traceback_value_msg = exc_value
    traceback_full_msg = traceback.format_exception(exc_type, exc_value, exc_traceback)
    # Find debug log file (None when no DEBUG file handler is attached)
    debug_log = _debug_logfile(logger)
    # Construct the message
    main_msg = "%(sep)s%(hashish)s%(sep)s"\
               + "%(short_hash)s%(msg)s%(short_hash)s%(sep)s"\
               + "%(hashish)s%(sep)s%(sep)s"\
               + "SIMBAD exited with message: %(tb_value)s"\
               + "%(sep)s%(sep)s%(hashish)s%(sep)s%(sep)s"
    if debug_log:
        main_msg += "More information may be found in the debug log file: %(logfile)s%(sep)s"
    main_msg += "%(sep)sIf you believe that this is an error with SIMBAD, please email: %(email)s%(sep)s"
    main_msg += "providing as much information as you can about how you ran the program.%(sep)s"
    if debug_log:
        # Fix: the message previously read "Please static the debug logfile",
        # which is not English; it should ask the user to attach the file.
        main_msg += "%(sep)sPlease attach the debug logfile with your email: %(logfile)s%(sep)s"
    nhashes = 70
    main_msg_kwargs = {
        'sep': os.linesep, 'hashish': '*' * nhashes, 'short_hash': '*' * 19, 'msg': "SIMBAD_ERROR".center(32, " "),
        'tb_value': traceback_value_msg, 'logfile': debug_log, 'email': 'ccp4@stfc.ac.uk'
    }
    # String it all together; a single mapping as the logging arg drives the
    # %(name)s placeholder substitution lazily.
    logger.critical(main_msg, main_msg_kwargs)
    logger.critical("SIMBAD EXITING AT...")
    logger.critical("".join(traceback_full_msg))
    # Make sure the error widget is updated
    if pyrvapi:
        pyrvapi.rvapi_flush()
    sys.exit(1)
| 29.658824 | 116 | 0.636255 | """Exit utility for catching errors and printing unified error messages"""
__author__ = "Jens Thomas & Felix Simkovic"
__date__ = "08 May 2017"
__version__ = "1.1"
import logging
import os
import sys
import traceback
try:
import pyrvapi
except ImportError:
pyrvapi = None
def _debug_logfile(logger):
"""Get the debug logfile"""
if logger.handlers:
for d in logger.handlers:
if hasattr(d, 'baseFilename') and d.level == logging.DEBUG:
return getattr(d, 'baseFilename')
return None
def exit_error(exc_type, exc_value, exc_traceback):
"""Exit on error collecting as much information as we can.
Parameters
----------
exc_type : str
The exception type
exc_value : str
The exception value
exc_traceback
The exception traceback
Warnings
--------
This function terminates the program after printing appropriate
error messages.
"""
# Get the root logger
logger = logging.getLogger(__name__)
# Traceback info
traceback_value_msg = exc_value
traceback_full_msg = traceback.format_exception(exc_type, exc_value, exc_traceback)
# Find debug log file
debug_log = _debug_logfile(logger)
# Construct the message
main_msg = "%(sep)s%(hashish)s%(sep)s"\
+ "%(short_hash)s%(msg)s%(short_hash)s%(sep)s"\
+ "%(hashish)s%(sep)s%(sep)s"\
+ "SIMBAD exited with message: %(tb_value)s"\
+ "%(sep)s%(sep)s%(hashish)s%(sep)s%(sep)s"
if debug_log:
main_msg += "More information may be found in the debug log file: %(logfile)s%(sep)s"
main_msg += "%(sep)sIf you believe that this is an error with SIMBAD, please email: %(email)s%(sep)s"
main_msg += "providing as much information as you can about how you ran the program.%(sep)s"
if debug_log:
main_msg += "%(sep)sPlease static the debug logfile with your email: %(logfile)s%(sep)s"
nhashes = 70
main_msg_kwargs = {
'sep': os.linesep, 'hashish': '*' * nhashes, 'short_hash': '*' * 19, 'msg': "SIMBAD_ERROR".center(32, " "),
'tb_value': traceback_value_msg, 'logfile': debug_log, 'email': 'ccp4@stfc.ac.uk'
}
# String it all together
logger.critical(main_msg, main_msg_kwargs)
logger.critical("SIMBAD EXITING AT...")
logger.critical("".join(traceback_full_msg))
# Make sure the error widget is updated
if pyrvapi:
pyrvapi.rvapi_flush()
sys.exit(1)
| 0 | 0 | 0 |
72f645b4ace4b4e23c0f7f12bd4d30f9d66670b3 | 2,073 | py | Python | main.py | mauricioprod/DeepLabv3FineTuning | fd0c883cd11709802fd3bbf1f7f3c0455216acf9 | [
"MIT"
] | null | null | null | main.py | mauricioprod/DeepLabv3FineTuning | fd0c883cd11709802fd3bbf1f7f3c0455216acf9 | [
"MIT"
] | null | null | null | main.py | mauricioprod/DeepLabv3FineTuning | fd0c883cd11709802fd3bbf1f7f3c0455216acf9 | [
"MIT"
] | null | null | null | from pathlib import Path
import click
import torch
from sklearn.metrics import f1_score, roc_auc_score, jaccard_score
from torch.utils import data
import datahandler
from model import createDeepLabv3
from trainer import train_model
@click.command()
@click.option("--data-directory",
required=True,
help="Specify the data directory.")
@click.option("--exp_directory",
required=True,
help="Specify the experiment directory.")
@click.option(
"--epochs",
default=25,
type=int,
help="Specify the number of epochs you want to run the experiment for.")
@click.option("--batch-size",
default=4,
type=int,
help="Specify the batch size for the dataloader.")
if __name__ == "__main__":
main()
| 30.485294 | 89 | 0.654124 | from pathlib import Path
import click
import torch
from sklearn.metrics import f1_score, roc_auc_score, jaccard_score
from torch.utils import data
import datahandler
from model import createDeepLabv3
from trainer import train_model
@click.command()
@click.option("--data-directory",
required=True,
help="Specify the data directory.")
@click.option("--exp_directory",
required=True,
help="Specify the experiment directory.")
@click.option(
"--epochs",
default=25,
type=int,
help="Specify the number of epochs you want to run the experiment for.")
@click.option("--batch-size",
default=4,
type=int,
help="Specify the batch size for the dataloader.")
def main(data_directory, exp_directory, epochs, batch_size):
    """Fine-tune DeepLabv3 on a single-folder segmentation dataset.

    Builds the pretrained model, prepares the experiment directory,
    trains with MSE loss and Adam, then saves the trained model to
    ``<exp_directory>/weights.pt``.

    Parameters are supplied by the click options declared above:
    data_directory (str), exp_directory (str), epochs (int),
    batch_size (int).
    """
    # Create the deeplabv3 resnet101 model which is pretrained on a subset
    # of COCO train2017, on the 20 categories that are present in the Pascal VOC dataset.
    model = createDeepLabv3()
    model.train()
    data_directory = Path(data_directory)
    # Create the experiment directory if not present
    exp_directory = Path(exp_directory)
    if not exp_directory.exists():
        exp_directory.mkdir()
    # Specify the loss function
    criterion = torch.nn.MSELoss(reduction='mean')
    # Specify the optimizer with a lower learning rate
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    # Specify the evaluation metrics
    metrics = {'f1_score': f1_score,
               'auroc': roc_auc_score,
               #'iou': jaccard_score
               }
    # Create the dataloader
    dataloaders = datahandler.get_dataloader_single_folder(
        data_directory, batch_size=batch_size)
    # Train; the returned history is discarded -- only the weights matter here
    _ = train_model(model,
                    criterion,
                    dataloaders,
                    optimizer,
                    bpath=exp_directory,
                    metrics=metrics,
                    num_epochs=epochs)
    # Save the trained model
    torch.save(model, exp_directory / 'weights.pt')
if __name__ == "__main__":
main()
| 1,245 | 0 | 22 |
716e3f35d02f8be3dc9e9b53e14b7b2822a7d5f7 | 1,098 | py | Python | vsphere/tests/legacy/conftest.py | 01100010011001010110010101110000/integrations-core | b6216f96c9faa67e9e1e236caa8ddac597f0ef13 | [
"BSD-3-Clause"
] | null | null | null | vsphere/tests/legacy/conftest.py | 01100010011001010110010101110000/integrations-core | b6216f96c9faa67e9e1e236caa8ddac597f0ef13 | [
"BSD-3-Clause"
] | null | null | null | vsphere/tests/legacy/conftest.py | 01100010011001010110010101110000/integrations-core | b6216f96c9faa67e9e1e236caa8ddac597f0ef13 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import mock
import pytest
from datadog_checks.vsphere.legacy.vsphere_legacy import VSphereLegacyCheck
from .utils import disable_thread_pool, get_mocked_server
def _instance():
"""
Create a default instance, used by multiple fixtures
"""
return {'name': 'vsphere_mock', 'tags': ['foo:bar']}
@pytest.fixture
def instance():
"""
Return a default instance
"""
return _instance()
@pytest.fixture
def vsphere():
"""
Provide a check instance with mocked parts
"""
# mock the server
server_mock = get_mocked_server()
# create a check instance
check = VSphereLegacyCheck('vsphere', {}, [_instance()])
# patch the check instance
check._get_server_instance = mock.MagicMock(return_value=server_mock)
# return the check after disabling the thread pool
return disable_thread_pool(check)
@pytest.fixture
| 22.875 | 75 | 0.711293 | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import mock
import pytest
from datadog_checks.vsphere.legacy.vsphere_legacy import VSphereLegacyCheck
from .utils import disable_thread_pool, get_mocked_server
def _instance():
"""
Create a default instance, used by multiple fixtures
"""
return {'name': 'vsphere_mock', 'tags': ['foo:bar']}
@pytest.fixture
def instance():
    """
    Return a default instance

    Delegates to ``_instance`` so each test receives its own fresh
    config dict.
    """
    return _instance()
@pytest.fixture
def vsphere():
    """
    Provide a check instance with mocked parts

    The vSphere server connection is replaced by a mock so no network
    access happens, and the check's thread pool is disabled so work
    runs synchronously in tests.
    """
    # mock the server
    server_mock = get_mocked_server()
    # create a check instance
    check = VSphereLegacyCheck('vsphere', {}, [_instance()])
    # patch the check instance so it returns the mocked server
    check._get_server_instance = mock.MagicMock(return_value=server_mock)
    # return the check after disabling the thread pool
    return disable_thread_pool(check)
@pytest.fixture
def aggregator():
    """Provide the datadog_checks metric-aggregator stub, reset per test."""
    # Imported lazily so merely importing this conftest does not require
    # the stubs package.
    from datadog_checks.stubs import aggregator
    aggregator.reset()
    return aggregator
| 90 | 0 | 22 |
46d98de8b19ea0d2e59dfca1e2e6e9db7ab228c4 | 513 | py | Python | Day 19/Make_an_Etch_a_sketch_app.py | hamzaoda/100-Days-of-Code---The-Complete-Python-Pro-Bootcamp-for-2021 | 5340007d8405df2e29643b47d3ff9fa4f7af9e10 | [
"Unlicense"
] | null | null | null | Day 19/Make_an_Etch_a_sketch_app.py | hamzaoda/100-Days-of-Code---The-Complete-Python-Pro-Bootcamp-for-2021 | 5340007d8405df2e29643b47d3ff9fa4f7af9e10 | [
"Unlicense"
] | null | null | null | Day 19/Make_an_Etch_a_sketch_app.py | hamzaoda/100-Days-of-Code---The-Complete-Python-Pro-Bootcamp-for-2021 | 5340007d8405df2e29643b47d3ff9fa4f7af9e10 | [
"Unlicense"
] | null | null | null | # import colorgram
from turtle import *
import random
import turtle as t
timy = t.Turtle()
t.listen()
t.onkey(key = "w", fun = moveForward)
t.onkey(key = "a", fun = turnLeft)
t.onkey(key = "d", fun = turnRight)
t.onkey(key = "s", fun = moveBackward)
t.onkey(key = "c", fun = timy.reset)
the_screen = Screen()
the_screen.exitonclick()
| 12.214286 | 38 | 0.619883 | # import colorgram
from turtle import *
import random
import turtle as t
timy = t.Turtle()
def moveForward():
    # "w" key handler: advance the turtle 20 units in its current heading.
    timy.fd(20)
def moveBackward():
    # "s" key handler: back the turtle up 20 units without turning.
    timy.back(20)
def turnRight():
    # "d" key handler: rotate the turtle 20 degrees clockwise.
    timy.right(20)
def turnLeft():
    # "a" key handler: rotate the turtle 20 degrees counter-clockwise.
    timy.left(20)
t.listen()
t.onkey(key = "w", fun = moveForward)
t.onkey(key = "a", fun = turnLeft)
t.onkey(key = "d", fun = turnRight)
t.onkey(key = "s", fun = moveBackward)
t.onkey(key = "c", fun = timy.reset)
the_screen = Screen()
the_screen.exitonclick()
| 55 | 0 | 92 |
9ed0bfed7678b3e557578465045c55d329de8231 | 1,540 | py | Python | setup.py | toxinu/sofart | ec93cee5979ec02ea15955def837aed8f8804970 | [
"BSD-3-Clause"
] | 1 | 2018-03-02T17:49:44.000Z | 2018-03-02T17:49:44.000Z | setup.py | toxinu/sofart | ec93cee5979ec02ea15955def837aed8f8804970 | [
"BSD-3-Clause"
] | null | null | null | setup.py | toxinu/sofart | ec93cee5979ec02ea15955def837aed8f8804970 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.version < '3':
import codecs
else:
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
setup(
name=u('sofart'),
version=get_version(),
description=u('Python in-memory embedded and non-relationnal database'),
long_description=open('README.rst').read(),
license=open("LICENSE").read(),
author=u("toxinu"),
author_email=u("toxinu@gmail.com"),
packages = ['sofart', 'sofart.serializers'],
install_requires = ['isit'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
]
)
| 26.101695 | 79 | 0.655195 | #!/usr/bin/env python
# coding: utf-8
import os
import re
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def get_version():
    """Extract ``__version__`` from the package's ``__init__.py``.

    Returns
    -------
    str
        The version string, e.g. ``"0.5"``.

    Raises
    ------
    RuntimeError
        If no ``__version__ = "..."`` assignment is found in the file.
    """
    VERSIONFILE = 'sofart/__init__.py'
    VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
    # Use a context manager so the file handle is closed deterministically;
    # the original relied on the garbage collector to close it.
    with open(VERSIONFILE, 'rt') as initfile:
        for line in initfile:
            mo = re.search(VSRE, line, re.M)
            if mo:
                return mo.group(1)
    raise RuntimeError('Unable to find version string in %s.' % (VERSIONFILE,))
# Py2/Py3 compatibility shim: u() yields a text (unicode) string on both.
# NOTE(review): comparing sys.version lexically breaks for Python 10+;
# sys.version_info would be the robust check.
if sys.version < '3':
    import codecs
    def u(x):
        # Python 2: decode escape sequences into a unicode object.
        return codecs.unicode_escape_decode(x)[0]
else:
    def u(x):
        # Python 3: str is already unicode; pass through untouched.
        return x
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
setup(
name=u('sofart'),
version=get_version(),
description=u('Python in-memory embedded and non-relationnal database'),
long_description=open('README.rst').read(),
license=open("LICENSE").read(),
author=u("toxinu"),
author_email=u("toxinu@gmail.com"),
packages = ['sofart', 'sofart.serializers'],
install_requires = ['isit'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
]
)
| 376 | 0 | 71 |
2c7d27c174995caee966f90d2d62e70461be4c5e | 12,794 | py | Python | src/sec4parser.py | zbirnba1/quantative-finance | 55c20fc4db99837fce6e90cf0f621cb40af1187b | [
"MIT"
] | 3 | 2018-10-11T19:40:56.000Z | 2019-02-21T23:44:25.000Z | src/sec4parser.py | droiter/quantative-finance | 55c20fc4db99837fce6e90cf0f621cb40af1187b | [
"MIT"
] | 1 | 2019-11-02T00:54:26.000Z | 2019-11-02T00:54:26.000Z | src/sec4parser.py | droiter/quantative-finance | 55c20fc4db99837fce6e90cf0f621cb40af1187b | [
"MIT"
] | 4 | 2019-03-06T23:28:14.000Z | 2021-03-27T15:05:46.000Z | import xml.etree.cElementTree as et
import urllib2
import pandas as pd
import mongomanager
import logging
import inspect
import requestswrapper
from joblib import Parallel, delayed
import multiprocessing
from random import shuffle
if __name__ == "__main__":
logging.basicConfig(filename=inspect.stack()[0][1].replace('py','log'),level=logging.INFO,format='%(asctime)s:%(levelname)s:%(message)s')
allfilings_2_form4()
update_data()
| 37.852071 | 154 | 0.766297 | import xml.etree.cElementTree as et
import urllib2
import pandas as pd
import mongomanager
import logging
import inspect
import requestswrapper
from joblib import Parallel, delayed
import multiprocessing
from random import shuffle
class sec4parser():
    def __init__(self,url=None,xml_text=None):
        """Parse a SEC Form 4 XML document.

        Exactly one of *url* or *xml_text* should be given.  On any
        fetch or parse failure the error is logged and both
        ``self.root`` and ``self.xml_text`` are left as ``None``
        rather than raising.

        Parameters
        ----------
        url : str, optional
            SEC link to the XBRL xsd or xml file to download and parse.
        xml_text : str, optional
            Raw XML document text to parse directly.
        """
        #the url should be the sec link to the xbrl xsd or xml file
        if url is not None:
            self.connector=requestswrapper.RequestsWrapper()
            resp=self.connector.issue_request(url)
            if resp is None:
                logging.error('bad requst url:'+url)
                self.root=None
                self.xml_text=None
            else:
                try:
                    self.root=et.fromstring(resp.text)
                    self.xml_text=resp.text
                # NOTE(review): bare except also swallows KeyboardInterrupt;
                # narrowing to et.ParseError would be safer.
                except:
                    logging.error('parsing error for url:'+url)
                    self.root=None
                    self.xml_text=None
        elif xml_text is not None:
            try:
                self.root=et.fromstring(xml_text)
                self.xml_text=xml_text
            except:
                logging.error('parsing error for text')
                self.root=None
                self.xml_text=None
        else:
            # Neither input supplied: log both (None) arguments and abort.
            # NOTE(review): exit() in a constructor kills the whole process;
            # raising ValueError would be kinder to callers.
            logging.error(str(url))
            logging.error(str(xml_text))
            exit()
        return
def get_schema_version(self):
if self.root is None:
return None
return self.root.find('schemaVersion').text
def get_filing_date(self):
if self.root is None:
return None
periodOfReport=self.root.find('periodOfReport')
if periodOfReport is None:
return None
else:
return periodOfReport.text
def get_company_cik(self):
if self.root is None:
return None
issuerCiks=list(self.root.iter("issuerCik"))
if len(issuerCiks)==0:
return None
elif len(issuerCiks)>1:
logging.error('more than 1 name for owner')
return None
else:
return issuerCiks[0].text
def get_num_owners(self):
if self.root is None:
return None
return len(self.get_reporting_owners())
def get_reporting_owners(self):
if self.root is None:
return None
return list(self.root.iter('reportingOwner'))
def get_owner_relationship(self,reportingOwner,relationship_type="isDirector"):
if self.root is None:
return None
owner_relationships=list(reportingOwner.iter(relationship_type))
if len(owner_relationships)==0:
return None
elif len(owner_relationships)>1:
logging.error('more than 1 name for owner')
return None
else:
if owner_relationships[0].text.lower() in ['1','true']:
return True
elif owner_relationships[0].text.lower() in ['0','false']:
return False
return bool(int(owner_relationships[0].text))
def get_owner_relationships_info(self,reportingOwner):
info={}
info['director']=self.get_owner_relationship(reportingOwner,'isDirector')
info['officer']=self.get_owner_relationship(reportingOwner,'isOfficer')
info['ten_percent_owner']=self.get_owner_relationship(reportingOwner,'isTenPercentOwner')
info['other_relation']=self.get_owner_relationship(reportingOwner,'isOther')
info['officer_title']=self.get_owner_title(reportingOwner)
info['owner_cik']=self.get_owner_cik(reportingOwner)
info['owner_name']=self.get_owner_name(reportingOwner)
return info
def get_owner_title(self,reportingOwner):
if self.root is None:
return None
owner_relationships=list(reportingOwner.iter('officerTitle'))
if len(owner_relationships)==0:
return None
elif len(owner_relationships)>1:
logging.error('more than 1 name for owner')
return None
else:
return owner_relationships[0].text
def get_owner_name(self,reportingOwner):
if self.root is None:
return None
owner_names=list(reportingOwner.iter('rptOwnerName'))
if len(owner_names)==0:
return None
elif len(owner_names)>1:
logging.error('more than 1 name for owner')
return None
else:
return owner_names[0].text
def get_owner_cik(self,reportingOwner):
if self.root is None:
return None
owner_ciks=list(reportingOwner.iter('rptOwnerCik'))
if len(owner_ciks)==0:
return None
elif len(owner_ciks)>1:
logging.error('more than 1 cik for owner')
return None
else:
return owner_ciks[0].text
def get_num_nonderivativetransactions(self):
if self.root is None:
return None
return len(self.get_nonderivativetransactions())
def get_num_derivativetransactions(self):
if self.root is None:
return None
return len(self.get_derivativetransactions())
def get_nonderivativetransactions(self):
if self.root is None:
return None
return list(self.root.iter('nonDerivativeTransaction'))
def get_derivativetransactions(self):
if self.root is None:
return None
return list(self.root.iter('derivativeTransaction'))
def get_transaction_date(self,nonDerivativeTransaction):
if self.root is None:
return None
transactionDates=list(nonDerivativeTransaction.iter('transactionDate'))
if len(transactionDates)==0:
return None
elif len(transactionDates)>1:
logging.error('more than 1 date for transaction')
return None
else:
return transactionDates[0].find('value').text
def get_security_title(self,nonDerivativeTransaction):
if self.root is None:
return None
securityTitles=list(nonDerivativeTransaction.iter('securityTitle'))
if len(securityTitles)==0:
return None
elif len(securityTitles)>1:
logging.error('more than 1 date for transaction')
return None
else:
return securityTitles[0].find('value').text
def get_transaction_type_code(self,nonDerivativeTransaction):
if self.root is None:
return None
transactionCodes=list(nonDerivativeTransaction.iter('transactionCode'))
if len(transactionCodes)==0:
return None
elif len(transactionCodes)>1:
logging.error('more than 1 date for transaction')
return None
else:
return transactionCodes[0].text
def get_ammount_of_shares(self,nonDerivativeTransaction):
transactionSharess=list(nonDerivativeTransaction.iter('transactionShares'))
if len(transactionSharess)==0:
return None
elif len(transactionSharess)>1:
logging.error('more than 1 date for transaction')
return None
else:
return float(transactionSharess[0].find('value').text)
def get_acquisition_disposition_code(self,nonDerivativeTransaction):
transactionAcquiredDisposedCodes=list(nonDerivativeTransaction.iter('transactionAcquiredDisposedCode'))
if len(transactionAcquiredDisposedCodes)==0:
return None
elif len(transactionAcquiredDisposedCodes)>1:
logging.error('more than 1 date for transaction')
return None
else:
return transactionAcquiredDisposedCodes[0].find('value').text
def get_transaction_price(self,nonDerivativeTransaction):
transactionPricePerShares=list(nonDerivativeTransaction.iter('transactionPricePerShare'))
if len(transactionPricePerShares)==0:
return None
elif len(transactionPricePerShares)>1:
logging.error('more than 1 date for transaction')
return None
else:
if transactionPricePerShares[0].find('value') is None:
return None
else:
return float(transactionPricePerShares[0].find('value').text)
def get_total_shares_owned(self,nonDerivativeTransaction):
sharesOwnedFollowingTransactions=list(nonDerivativeTransaction.iter('sharesOwnedFollowingTransaction'))
if len(sharesOwnedFollowingTransactions)==0:
return None
elif len(sharesOwnedFollowingTransactions)>1:
logging.error('more than 1 date for transaction')
return None
else:
return float(sharesOwnedFollowingTransactions[0].find('value').text)
def get_ownership_type_code(self,nonDerivativeTransaction):
directOrIndirectOwnerships=list(nonDerivativeTransaction.iter('directOrIndirectOwnership'))
if len(directOrIndirectOwnerships)==0:
return None
elif len(directOrIndirectOwnerships)>1:
logging.error('more than 1 date for transaction')
return None
else:
return directOrIndirectOwnerships[0].find('value').text
def get_nonderivative_transaction_info(self,nonDerivativeTransaction):
info={}
info['security_title']=self.get_security_title(nonDerivativeTransaction)
info['transaction_date']=self.get_transaction_date(nonDerivativeTransaction)
info['transaction_type_code']=self.get_transaction_type_code(nonDerivativeTransaction)
info['ammount_of_shares']=self.get_ammount_of_shares(nonDerivativeTransaction)
info['acquisition_disposition_code']=self.get_acquisition_disposition_code(nonDerivativeTransaction)
info['transaction_price']=self.get_transaction_price(nonDerivativeTransaction)
info['total_shares_owned']=self.get_total_shares_owned(nonDerivativeTransaction)
info['ownership_type_code']=self.get_ownership_type_code(nonDerivativeTransaction)
info['deemed_execution_date']=self.get_deemed_execution_date(nonDerivativeTransaction)
return info
def get_expiration_date(self,Transaction):
expirationDates=list(nonDerivativeTransaction.iter('expirationDate'))
if len(expirationDates)==0:
return None
elif len(expirationDates)>1:
logging.error('more than 1 date for transaction')
return None
else:
return expirationDates[0].find('value').text
def get_deemed_execution_date(self,Transaction):
deemedExecutionDates=list(Transaction.iter('deemedExecutionDate'))
if len(deemedExecutionDates)==0:
return None
elif len(deemedExecutionDates)>1:
logging.error('more than 1 date for transaction')
return None
else:
if deemedExecutionDates[0].find('value') is None:
return None
else:
return deemedExecutionDates[0].find('value').text
def get_exercise_date(self,Transaction):
exerciseDates=list(nonDerivativeTransaction.iter('exerciseDate'))
if len(exerciseDates)==0:
return None
elif len(exerciseDates)>1:
logging.error('more than 1 date for transaction')
return None
else:
return exerciseDates[0].find('value').text
def get_owner_info_list(self):
if self.root is None:
return None
if self.get_reporting_owners() is None or self.get_num_owners()==0 or self.get_num_owners() is None:
return None
owner_df=[]
owners=self.get_reporting_owners()
for owner in owners:
owner_df.append(self.get_owner_relationships_info(owner))
return owner_df
def get_non_derivative_transactions_list(self):
if self.root is None:
return None
if self.get_num_nonderivativetransactions() is None or self.get_num_nonderivativetransactions()==0:
return None
trans_df=[]
transactions=self.get_nonderivativetransactions()
for trans in transactions:
trans_df.append(self.get_nonderivative_transaction_info(trans))
return trans_df
def allfilings_2_form4(collections,m):
    """Download and store the raw Form 4 XML for every unprocessed filing.

    Diffs the filings already present in ``sec_form4_xmls`` against all
    Form 4 filings in ``intrinio_filings``, then fetches each missing
    one from the SEC and upserts its XML text.

    NOTE(review): the ``__main__`` block below calls this with no
    arguments, which would raise TypeError -- confirm intended usage.
    """
    def get_xml_for_filing(collections,m,totalitems,filing_id):
        # Skip filings whose XML was already stored.
        if m.db[collections['sec_form4_xmls']].find_one({"_id":filing_id}) is not None:
            return
        filing=m.db[collections['intrinio_filings']].find_one({'_id':filing_id},{'report_url':1})
        report_url=filing['report_url']
        # Drop the second-to-last path component of the report URL to
        # obtain the raw XML document URL.
        url=report_url.split('/')
        del url[-2]
        url='/'.join(url)
        s=sec4parser(url=url)
        data={}
        data['_id']=filing_id
        data['xml_url']=url
        data['xml_text']=s.xml_text
        m.db[collections['sec_form4_xmls']].update({'_id':data['_id']},data,upsert=True)
        # Log overall progress as a fraction of all Form 4 filings.
        logging.info('complete:'+str(float(m.db[collections['sec_form4_xmls']].count())/float(totalitems)))
        return
    # Set difference: filings available minus filings already fetched.
    processed_accno=[x['_id'] for x in list(m.db[collections['sec_form4_xmls']].find({},{'_id':1}))]
    available_filings=[x['_id'] for x in list(m.db[collections['intrinio_filings']].find({'report_type':'4'},{"_id":1}))]
    to_process_filings=list(set(available_filings)-set(processed_accno))
    # Shuffle so repeated runs don't always hammer the same ordering.
    shuffle(to_process_filings)
    totalitems=m.db[collections['intrinio_filings']].find({'report_type':'4'}).count()
    for filing_id in to_process_filings:
        get_xml_for_filing(collections,m,totalitems,filing_id)
def update_data(collections,m):
    """Backfill parsed fields onto stored Form 4 XML documents.

    For each field listed at the bottom, finds documents in
    ``sec_form4_xmls`` that have XML text but lack the field, re-parses
    the XML and upserts the extracted value.

    NOTE(review): the ``__main__`` block below calls this with no
    arguments, which would raise TypeError -- confirm intended usage.
    """
    def find_and_update(collections,m,key,function):
        # Ensure an index on the target key, then stream (batch size 1)
        # every doc that has xml_text but is missing the key.
        m.create_index(collections['sec_form4_xmls'],key)
        items=m.db[collections['sec_form4_xmls']].find({"$and":[{"xml_text":{"$ne":None}},{"xml_text":{"$exists":True}},{key:{"$exists":False}}]}).batch_size(1)
        for item in items:
            # Re-parse the stored XML and call the named extractor method.
            s=sec4parser(xml_text=item['xml_text'])
            item[key]=getattr(s,function)()
            logging.info("updating:"+item['_id']+' for:'+key+' with value:'+str(item[key]))
            m.db[collections['sec_form4_xmls']].update({'_id':item['_id']},item,upsert=True)
        return
    find_and_update(collections,m,'filing_date',"get_filing_date")
    find_and_update(collections,m,'num_owners',"get_num_owners")
    find_and_update(collections,m,'company_cik',"get_company_cik")
    find_and_update(collections,m,'num_derivativetransactions',"get_num_derivativetransactions")
    find_and_update(collections,m,'num_nonderivativetransactions',"get_num_nonderivativetransactions")
    find_and_update(collections,m,'non_derivative_transactions_list',"get_non_derivative_transactions_list")
    find_and_update(collections,m,'owner_info_list','get_owner_info_list')
if __name__ == "__main__":
logging.basicConfig(filename=inspect.stack()[0][1].replace('py','log'),level=logging.INFO,format='%(asctime)s:%(levelname)s:%(message)s')
allfilings_2_form4()
update_data()
| 11,609 | -2 | 750 |
dd47fa495258810e2925cecbbb2c9da53d2e821f | 2,144 | py | Python | blueprints/html/__init__.py | mariussteffens/security-crawl-maze | 7bfa4e58344633016e2b5f2f30bd2dacea0a819b | [
"Apache-2.0"
] | 103 | 2019-05-25T00:44:52.000Z | 2022-03-30T17:21:28.000Z | blueprints/html/__init__.py | mariussteffens/security-crawl-maze | 7bfa4e58344633016e2b5f2f30bd2dacea0a819b | [
"Apache-2.0"
] | 3 | 2020-08-10T09:36:30.000Z | 2022-03-11T11:59:20.000Z | blueprints/html/__init__.py | mariussteffens/security-crawl-maze | 7bfa4e58344633016e2b5f2f30bd2dacea0a819b | [
"Apache-2.0"
] | 22 | 2019-06-27T11:25:16.000Z | 2022-03-18T16:24:11.000Z | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module serving all the traffic for html test cases."""
import os
from flask import abort
from flask import Blueprint
from flask import render_template
from flask import Response
from flask import send_from_directory
html_module = Blueprint("html_module", __name__, template_folder="templates")
# Global app.instance_path is not accessible from blueprints ¯\_(ツ)_/¯.
TEST_CASES_PATH = os.path.abspath(__file__ + "/../../../test-cases/html/")
@html_module.route("/misc/url/full-url/")
@html_module.route("/misc/url/path-relative-url/")
@html_module.route("/misc/url/protocol-relative-url/")
@html_module.route("/misc/string/url-string/")
@html_module.route("/", defaults={"path": ""})
@html_module.route("/<path:path>")
def html_dir(path):
"""Lists contents of requested directory."""
requested_path = os.path.join(TEST_CASES_PATH, path)
if not os.path.exists(requested_path):
return abort(404)
if os.path.isdir(requested_path):
files = os.listdir(requested_path)
return render_template("list-html-dir.html", files=files, path=path)
if os.path.isfile(requested_path):
return send_from_directory("test-cases/html", path)
| 34.031746 | 80 | 0.724813 | # Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module serving all the traffic for html test cases."""
import os
from flask import abort
from flask import Blueprint
from flask import render_template
from flask import Response
from flask import send_from_directory
html_module = Blueprint("html_module", __name__, template_folder="templates")
# Global app.instance_path is not accessible from blueprints ¯\_(ツ)_/¯.
TEST_CASES_PATH = os.path.abspath(__file__ + "/../../../test-cases/html/")
@html_module.route("/misc/url/full-url/")
def full_url():
  """Serve the test case exercising crawler discovery of absolute URLs."""
  return render_template("url/full-url.html")
@html_module.route("/misc/url/path-relative-url/")
def path_relative_url():
  """Serve the test case exercising discovery of path-relative URLs."""
  return render_template("url/path-relative-url.html")
@html_module.route("/misc/url/protocol-relative-url/")
def protocol_relative_url():
  """Serve the test case exercising discovery of protocol-relative URLs."""
  return render_template("url/protocol-relative-url.html")
@html_module.route("/misc/string/url-string/")
def inline_url_string():
  """Serve the test case with a URL embedded in a plain string."""
  return render_template("string/url-string.html")
@html_module.route("/", defaults={"path": ""})
@html_module.route("/<path:path>")
def html_dir(path):
  """Lists contents of requested directory.

  Directories render a listing template; files are served from the
  test-cases/html tree; missing paths 404.  Implicitly returns None
  (HTTP 500 in Flask) for paths that exist but are neither file nor
  directory.

  NOTE(review): ``path`` is user-controlled and joined directly onto
  TEST_CASES_PATH; verify the route converter prevents ``..``
  traversal (send_from_directory itself is safe, but the exists/isdir
  checks use the raw join).
  """
  requested_path = os.path.join(TEST_CASES_PATH, path)

  if not os.path.exists(requested_path):
    return abort(404)
  if os.path.isdir(requested_path):
    files = os.listdir(requested_path)
    return render_template("list-html-dir.html", files=files, path=path)
  if os.path.isfile(requested_path):
    return send_from_directory("test-cases/html", path)
| 218 | 0 | 88 |
db857f32f541baebe1a0ccf43e0f9bb8532b99e7 | 8,373 | py | Python | lib/tests/streamlit/dataframe_selector_test.py | ChangHoon-Sung/streamlit | 83e0b80d2fa13e29e83d092a9fc4d946460bbf73 | [
"Apache-2.0"
] | 1 | 2022-03-14T07:55:33.000Z | 2022-03-14T07:55:33.000Z | lib/tests/streamlit/dataframe_selector_test.py | ChangHoon-Sung/streamlit | 83e0b80d2fa13e29e83d092a9fc4d946460bbf73 | [
"Apache-2.0"
] | 35 | 2021-10-12T04:41:39.000Z | 2022-03-28T04:50:45.000Z | lib/tests/streamlit/dataframe_selector_test.py | AlexRogalskiy/streamlit | d153db37d97faada87bf88972886cda5a624f8c8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2022 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""dataframe_selector unit test."""
import unittest
from unittest.mock import patch
import altair as alt
import pandas as pd
import streamlit
from streamlit.delta_generator import DeltaGenerator
from tests.testutil import patch_config_options
DATAFRAME = pd.DataFrame([["A", "B", "C", "D"], [28, 55, 43, 91]], index=["a", "b"]).T
ALTAIR_CHART = alt.Chart(DATAFRAME).mark_bar().encode(x="a", y="b")
| 47.845714 | 88 | 0.745611 | # Copyright 2018-2022 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""dataframe_selector unit test."""
import unittest
from unittest.mock import patch
import altair as alt
import pandas as pd
import streamlit
from streamlit.delta_generator import DeltaGenerator
from tests.testutil import patch_config_options
DATAFRAME = pd.DataFrame([["A", "B", "C", "D"], [28, 55, 43, 91]], index=["a", "b"]).T
ALTAIR_CHART = alt.Chart(DATAFRAME).mark_bar().encode(x="a", y="b")
class DataFrameSelectorTest(unittest.TestCase):
    """Checks that each dataframe-like st.* API dispatches to either the
    legacy or the Arrow implementation according to the
    ``global.dataFrameSerialization`` config option.

    Mock-argument order: stacked ``patch.object`` decorators apply bottom-up,
    so the innermost patch (``_arrow_*``) becomes the first mock parameter,
    matching each signature ``(self, arrow_*, legacy_*)``.
    """
    def test_arrow_is_default(self):
        """The 'arrow' config option is the default."""
        self.assertEqual("arrow", streamlit.get_option("global.dataFrameSerialization"))
    @patch.object(DeltaGenerator, "_legacy_dataframe")
    @patch.object(DeltaGenerator, "_arrow_dataframe")
    @patch_config_options({"global.dataFrameSerialization": "legacy"})
    def test_legacy_dataframe(self, arrow_dataframe, legacy_dataframe):
        """st.dataframe routes to _legacy_dataframe under 'legacy'."""
        streamlit.dataframe(DATAFRAME, 100, 200)
        legacy_dataframe.assert_called_once_with(DATAFRAME, 100, 200)
        arrow_dataframe.assert_not_called()
    @patch.object(DeltaGenerator, "_legacy_dataframe")
    @patch.object(DeltaGenerator, "_arrow_dataframe")
    @patch_config_options({"global.dataFrameSerialization": "arrow"})
    def test_arrow_dataframe(self, arrow_dataframe, legacy_dataframe):
        """st.dataframe routes to _arrow_dataframe under 'arrow'."""
        streamlit.dataframe(DATAFRAME, 100, 200)
        legacy_dataframe.assert_not_called()
        arrow_dataframe.assert_called_once_with(DATAFRAME, 100, 200)
    @patch.object(DeltaGenerator, "_legacy_table")
    @patch.object(DeltaGenerator, "_arrow_table")
    @patch_config_options({"global.dataFrameSerialization": "legacy"})
    def test_legacy_table(self, arrow_table, legacy_table):
        """st.table routes to _legacy_table under 'legacy'."""
        streamlit.table(DATAFRAME)
        legacy_table.assert_called_once_with(DATAFRAME)
        arrow_table.assert_not_called()
    @patch.object(DeltaGenerator, "_legacy_table")
    @patch.object(DeltaGenerator, "_arrow_table")
    @patch_config_options({"global.dataFrameSerialization": "arrow"})
    def test_arrow_table(self, arrow_table, legacy_table):
        """st.table routes to _arrow_table under 'arrow'."""
        streamlit.table(DATAFRAME)
        legacy_table.assert_not_called()
        arrow_table.assert_called_once_with(DATAFRAME)
    @patch.object(DeltaGenerator, "_legacy_line_chart")
    @patch.object(DeltaGenerator, "_arrow_line_chart")
    @patch_config_options({"global.dataFrameSerialization": "legacy"})
    def test_legacy_line_chart(self, arrow_line_chart, legacy_line_chart):
        """st.line_chart routes to _legacy_line_chart under 'legacy'."""
        streamlit.line_chart(DATAFRAME, 100, 200, True)
        legacy_line_chart.assert_called_once_with(DATAFRAME, 100, 200, True)
        arrow_line_chart.assert_not_called()
    @patch.object(DeltaGenerator, "_legacy_line_chart")
    @patch.object(DeltaGenerator, "_arrow_line_chart")
    @patch_config_options({"global.dataFrameSerialization": "arrow"})
    def test_arrow_line_chart(self, arrow_line_chart, legacy_line_chart):
        """st.line_chart routes to _arrow_line_chart under 'arrow'."""
        streamlit.line_chart(DATAFRAME, 100, 200, True)
        legacy_line_chart.assert_not_called()
        arrow_line_chart.assert_called_once_with(DATAFRAME, 100, 200, True)
    @patch.object(DeltaGenerator, "_legacy_area_chart")
    @patch.object(DeltaGenerator, "_arrow_area_chart")
    @patch_config_options({"global.dataFrameSerialization": "legacy"})
    def test_legacy_area_chart(self, arrow_area_chart, legacy_area_chart):
        """st.area_chart routes to _legacy_area_chart under 'legacy'."""
        streamlit.area_chart(DATAFRAME, 100, 200, True)
        legacy_area_chart.assert_called_once_with(DATAFRAME, 100, 200, True)
        arrow_area_chart.assert_not_called()
    @patch.object(DeltaGenerator, "_legacy_area_chart")
    @patch.object(DeltaGenerator, "_arrow_area_chart")
    @patch_config_options({"global.dataFrameSerialization": "arrow"})
    def test_arrow_area_chart(self, arrow_area_chart, legacy_area_chart):
        """st.area_chart routes to _arrow_area_chart under 'arrow'."""
        streamlit.area_chart(DATAFRAME, 100, 200, True)
        legacy_area_chart.assert_not_called()
        arrow_area_chart.assert_called_once_with(DATAFRAME, 100, 200, True)
    @patch.object(DeltaGenerator, "_legacy_bar_chart")
    @patch.object(DeltaGenerator, "_arrow_bar_chart")
    @patch_config_options({"global.dataFrameSerialization": "legacy"})
    def test_legacy_bar_chart(self, arrow_bar_chart, legacy_bar_chart):
        """st.bar_chart routes to _legacy_bar_chart under 'legacy'."""
        streamlit.bar_chart(DATAFRAME, 100, 200, True)
        legacy_bar_chart.assert_called_once_with(DATAFRAME, 100, 200, True)
        arrow_bar_chart.assert_not_called()
    @patch.object(DeltaGenerator, "_legacy_bar_chart")
    @patch.object(DeltaGenerator, "_arrow_bar_chart")
    @patch_config_options({"global.dataFrameSerialization": "arrow"})
    def test_arrow_bar_chart(self, arrow_bar_chart, legacy_bar_chart):
        """st.bar_chart routes to _arrow_bar_chart under 'arrow'."""
        streamlit.bar_chart(DATAFRAME, 100, 200, True)
        legacy_bar_chart.assert_not_called()
        arrow_bar_chart.assert_called_once_with(DATAFRAME, 100, 200, True)
    @patch.object(DeltaGenerator, "_legacy_altair_chart")
    @patch.object(DeltaGenerator, "_arrow_altair_chart")
    @patch_config_options({"global.dataFrameSerialization": "legacy"})
    def test_legacy_altair_chart(self, arrow_altair_chart, legacy_altair_chart):
        """st.altair_chart routes to _legacy_altair_chart under 'legacy'."""
        streamlit.altair_chart(ALTAIR_CHART, True)
        legacy_altair_chart.assert_called_once_with(ALTAIR_CHART, True)
        arrow_altair_chart.assert_not_called()
    @patch.object(DeltaGenerator, "_legacy_altair_chart")
    @patch.object(DeltaGenerator, "_arrow_altair_chart")
    @patch_config_options({"global.dataFrameSerialization": "arrow"})
    def test_arrow_altair_chart(self, arrow_altair_chart, legacy_altair_chart):
        """st.altair_chart routes to _arrow_altair_chart under 'arrow'."""
        streamlit.altair_chart(ALTAIR_CHART, True)
        legacy_altair_chart.assert_not_called()
        arrow_altair_chart.assert_called_once_with(ALTAIR_CHART, True)
    @patch.object(DeltaGenerator, "_legacy_vega_lite_chart")
    @patch.object(DeltaGenerator, "_arrow_vega_lite_chart")
    @patch_config_options({"global.dataFrameSerialization": "legacy"})
    def test_legacy_vega_lite_chart(
        self, arrow_vega_lite_chart, legacy_vega_lite_chart
    ):
        """st.vega_lite_chart forwards args/kwargs to _legacy_vega_lite_chart."""
        streamlit.vega_lite_chart(
            DATAFRAME, None, True, x="foo", boink_boop=100, baz={"boz": "booz"}
        )
        legacy_vega_lite_chart.assert_called_once_with(
            DATAFRAME, None, True, x="foo", boink_boop=100, baz={"boz": "booz"}
        )
        arrow_vega_lite_chart.assert_not_called()
    @patch.object(DeltaGenerator, "_legacy_vega_lite_chart")
    @patch.object(DeltaGenerator, "_arrow_vega_lite_chart")
    @patch_config_options({"global.dataFrameSerialization": "arrow"})
    def test_arrow_vega_lite_chart(self, arrow_vega_lite_chart, legacy_vega_lite_chart):
        """st.vega_lite_chart forwards args/kwargs to _arrow_vega_lite_chart."""
        streamlit.vega_lite_chart(
            DATAFRAME, None, True, x="foo", boink_boop=100, baz={"boz": "booz"}
        )
        legacy_vega_lite_chart.assert_not_called()
        arrow_vega_lite_chart.assert_called_once_with(
            DATAFRAME, None, True, x="foo", boink_boop=100, baz={"boz": "booz"}
        )
    @patch.object(DeltaGenerator, "_legacy_add_rows")
    @patch.object(DeltaGenerator, "_arrow_add_rows")
    @patch_config_options({"global.dataFrameSerialization": "legacy"})
    def test_legacy_add_rows(self, arrow_add_rows, legacy_add_rows):
        """element.add_rows routes to _legacy_add_rows under 'legacy'."""
        elt = streamlit.dataframe(DATAFRAME)
        elt.add_rows(DATAFRAME, foo=DATAFRAME)
        legacy_add_rows.assert_called_once_with(DATAFRAME, foo=DATAFRAME)
        arrow_add_rows.assert_not_called()
    @patch.object(DeltaGenerator, "_legacy_add_rows")
    @patch.object(DeltaGenerator, "_arrow_add_rows")
    @patch_config_options({"global.dataFrameSerialization": "arrow"})
    def test_arrow_add_rows(self, arrow_add_rows, legacy_add_rows):
        """element.add_rows routes to _arrow_add_rows under 'arrow'."""
        elt = streamlit.dataframe(DATAFRAME)
        elt.add_rows(DATAFRAME, foo=DATAFRAME)
        legacy_add_rows.assert_not_called()
        arrow_add_rows.assert_called_once_with(DATAFRAME, foo=DATAFRAME)
| 3,828 | 3,536 | 23 |
cb100ee7bf1499f3a116b719697003e206b1839a | 4,204 | py | Python | OnlineParticipationDataset/spiders/Wuppertal2017Spider.py | Liebeck/OnlineParticipationDatasets | 27e82cb19b00af8fd912327fc795c19dfc63a72a | [
"MIT"
] | null | null | null | OnlineParticipationDataset/spiders/Wuppertal2017Spider.py | Liebeck/OnlineParticipationDatasets | 27e82cb19b00af8fd912327fc795c19dfc63a72a | [
"MIT"
] | null | null | null | OnlineParticipationDataset/spiders/Wuppertal2017Spider.py | Liebeck/OnlineParticipationDatasets | 27e82cb19b00af8fd912327fc795c19dfc63a72a | [
"MIT"
] | 3 | 2018-05-10T14:04:51.000Z | 2018-06-02T13:40:39.000Z | import locale
from datetime import datetime
from typing import Generator, List, Any, Optional
import scrapy
from scrapy.http import HtmlResponse
| 51.901235 | 210 | 0.66627 | import locale
from datetime import datetime
from typing import Generator, List, Any, Optional
import scrapy
from scrapy.http import HtmlResponse
class Wuppertal2017Spider(scrapy.Spider):
    """Scrapes suggestions from the Wuppertal 2017 citizen-budget portal.

    The two start URLs are AJAX listing pages (filter_phases 197/198); each
    listed suggestion page is then parsed into a flat dict.
    """
    name = "wuppertal2017"
    start_urls = ["https://buergerbudget.wuppertal.de/cb/t711bwqTXj3GSGiEVwa3li3YZDqvq4pL?type=phase1&ajax_call=true&sort_order=order_by_multi_vote&search=&topics_to_show=500&filter_phases=197&_=1527933477600",
    "https://buergerbudget.wuppertal.de/cb/t711bwqTXj3GSGiEVwa3li3YZDqvq4pL?type=phase1&ajax_call=true&sort_order=order_by_multi_vote&search=&topics_to_show=500&filter_phases=198&_=1527933477599"]
    def __init__(self, *args, **kwargs):
        # German locale for strptime -- NOTE(review): the "%Y-%m-%d" format
        # used below is locale-independent, so this may be a leftover;
        # it also raises locale.Error where de_DE.UTF-8 is not installed.
        super(Wuppertal2017Spider, self).__init__(*args, **kwargs)
        locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')
    def parse(self, response: HtmlResponse) -> Generator:
        """Follow every suggestion link found on a listing page."""
        for suggestion_url in response.css(".topic-title > a:last-of-type::attr('href')").extract():
            yield response.follow(suggestion_url, Wuppertal2017Spider.parse_suggestion)
    @staticmethod
    def parse_suggestion(suggestion: HtmlResponse) -> dict:
        """Extract one suggestion page into a dict of scalar fields plus the
        status flags (get_status) and text subsections (get_subsections)."""
        suggestion_item = dict()
        # id is the last URL path segment, with any query string stripped
        suggestion_item['suggestion_id'] = suggestion.url.split("/")[-1].split("?")[0]
        suggestion_item['title'] = suggestion.css("h2::text").extract_first()
        suggestion_item['date_time'] = datetime.strptime(suggestion.css(".fa-calendar")[0].root.tail.strip(), "%Y-%m-%d")
        suggestion_item['tags'] = [element_text.strip() for element_text in suggestion.xpath("//*[@class='fa fa-sticky-note-o']/../..//strong/text()").extract()]
        suggestion_item['author'] = suggestion.css(".fa-user")[0].root.tail.strip()
        # vote counts are the leading number of the icon's tail text
        suggestion_item['approval_phase_1'] = int(suggestion.css(".fa-thumbs-up")[0].root.tail.strip().split(" ")[0])
        suggestion_item['approval_phase_3'] = int(suggestion.css(".fa-check-square-o")[0].root.tail.strip().split(" ")[0])
        suggestion_item.update(Wuppertal2017Spider.get_status(suggestion.css(".checkpoints-title ~ div::attr('class')").extract()))
        suggestion_item['content'] = "".join([text.strip() for text in suggestion.css(".description *::text").extract()])
        suggestion_item['comment_count'] = int(suggestion.css(".count-comments::text").extract_first().split(" ")[0])
        suggestion_item['costs'] = Wuppertal2017Spider.get_costs(suggestion)
        suggestion_item.update(Wuppertal2017Spider.get_subsections(suggestion.css(".param-text-area-description::text").extract()))
        return suggestion_item
    @staticmethod
    def get_costs(suggestion: HtmlResponse) -> Optional[int]:
        """Digits of the EUR label as an int; None when no .fa-eur element.

        NOTE(review): a .fa-eur element whose tail has no digits would make
        int("") raise ValueError, which is not caught here.
        """
        try:
            return int("".join(c for c in suggestion.css(".fa-eur")[0].root.tail.strip() if c.isdigit()))
        except IndexError:
            return None
    @staticmethod
    def get_subsections(texts: [str]) -> dict:
        """Pair the fixed subsection headings with the scraped text blocks.

        zip() truncates to the shorter sequence, so missing blocks simply
        drop the trailing headings.
        """
        subsections = [
            "Voraussichtliche Rolle für die Stadt Wuppertal",
            "Geschätzte Umsetzungsdauer und Startschuss",
            "Mehrwert der Idee für Wuppertal",
            "Eigene Rolle bei der Projektidee",
            "Kostenschätzung der Ideeneinreicher/in",
        ]
        return dict(zip(subsections, texts))
    @staticmethod
    def get_status(css_classes: [str]) -> dict:
        """Map the seven checkpoint divs' CSS classes to pass/fail booleans.

        A checkpoint counts as passed when its class list contains neither
        "not-checked-yet" nor "did-not-passed".  "status" is 1 + the index
        of the last passed checkpoint (0 when none passed).
        """
        status_keys = [
            "Kriteriencheck bestanden",
            "Teil der TOP 100",
            "Gemeinwohl-Check bestanden: Teil der TOP 30",
            "Detailprüfung durch Verwaltung bestanden: Zur finalen Abstimmung freigegeben",
            "Bei der finalen Abstimmung gewonnen",
            "Umsetzung gestartet",
            "Umgesetzt!"
        ]
        status_values = [("not-checked-yet" not in css_classes[i]) and ("did-not-passed" not in css_classes[i])
                        for i in range(len(status_keys))]
        phase = Wuppertal2017Spider.get_last([i for i, passed in enumerate(status_values) if passed], -1) + 1
        status = dict(zip(status_keys, status_values))
        status["status"] = phase
        return status
    @staticmethod
    def get_last(lst: List, default: Any) -> Any:
        """Last element of ``lst``, or ``default`` when ``lst`` is empty."""
        try:
            return lst[-1]
        except IndexError:
            return default
| 3,291 | 748 | 23 |
ed328ad0584f94997a4717820240f934deddca73 | 1,272 | py | Python | empower/cli/projects_commands/list_projects.py | 5g-empower/empower-runtime | 7a71f692f8a0814093d35de5ef0c79d348aa4c2d | [
"Apache-2.0"
] | 52 | 2016-04-18T09:40:29.000Z | 2021-12-14T19:32:21.000Z | empower/cli/projects_commands/list_projects.py | 5g-empower/empower-runtime | 7a71f692f8a0814093d35de5ef0c79d348aa4c2d | [
"Apache-2.0"
] | 36 | 2016-07-04T14:10:58.000Z | 2021-08-13T01:10:32.000Z | empower/cli/projects_commands/list_projects.py | 5g-empower/empower-runtime | 7a71f692f8a0814093d35de5ef0c79d348aa4c2d | [
"Apache-2.0"
] | 51 | 2016-04-20T14:21:32.000Z | 2022-03-18T14:43:56.000Z | #!/usr/bin/env python3
#
# Copyright (c) 2019 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""List projects."""
import empower_core.command as command
def do_cmd(gargs, *_):
    """Print a one-line summary for every project known to the controller."""
    _, data = command.connect(gargs, ('GET', '/api/v1/projects'), 200)
    for project in data.values():
        # Mandatory fields first, optional wifi/lte properties appended only
        # when present and non-empty.
        fragments = ["project_id ", project['project_id'],
                     " desc \"%s\"" % project['desc']]
        wifi = project.get('wifi_props')
        if wifi:
            fragments.append(" ssid \"%s\"" % wifi['ssid'])
        lte = project.get('lte_props')
        if lte:
            fragments.append(" plmnid \"%s\"" % lte['plmnid'])
        print(''.join(fragments))
| 28.266667 | 73 | 0.653302 | #!/usr/bin/env python3
#
# Copyright (c) 2019 Roberto Riggio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""List projects."""
import empower_core.command as command
def do_cmd(gargs, *_):
    """List projects. """
    # One-shot GET; the 200 argument is presumably the expected status code
    # enforced inside command.connect -- confirm in empower_core.command.
    _, data = command.connect(gargs, ('GET', '/api/v1/projects'), 200)
    for entry in data.values():
        # Build one summary line per project: id, description, and -- when
        # present and non-empty -- the Wi-Fi SSID and/or LTE PLMN id.
        accum = []
        accum.append("project_id ")
        accum.append(entry['project_id'])
        accum.append(" desc \"%s\"" % entry['desc'])
        if 'wifi_props' in entry and entry['wifi_props']:
            accum.append(" ssid \"%s\"" % entry['wifi_props']['ssid'])
        if 'lte_props' in entry and entry['lte_props']:
            accum.append(" plmnid \"%s\"" % entry['lte_props']['plmnid'])
        print(''.join(accum))
| 0 | 0 | 0 |
fb99ead52bbf4f6574a7f21012730db1aec98efe | 597 | py | Python | ana/debug_buffer.py | hanswenzel/opticks | b75b5929b6cf36a5eedeffb3031af2920f75f9f0 | [
"Apache-2.0"
] | 11 | 2020-07-05T02:39:32.000Z | 2022-03-20T18:52:44.000Z | ana/debug_buffer.py | hanswenzel/opticks | b75b5929b6cf36a5eedeffb3031af2920f75f9f0 | [
"Apache-2.0"
] | null | null | null | ana/debug_buffer.py | hanswenzel/opticks | b75b5929b6cf36a5eedeffb3031af2920f75f9f0 | [
"Apache-2.0"
] | 4 | 2020-09-03T20:36:32.000Z | 2022-01-19T07:42:21.000Z | #!/usr/bin/env python
"""
::
run ~/opticks/ana/debug_buffer.py
"""
import os, numpy as np
# Avoid scientific notation when the arrays below are printed interactively.
np.set_printoptions(suppress=True)
# Default event directory unless the caller already exported OPTICKS_EVENT_BASE.
os.environ.setdefault("OPTICKS_EVENT_BASE",os.path.expandvars("/tmp/$USER/opticks"))
# Debug ("dg") buffer written by a prior G4OKTest run.
path = os.path.expandvars("$OPTICKS_EVENT_BASE/G4OKTest/evt/g4live/natural/1/dg.npy")
dg = np.load(path)
# Reinterpret the raw bits of column [:,0,3] as uint32 sensor indices.
sensorIndex = dg[:,0,3].view(np.uint32)
#tid = dg[:,0,3].view(np.uint32)
# Keep entries with a non-zero sensor index (0 presumably means "no sensor"
# -- confirm against the writer of dg.npy).
sel = sensorIndex > 0
#sel = tid > 0x5000000 # for DYB this means landing (but not necessarily "hitting") a volume of the instanced PMT assembly
# dgi: the selected sensor indices; dgs: the corresponding full records.
dgi = sensorIndex[sel]
dgs = dg[sel]
| 21.321429 | 127 | 0.695142 | #!/usr/bin/env python
"""
::
run ~/opticks/ana/debug_buffer.py
"""
import os, numpy as np
# Avoid scientific notation when the arrays below are printed interactively.
np.set_printoptions(suppress=True)
# Default event directory unless the caller already exported OPTICKS_EVENT_BASE.
os.environ.setdefault("OPTICKS_EVENT_BASE",os.path.expandvars("/tmp/$USER/opticks"))
# Debug ("dg") buffer written by a prior G4OKTest run.
path = os.path.expandvars("$OPTICKS_EVENT_BASE/G4OKTest/evt/g4live/natural/1/dg.npy")
dg = np.load(path)
# Reinterpret the raw bits of column [:,0,3] as uint32 sensor indices.
sensorIndex = dg[:,0,3].view(np.uint32)
#tid = dg[:,0,3].view(np.uint32)
# Keep entries with a non-zero sensor index (0 presumably means "no sensor"
# -- confirm against the writer of dg.npy).
sel = sensorIndex > 0
#sel = tid > 0x5000000 # for DYB this means landing (but not necessarily "hitting") a volume of the instanced PMT assembly
# dgi: the selected sensor indices; dgs: the corresponding full records.
dgi = sensorIndex[sel]
dgs = dg[sel]
| 0 | 0 | 0 |
894a4e16e11d105fadd523bfe361f601fd3318a5 | 71,917 | py | Python | Tank1990AIf/Test1_main.py | cheapmouse94/Machine-Learning-tank1990-python | 8b75983289c7bc0831827561cec12d4ad2addee2 | [
"MIT"
] | null | null | null | Tank1990AIf/Test1_main.py | cheapmouse94/Machine-Learning-tank1990-python | 8b75983289c7bc0831827561cec12d4ad2addee2 | [
"MIT"
] | null | null | null | Tank1990AIf/Test1_main.py | cheapmouse94/Machine-Learning-tank1990-python | 8b75983289c7bc0831827561cec12d4ad2addee2 | [
"MIT"
] | null | null | null | import threading
import pygame
import time
import sys
import os
from pygame.locals import *
import numpy as np
from collections import deque
import torch
from torch.autograd import Variable
from Tank_AI import Linear_QNet, QTrainer
import random
# --- gameplay constants --------------------------------------------------
FPS = 1000  # frame-rate cap -- presumably fed to the pygame clock in Main (not in view)
SQM = 64  # edge length of one map tile, in pixels
# --- mutable module-level registries, filled/emptied by the game loop ----
EAGLE_Y = []
EAGLE_G = []
BULLETS_Y_objects = []
BULLETS_Y_RECT = []
BULLETS_G_objects = []
BULLETS_G_RECT = []
BACKGROUND_RECT = []
GRASS_RECT = []
WATER_RECT = []
BRICK_RECT = []
BRICK_RECT_MANY = []
BRICK_RECT_MINI = []
SOLID_RECT = []
# ASCII level layout, one character per SQM-sized tile.  The letters appear
# to encode tile types (H border/background, G grass, W water, B brick,
# S solid wall, O open floor, 1/2 tank spawn points) -- TODO confirm against
# the map-building code, which is not visible in this chunk.
MAPPING = [
'HHHHHHHHHHHHHHHHH',
'HHHHHHHHHHHHHHHHH',
'HHHHSGOOOBOOSGOHH',
'HHHHGBOWBGBOOBGHH',
'HHHHOG1BGSGB2GOHH',
'HHHHGBOOBGBWOBGHH',
'HHHHOGSOOBOOOGSHH',
'HHHHHHHHHHHHHHHHH',
'HHHHHHHHHHHHHHHHH'
]
# --- texture surfaces, loaded once at import time (pygame required) ------
# Tank and bullet sprite lists are indexed [up, down, left, right].
TANK_YELLOW_IMG = [pygame.transform.scale((pygame.image.load(os.path.join('textures', 'yellow_tank_up.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'yellow_tank_down.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'yellow_tank_left.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'yellow_tank_right.png'))), (52,52))]
TANK_GREEN_IMG = [pygame.transform.scale((pygame.image.load(os.path.join('textures', 'green_tank_up.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'green_tank_down.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'green_tank_left.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'green_tank_right.png'))), (52,52))]
BULLET_IMG = [pygame.transform.scale((pygame.image.load(os.path.join('textures', 'bullet_u.png'))), (16,22)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'bullet_d.png'))), (16,22)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'bullet_l.png'))), (22,16)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'bullet_r.png'))), (22,16))]
WATER_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_water_1.png'))), (64,64))
WATER_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_water_2.png'))), (64,64))
BRICK_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_brick.png'))), (64,64))
BRICK_IMG_MINI = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_brick_mini.png'))), (32,32))
GRASS_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_grass.png'))), (64,64))
SOLIDWALL_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_solid_wall.png'))), (64,64))
EAGLE_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_eagle_1.png'))), (64,64))
EAGLE_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_eagle_2.png'))), (64,64))
EXPLOSION_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_1.png'))), (64,64))
EXPLOSION_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_2.png'))), (64,64))
EXPLOSION_3_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_3.png'))), (64,64))
EXPLOSION_GREAT_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_great_1.png'))), (128,128))
EXPLOSION_GREAT_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_great_2.png'))), (128,128))
INVICIBLE_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'invicible_1.png'))), (52,52))
INVICIBLE_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'invicible_2.png'))), (52,52))
BACKGROUND_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'background.png'))), (64,64))
# --- DQN training hyper-parameters ---------------------------------------
MAX_MEMORY = 100_000_000
BATCH_SIZE = 1000
LR = 0.0001
if __name__ == '__main__':
main = Main()
main.runtime() | 43.825107 | 212 | 0.584549 | import threading
import pygame
import time
import sys
import os
from pygame.locals import *
import numpy as np
from collections import deque
import torch
from torch.autograd import Variable
from Tank_AI import Linear_QNet, QTrainer
import random
# --- gameplay constants --------------------------------------------------
FPS = 1000  # frame-rate cap -- presumably fed to the pygame clock in Main (not in view)
SQM = 64  # edge length of one map tile, in pixels
# --- mutable module-level registries, filled/emptied by the game loop ----
EAGLE_Y = []
EAGLE_G = []
BULLETS_Y_objects = []
BULLETS_Y_RECT = []
BULLETS_G_objects = []
BULLETS_G_RECT = []
BACKGROUND_RECT = []
GRASS_RECT = []
WATER_RECT = []
BRICK_RECT = []
BRICK_RECT_MANY = []
BRICK_RECT_MINI = []
SOLID_RECT = []
# ASCII level layout, one character per SQM-sized tile.  The letters appear
# to encode tile types (H border/background, G grass, W water, B brick,
# S solid wall, O open floor, 1/2 tank spawn points) -- TODO confirm against
# the map-building code, which is not visible in this chunk.
MAPPING = [
'HHHHHHHHHHHHHHHHH',
'HHHHHHHHHHHHHHHHH',
'HHHHSGOOOBOOSGOHH',
'HHHHGBOWBGBOOBGHH',
'HHHHOG1BGSGB2GOHH',
'HHHHGBOOBGBWOBGHH',
'HHHHOGSOOBOOOGSHH',
'HHHHHHHHHHHHHHHHH',
'HHHHHHHHHHHHHHHHH'
]
# --- texture surfaces, loaded once at import time (pygame required) ------
# Tank and bullet sprite lists are indexed [up, down, left, right].
TANK_YELLOW_IMG = [pygame.transform.scale((pygame.image.load(os.path.join('textures', 'yellow_tank_up.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'yellow_tank_down.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'yellow_tank_left.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'yellow_tank_right.png'))), (52,52))]
TANK_GREEN_IMG = [pygame.transform.scale((pygame.image.load(os.path.join('textures', 'green_tank_up.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'green_tank_down.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'green_tank_left.png'))), (52,52)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'green_tank_right.png'))), (52,52))]
BULLET_IMG = [pygame.transform.scale((pygame.image.load(os.path.join('textures', 'bullet_u.png'))), (16,22)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'bullet_d.png'))), (16,22)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'bullet_l.png'))), (22,16)),
pygame.transform.scale((pygame.image.load(os.path.join('textures', 'bullet_r.png'))), (22,16))]
WATER_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_water_1.png'))), (64,64))
WATER_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_water_2.png'))), (64,64))
BRICK_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_brick.png'))), (64,64))
BRICK_IMG_MINI = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_brick_mini.png'))), (32,32))
GRASS_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_grass.png'))), (64,64))
SOLIDWALL_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'prop_solid_wall.png'))), (64,64))
EAGLE_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_eagle_1.png'))), (64,64))
EAGLE_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_eagle_2.png'))), (64,64))
EXPLOSION_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_1.png'))), (64,64))
EXPLOSION_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_2.png'))), (64,64))
EXPLOSION_3_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_3.png'))), (64,64))
EXPLOSION_GREAT_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_great_1.png'))), (128,128))
EXPLOSION_GREAT_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'entity_explosion_great_2.png'))), (128,128))
INVICIBLE_1_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'invicible_1.png'))), (52,52))
INVICIBLE_2_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'invicible_2.png'))), (52,52))
BACKGROUND_IMG = pygame.transform.scale((pygame.image.load(os.path.join('textures', 'background.png'))), (64,64))
# --- DQN training hyper-parameters ---------------------------------------
MAX_MEMORY = 100_000_000
BATCH_SIZE = 1000
LR = 0.0001
class AI_YELLOW:
    """DQN agent steering the yellow tank.

    Owns the policy network, its trainer and a replay buffer, flattens game
    observations into a 24-element state vector and picks one of five
    one-hot-encoded actions per frame.
    """
    def __init__(self):
        self.state = []                     # last flattened state vector
        self.gamma = 0.5                    # reward discount factor
        self.score = 0                      # running reward total (see final_score)
        self.memory = deque(maxlen=MAX_MEMORY)    # replay buffer of transitions
        self.model = Linear_QNet(24, 256, 64, 5)  # 24 inputs -> 5 actions
        self.trainer = QTrainer(self.model, lr=LR, gamma=self.gamma)
    def get_state(self, a, b, c, d, e, f, g, h, i, j):
        """Flatten the ten observation sub-lists into one list and return it."""
        self.state = []
        self.state_n = [a, b, c, d, e, f, g, h, i, j]
        for group in self.state_n:
            for value in group:
                self.get_state_loop(value)
        return self.state
    def get_state_loop(self, m):
        """Append one observation element to the state vector being built."""
        self.state.append(m)
    def get_action(self, state, frame):
        """Return a one-hot action list of length 5.

        The first 500 frames explore uniformly at random; afterwards the
        greedy argmax of the network output is used.  Fix: the previous
        version also computed a softmax + random.choices sample every call
        and discarded the result, and converted the state tensor
        float64 -> double (no-op) -> float32; both removed.
        """
        final_move = [0, 0, 0, 0, 0]
        if frame > 500:
            # single conversion straight to float32, the dtype the net expects
            state0 = torch.tensor(state, dtype=torch.float)
            prediction = self.model(state0)
            move = torch.argmax(prediction).item()
            final_move[move] = 1
        else:
            final_move[random.randint(0, 4)] = 1
        return final_move
    def print_state(self, state, frame, score):
        """Dump a human-readable (Polish) view of the state every 100 frames."""
        if frame % 100 == 0:
            print(f'---ŻÓŁTY------klata nr. {frame}--------wynik sumaryczny {score}---------')
            print(len(state))
            print(f'Pozycja Zółtego czołgu względem Zielonego czołgu {state[0:4]}')
            #print(f'Pozycja Zółtego czołgu względem własnego orła {state[4:8]}')
            #print(f'Pozycja Zółtego czołgu względem obcego orła {state[8:12]}')
            print(f'Zwrot swojego czołgu {state[4:8]}')
            print(f'Obecność swojego pocisku {state[8]}')
            print(f'Obecność przeciwnika pocisku {state[9]}')
            print(f'Kierunek swojego pocisku {state[10:14]}')
            print(f'Kierunek przeciwnika pocisku {state[14:18]}')
            print(f'Zwrot czołgu do obiektów 1.Tło - {state[18]} 2.Ściana - {state[19]} 3.Orzeł własny - ??? 4.Orzeł przeciwnika - ??? 5.Przeciwnik - {state[20]}')
            print(f'Czy Żółty czołg utkną? {state[21]}')
            print(f'Czy zielony czołg otrzymał obrażenia? {state[22]}')
            print(f'Czy żółty czołg otrzymał obrażenia? {state[23]}')
            #print(f'Czy orzeł zółtego otrzymał obrażenia przez żółtego? {state[23]}')
            #print(f'Czy orzeł zielonego otrzymał obrażenia przez żółtego? {state[24]}')
            print('------------------------------------------------------------')
    def train_short_memory(self, satte_old, action, reward, nest_state, done):
        """One training step on the latest transition.
        (Misspelled parameter names kept for keyword-call compatibility.)"""
        self.trainer.train_step(satte_old, action, reward, nest_state, done)
    def remember(self, satte_old, action, reward, nest_state, done):
        """Store one transition tuple in the replay buffer."""
        self.memory.append((satte_old, action, reward, nest_state, done))
    def final_score(self, reward):
        """Add ``reward`` to the running score; return it formatted '0.00'."""
        self.score += reward
        return "{0:0.2f}".format(self.score)
class AI_GREEN:
    """DQN agent steering the green tank (mirror of AI_YELLOW).

    Owns the policy network, its trainer and a replay buffer, flattens game
    observations into a 24-element state vector and picks one of five
    one-hot-encoded actions per frame.
    """
    def __init__(self):
        self.state = []                     # last flattened state vector
        self.gamma = 0.5                    # reward discount factor
        self.score = 0                      # running reward total (see final_score)
        self.memory = deque(maxlen=MAX_MEMORY)    # replay buffer of transitions
        self.model = Linear_QNet(24, 256, 64, 5)  # 24 inputs -> 5 actions
        self.trainer = QTrainer(self.model, lr=LR, gamma=self.gamma)
    def get_state(self, a, b, c, d, e, f, g, h, i, j):
        """Flatten the ten observation sub-lists into one list and return it."""
        self.state = []
        self.state_n = [a, b, c, d, e, f, g, h, i, j]
        for group in self.state_n:
            for value in group:
                self.get_state_loop(value)
        return self.state
    def get_state_loop(self, m):
        """Append one observation element to the state vector being built."""
        self.state.append(m)
    def get_action(self, state, frame):
        """Return a one-hot action list of length 5.

        The first 500 frames explore uniformly at random; afterwards the
        greedy argmax of the network output is used.  Fix: the previous
        version also computed a softmax + random.choices sample every call
        and discarded the result, and converted the state tensor
        float64 -> double (no-op) -> float32; both removed.
        """
        final_move = [0, 0, 0, 0, 0]
        if frame > 500:
            # single conversion straight to float32, the dtype the net expects
            state0 = torch.tensor(state, dtype=torch.float)
            prediction = self.model(state0)
            move = torch.argmax(prediction).item()
            final_move[move] = 1
        else:
            final_move[random.randint(0, 4)] = 1
        return final_move
    def print_state(self, state, frame, score):
        """Dump a human-readable (Polish) view of the state every 100 frames."""
        if frame % 100 == 0:
            print(f'---ZIELONY------klata nr. {frame}--------wynik sumaryczny {score}---------')
            print(len(state))
            print(f'Pozycja Zielonego czołgu względem Zółtego czołgu {state[0:4]}')
            #print(f'Pozycja Zielonego czołgu względem własnego orła {state[4:8]}')
            #print(f'Pozycja Zielonego czołgu względem obcego orła {state[8:12]}')
            print(f'Zwrot swojego czołgu {state[4:8]}')
            print(f'Obecność swojego pocisku {state[8]}')
            print(f'Obecność przeciwnika pocisku {state[9]}')
            print(f'Kierunek swojego pocisku {state[10:14]}')
            print(f'Kierunek przeciwnika pocisku {state[14:18]}')
            print(f'Zwrot czołgu do obiektów 1.Tło - {state[18]} 2.Ściana - {state[19]} 3.Orzeł własny - ??? 4.Orzeł przeciwnika - ??? 5.Przeciwnik - {state[20]}')
            print(f'Czy Zielony czołg utkną? {state[21]}')
            print(f'Czy Zółty czołg otrzymał obrażenia? {state[22]}')
            print(f'Czy Zielony czołg otrzymał obrażenia? {state[23]}')
            #print(f'Czy orzeł zielonego otrzymał obrażenia przez zielonego? {state[32]}')
            #print(f'Czy orzeł żółtego otrzymał obrażenia przez zielonego? {state[33]}')
            print('------------------------------------------------------------')
    def train_short_memory(self, satte_old, action, reward, nest_state, done):
        """One training step on the latest transition.
        (Misspelled parameter names kept for keyword-call compatibility.)"""
        self.trainer.train_step(satte_old, action, reward, nest_state, done)
    def remember(self, satte_old, action, reward, nest_state, done):
        """Store one transition tuple in the replay buffer."""
        self.memory.append((satte_old, action, reward, nest_state, done))
    def final_score(self, reward):
        """Add ``reward`` to the running score; return it formatted '0.00'."""
        self.score += reward
        return "{0:0.2f}".format(self.score)
class On_Hit_By_Yellow:
    """Collision/impact handling for one yellow bullet.

    ``dir`` is the bullet's travel direction ('up'/'down'/'left'/'right').
    Each ``*_on_hit`` method tests a bullet rect ``e`` (``i`` = its index in
    the global yellow-bullet lists) against one category of world rects and
    returns True on impact; the draw methods then animate the explosion at
    the position captured by explosion_find_location().  State is shared via
    the module-level rect registries.
    """
    def __init__(self, dir):
        self.dir = dir        # bullet travel direction
        self.x_exp = 0        # top-left corner for the explosion sprite
        self.y_exp = 0
        self.frame_l = 0      # little-explosion animation counter
        self.frame_h = 0      # hard-explosion animation counter
        self.break_bullet_one_time_flag = True   # break_bullet() pops only once
        self.allow_explosion_little = False
        self.allow_explosion_hard = False
    def brick_on_hit(self, i, e):
        """Remove every mini-brick overlapping ``e``; True when any was hit."""
        BRICK_RECT_TEMP = []
        for b in BRICK_RECT_MINI:
            if e.colliderect(b):
                BRICK_RECT_TEMP.append(b)
        if len(BRICK_RECT_TEMP) >= 1:
            # collect first, then remove, to avoid mutating during iteration
            for x in BRICK_RECT_TEMP:
                BRICK_RECT_MINI.remove(x)
            self.explosion_find_location()
            self.allow_explosion_hard = True
            return True
        return False
    def solid_on_hit(self, i, e):
        """True when ``e`` touches an indestructible wall (small explosion)."""
        for b in SOLID_RECT:
            if e.colliderect(b):
                self.explosion_find_location()
                self.allow_explosion_little = True
                return True
        return False
    def background_on_hit(self, i, e):
        """True when ``e`` reaches a background tile outside the playfield."""
        for b in BACKGROUND_RECT:
            if e.colliderect(b):
                self.explosion_find_location()
                self.allow_explosion_little = True
                return True
        return False
    def green_tank_on_hit(self, i, e, TG_MASK, TG_CLASS, TG_DEST, TG_INVI):
        """True when ``e`` hits the (non-invincible) green tank.

        NOTE(review): ``TG_DEST`` is accepted but never used here, and
        ``self.does_enemy_tank_got_hit`` is written but never read in this
        class -- presumably consumed elsewhere; confirm.
        """
        if e.colliderect(TG_MASK) and TG_INVI is False:
            print('Green Tank took damage')
            self.does_enemy_tank_got_hit = True
            TG_CLASS.__init__()  # re-run __init__ in place: reset/respawn the tank
            return True
        return False
    def eagle_greens_tank_on_hit(self, i, e, TG_CLASS, TY_CLASS, MAPPING):
        """True when ``e`` destroys the green eagle; resets both tanks.
        (``MAPPING`` parameter is unused here.)"""
        for b in EAGLE_G:
            if e.colliderect(b):
                TG_CLASS.__init__()
                TY_CLASS.__init__()
                print('Green\'s eagle gas been destroyed')
                self.does_enemy_eagle_got_hit = True
                return True
        return False
    def eagle_yellows_tank_on_hit(self, i, e, TG_CLASS, TY_CLASS, MAPPING):
        """True when ``e`` destroys yellow's own eagle; resets both tanks."""
        for b in EAGLE_Y:
            if e.colliderect(b):
                TG_CLASS.__init__()
                TY_CLASS.__init__()
                print('Yellow\'s eagle gas been destroyed')
                self.does_ally_eagle_fot_hit = True
                return True
        return False
    def enemys_bullet_on_hit(self, i, e):
        """True when ``e`` collides with a green bullet; removes that bullet.

        NOTE(review): pops the GREEN bullet lists at index ``i``, which is
        this YELLOW bullet's index -- only safe if indices correspond;
        verify at the call site.
        """
        for b in BULLETS_G_RECT:
            if e.colliderect(b):
                if len(BULLETS_G_RECT) >= 1:
                    BULLETS_G_objects.pop(i)
                    BULLETS_G_RECT.pop(i)
                return True
        return False
    def break_bullet(self, i):
        """Remove this yellow bullet from the global lists, exactly once."""
        if self.break_bullet_one_time_flag:
            BULLETS_Y_objects.pop(i)
            BULLETS_Y_RECT.pop(i)
            self.break_bullet_one_time_flag = False
    def explosion_find_location(self):
        """Anchor the explosion sprite on the (last) yellow bullet rect,
        shifted 26px perpendicular to travel to roughly centre the 64px art."""
        for k in BULLETS_Y_RECT:
            if self.dir == 'right':
                self.x_exp = k.x
                self.y_exp = k.y - 26
            if self.dir == 'left':
                self.x_exp = k.x
                self.y_exp = k.y - 26
            if self.dir == 'up':
                self.x_exp = k.x - 26
                self.y_exp = k.y
            if self.dir == 'down':
                self.x_exp = k.x - 26
                self.y_exp = k.y
    def draw_explosion_little(self, screen, elf):
        """Three-frame small explosion (impact on solid wall / background).

        NOTE(review): ``elf = False`` only rebinds the local parameter (no
        effect on the caller) and ``self.frame_l += 0`` is a no-op; unlike
        draw_explosion_hard, frame_l is never reset -- confirm intent.
        """
        if self.allow_explosion_little and elf:
            if self.frame_l == 0:
                screen.blit(EXPLOSION_1_IMG,(self.x_exp, self.y_exp))
            if self.frame_l == 1:
                screen.blit(EXPLOSION_2_IMG,(self.x_exp, self.y_exp))
            if self.frame_l == 2:
                screen.blit(EXPLOSION_1_IMG,(self.x_exp, self.y_exp))
            if self.frame_l >= 2:
                self.allow_explosion_little = False
                elf = False
                self.frame_l += 0
            else:
                self.frame_l += 1
    def draw_explosion_hard(self, screen, ehf):
        """Four-frame big explosion (brick destruction); resets frame_h at end."""
        if self.allow_explosion_hard and ehf:
            if self.frame_h <= 1:
                screen.blit(EXPLOSION_2_IMG,(self.x_exp, self.y_exp))
            if self.frame_h >= 2 and self.frame_h < 4:
                screen.blit(EXPLOSION_3_IMG,(self.x_exp, self.y_exp))
            if self.frame_h >= 4:
                ehf = False
                self.allow_explosion_hard = False
                self.frame_h = 0
            else:
                self.frame_h += 1
class On_Hit_By_Green:
    """Collision resolution for a single bullet fired by the green tank.

    A fresh instance is created per shot (see Tank_Green.execute_shoot).
    Its methods test the bullet rect against map tiles, the yellow tank,
    both eagles and enemy bullets, mutate the shared module-level lists
    accordingly, and drive the explosion animations.
    """
    def __init__(self, dir):
        self.dir = dir                          # travel direction of the shot
        self.x_exp = 0                          # explosion anchor (set on hit)
        self.y_exp = 0
        self.frame_l = 0                        # small-explosion frame counter
        self.frame_h = 0                        # large-explosion frame counter
        self.break_bullet_one_time_flag = True  # bullet may be removed once
        self.allow_explosion_little = False
        self.allow_explosion_hard = False
    def brick_on_hit(self, i, e):
        """Remove every mini-brick tile overlapped by bullet rect *e*.
        Returns True (and arms the big explosion) on a hit."""
        hit = [b for b in BRICK_RECT_MINI if e.colliderect(b)]
        if hit:
            for b in hit:
                BRICK_RECT_MINI.remove(b)
            self.explosion_find_location()
            self.allow_explosion_hard = True
            return True
        return False
    def solid_on_hit(self, i, e):
        """True when *e* hits an indestructible wall (small explosion)."""
        for b in SOLID_RECT:
            if e.colliderect(b):
                self.explosion_find_location()
                self.allow_explosion_little = True
                return True
        return False
    def background_on_hit(self, i, e):
        """True when *e* reaches an arena-border background tile."""
        for b in BACKGROUND_RECT:
            if e.colliderect(b):
                self.explosion_find_location()
                self.allow_explosion_little = True
                return True
        return False
    def yellow_tank_on_hit(self, i, e, TY_MASK, TG_CLASS, TY_DEST, TY_INVI):
        """True when *e* hits the yellow tank while it is not invincible."""
        if e.colliderect(TY_MASK) and TY_INVI is False:
            # NOTE(review): TY_DEST is a plain parameter - this assignment
            # is local only and never reaches the caller; the hit is
            # actually reported through does_enemy_tank_got_hit.  Confirm.
            TY_DEST = True
            TG_CLASS.__init__()
            print('Yellow Tank took damage')
            self.does_enemy_tank_got_hit = True
            return True
        return False
    def eagle_greens_tank_on_hit(self, i, e, TG_CLASS, TY_CLASS, MAPPING):
        """True when green's own eagle is destroyed by its own bullet *e*."""
        for b in EAGLE_G:
            if e.colliderect(b):
                TG_CLASS.__init__()
                TY_CLASS.__init__()
                print('Green\'s eagle has been destroyed')
                self.does_ally_eagle_got_hit = True
                return True
        return False
    def eagle_yellows_tank_on_hit(self, i, e, TG_CLASS, TY_CLASS, MAPPING):
        """True when yellow's eagle (the enemy eagle) is destroyed by *e*."""
        for b in EAGLE_Y:
            if e.colliderect(b):
                TG_CLASS.__init__()
                TY_CLASS.__init__()
                print('Yellow\'s eagle has been destroyed')
                self.does_enemy_eagle_got_hit = True
                return True
        return False
    def enemys_bullet_on_hit(self, i, e):
        """Bullet-vs-bullet: does green bullet *e* hit a yellow bullet?

        On a collision the YELLOW bullet is removed.  BUG FIX: the
        original popped index *i* (an index into the GREEN bullet lists)
        from the YELLOW lists; the yellow bullet's own index is used
        instead.  *i* is kept for interface compatibility.
        """
        for idx, b in enumerate(BULLETS_Y_RECT):
            if e.colliderect(b):
                BULLETS_Y_objects.pop(idx)
                BULLETS_Y_RECT.pop(idx)
                return True
        return False
    def break_bullet(self, i):
        """Remove green bullet *i*, at most once per instance."""
        if self.break_bullet_one_time_flag:
            BULLETS_G_objects.pop(i)
            BULLETS_G_RECT.pop(i)
            self.break_bullet_one_time_flag = False
    def explosion_find_location(self):
        """Anchor the explosion sprite on the green bullet's position:
        26 px above for horizontal shots, 26 px left for vertical ones.
        The last rect in BULLETS_G_RECT wins."""
        for k in BULLETS_G_RECT:
            if self.dir in ('right', 'left'):
                self.x_exp = k.x
                self.y_exp = k.y - 26
            elif self.dir in ('up', 'down'):
                self.x_exp = k.x - 26
                self.y_exp = k.y
    def draw_explosion_little(self, screen, elf):
        """Play the small 3-frame explosion (img1, img2, img1).

        NOTE(review): ``elf = False`` rebinds a local parameter only.
        The original's no-op ``self.frame_l += 0`` was removed.
        """
        if self.allow_explosion_little and elf:
            if self.frame_l == 0:
                screen.blit(EXPLOSION_1_IMG,(self.x_exp, self.y_exp))
            if self.frame_l == 1:
                screen.blit(EXPLOSION_2_IMG,(self.x_exp, self.y_exp))
            if self.frame_l == 2:
                screen.blit(EXPLOSION_1_IMG,(self.x_exp, self.y_exp))
            if self.frame_l >= 2:
                self.allow_explosion_little = False
                elf = False
            else:
                self.frame_l += 1
    def draw_explosion_hard(self, screen, ehf):
        """Play the large explosion (img2, img3, img2 over 3 frames).
        Intentionally shorter than On_Hit_By_Yellow's 5-frame variant -
        kept as-is to preserve behaviour."""
        if self.allow_explosion_hard and ehf:
            if self.frame_h == 0:
                screen.blit(EXPLOSION_2_IMG,(self.x_exp, self.y_exp))
            if self.frame_h == 1:
                screen.blit(EXPLOSION_3_IMG,(self.x_exp, self.y_exp))
            if self.frame_h == 2:
                screen.blit(EXPLOSION_2_IMG,(self.x_exp, self.y_exp))
            if self.frame_h >= 2:
                ehf = False
                self.allow_explosion_hard = False
                self.frame_h = 0
            else:
                self.frame_h += 1
class Mapping:
    """Parse the ASCII level layout (module-level MAPPING) into rect lists
    and draw the static map tiles each frame.

    Tile codes seen here: 'H' background, 'G' grass, 'W' water,
    'B' brick (currently disabled), 'S' solid wall, '3' yellow eagle,
    '4' green eagle.  All rects are appended to shared module-level lists.
    """
    def __init__(self):
        # (x, y) cursor used while scanning MAPPING, in pixels.
        self.x = 0
        self.y = 0
        # Frame counter used to alternate the water animation in draw_props.
        self.frames = 0
        self.convert_entities()
    def convert_entities(self):
        """Walk MAPPING row by row and append one SQM x SQM rect per tile
        to the matching global list."""
        for row in MAPPING:
            for col in row:
                if col == 'H':
                    BACKGROUND_RECT.append(pygame.Rect((self.x,self.y,SQM,SQM)))
                elif col == 'G':
                    GRASS_RECT.append(pygame.Rect((self.x,self.y,SQM,SQM)))
                elif col == 'W':
                    WATER_RECT.append(pygame.Rect((self.x,self.y,SQM,SQM)))
                elif col == 'B':
                    # Brick tiles are currently disabled; the original
                    # sub-tile generation is kept below for reference.
                    #BRICK_RECT.append(pygame.Rect((self.x,self.y,SQM,SQM)))
                    #BRICK_RECT_MANY.append(BRICK_IMG)
                    #self.convert_entities_mini()
                    pass
                elif col == 'S':
                    SOLID_RECT.append(pygame.Rect((self.x,self.y,SQM,SQM)))
                elif col == '3':
                    EAGLE_Y.append(pygame.Rect((self.x,self.y,SQM,SQM)))
                elif col == '4':
                    EAGLE_G.append(pygame.Rect((self.x,self.y,SQM,SQM)))
                self.x+=SQM
            self.y+=SQM
            self.x=0
    def convert_entities_mini(self):
        """Split the current brick tile into a 2x2 grid of half-size rects
        (destructible sub-bricks).  Only called from the disabled 'B'
        branch above."""
        self.x_mini = self.x
        self.y_mini = self.y
        for i in range(2):
            for j in range(2):
                BRICK_RECT_MINI.append(pygame.Rect((self.x_mini,self.y_mini,SQM/2,SQM/2)))
                self.x_mini += SQM/2
            self.y_mini += SQM/2
            self.x_mini = self.x
    def draw_props(self, screen):
        """Blit all static tiles; water alternates between two sprites on a
        60-frame cycle (first 31 frames sprite 1, then sprite 2)."""
        for x in BACKGROUND_RECT:
            #pygame.draw.rect(screen,(89, 89, 89),x)
            screen.blit(BACKGROUND_IMG, (x.x,x.y))
        for x in GRASS_RECT:
            #pygame.draw.rect(screen,(51, 204, 51),x)
            screen.blit(GRASS_IMG, (x.x,x.y))
        for x in WATER_RECT:
            #pygame.draw.rect(screen,(0, 153, 255),x)
            if self.frames <= 30:
                screen.blit(WATER_1_IMG, (x.x,x.y))
            else:
                screen.blit(WATER_2_IMG, (x.x,x.y))
        '''
        for x in BRICK_RECT:
            screen.blit(BRICK_IMG, (x.x,x.y))
        for x in BRICK_RECT_MINI:
            screen.blit(BRICK_IMG_MINI, (x.x,x.y))
        '''
        for x in SOLID_RECT:
            screen.blit(SOLIDWALL_IMG, (x.x,x.y))
        for x in EAGLE_Y:
            screen.blit(EAGLE_1_IMG, (x.x,x.y))
        for x in EAGLE_G:
            screen.blit(EAGLE_1_IMG, (x.x,x.y))
        self.frames += 1
        if self.frames == 60:
            self.frames = 0
class Bullet_TY(object):
    """A bullet sprite used by both tanks.

    The spawn offset and hitbox size depend on the firing direction;
    the bullet then travels in a straight line at a fixed speed.
    """

    # dir -> (x offset, y offset, width, height) relative to the tank mask.
    # NOTE(review): 'up' uses a y offset of 7 where 'down' uses 15 - kept
    # exactly as the original; confirm the asymmetry is intentional.
    _SPAWN = {
        'right': (15, 18, 22, 16),
        'left':  (15, 18, 22, 16),
        'down':  (18, 15, 16, 22),
        'up':    (18, 7, 16, 22),
    }
    # dir -> (dx, dy) unit step, scaled by self.vel each frame.
    _STEP = {'right': (1, 0), 'left': (-1, 0), 'down': (0, 1), 'up': (0, -1)}
    # dir -> index into the BULLET_IMG sprite list.
    _IMG_INDEX = {'up': 0, 'down': 1, 'left': 2, 'right': 3}

    def __init__(self, x, y, dir):
        self.dir = dir
        self.vel = 22
        off_x, off_y, self.width, self.height = self._SPAWN[dir]
        self.x = x + off_x
        self.y = y + off_y

    def move(self):
        """Advance the bullet's own coordinates one step."""
        dx, dy = self._STEP[self.dir]
        self.x += dx * self.vel
        self.y += dy * self.vel

    def movehitbox(self, rect):
        """Advance the external collision rect by the same step as move()."""
        dx, dy = self._STEP[self.dir]
        rect.x += dx * self.vel
        rect.y += dy * self.vel

    def draw(self, screen):
        """Blit the direction-appropriate bullet sprite at the bullet's
        current position."""
        self.BULLET_DRAW = BULLET_IMG[self._IMG_INDEX[self.dir]]
        screen.blit(self.BULLET_DRAW, (self.x, self.y))
class Tank_Yellow:
    """The yellow tank: movement, shooting, collision handling, and the
    RL-style observation/reward helpers used by the AI driver.

    Heavily coupled to module-level globals (MAPPING, SQM, the *_RECT tile
    lists and the BULLETS_Y_* lists); most methods mutate shared state, so
    statement order is significant throughout.  Mirrors Tank_Green almost
    line for line.
    """
    def __init__(self):
        """Scan MAPPING for the '1' marker to find the spawn position and
        reset all per-round state."""
        self.x = 0
        self.y = 0
        # Key-held flags in the order [right, left, down, up] (see bind()).
        self.actions = [False, False, False, False]
        self.TY_face = TANK_YELLOW_IMG[3]
        self.TY_face_txt = 'right'
        self.tank_yellow_shoot_allow = True
        self.tank_yellow_shoot_cooldown = False
        self.explosion_l_flag = False
        self.explosion_h_flag = False
        self.yellow_tank_destroyed = False
        # NOTE: "invicible" is a misspelling of "invincible", kept because
        # external code reads this attribute name.
        self.yellow_tank_invicible = True
        self.frames_inv = 0
        self.bullet_dir = None
        self.eagle_yellows_tank_on_hit_state = False
        self.green_tank_on_hit_state = False
        self.eagle_greens_tank_on_hit_state = False
        self.AI_player = True
        self.Human_player = True
        for row in MAPPING:
            for col in row:
                if col == '1':
                    self.ty_pos_x = self.x
                    self.ty_pos_y = self.y
                self.x+=SQM
            self.y+=SQM
            self.x=0
        # 52x52 px collision mask; also serves as the tank's position.
        self.TY_mask = pygame.Rect(self.ty_pos_x, self.ty_pos_y, 52, 52)
    def bind(self, event):
        """Translate WSAD key events into the self.actions flags."""
        if event.type == KEYDOWN:
            if event.key == K_d:
                self.actions[0] = True
            elif event.key == K_a:
                self.actions[1] = True
            elif event.key == K_s:
                self.actions[2] = True
            elif event.key == K_w:
                self.actions[3] = True
        if event.type == KEYUP:
            if event.key == K_d:
                self.actions[0] = False
            elif event.key == K_a:
                self.actions[1] = False
            elif event.key == K_s:
                self.actions[2] = False
            elif event.key == K_w:
                self.actions[3] = False
    def move_tank(self, action):
        """Move 8 px in the first active direction of *action*
        ([right, left, down, up]), updating the facing sprite, then resolve
        horizontal and vertical collisions separately (axis-by-axis so the
        mask is pushed back flush against the blocking tile)."""
        self.movement = [0,0]
        if action[0]:
            self.movement[0] += 8
            self.TY_face = TANK_YELLOW_IMG[3]
            self.TY_face_txt = 'right'
        elif action[1]:
            self.movement[0] -= 8
            self.TY_face = TANK_YELLOW_IMG[2]
            self.TY_face_txt = 'left'
        elif action[3]:
            self.movement[1] -= 8
            self.TY_face = TANK_YELLOW_IMG[0]
            self.TY_face_txt = 'up'
        elif action[2]:
            self.movement[1] += 8
            self.TY_face = TANK_YELLOW_IMG[1]
            self.TY_face_txt = 'down'
        self.TY_mask.x += self.movement[0]
        self.collisions_h = self.collision_test()
        for tile in self.collisions_h:
            if self.movement[0] > 0:
                self.TY_mask.right = tile.left
            if self.movement[0] < 0:
                self.TY_mask.left = tile.right
        self.TY_mask.y += self.movement[1]
        self.collisions_v = self.collision_test()
        for tile in self.collisions_v:
            if self.movement[1] > 0:
                self.TY_mask.bottom = tile.top
            if self.movement[1] < 0:
                self.TY_mask.top = tile.bottom
        self.collisions_sum = [self.collisions_h, self.collisions_v]
    def collision_test(self):
        """Return every impassable tile rect currently overlapping the
        tank mask (walls, bricks, water, both eagles)."""
        colli = []
        for back in BACKGROUND_RECT:
            if self.TY_mask.colliderect(back):
                colli.append(back)
        for back in SOLID_RECT:
            if self.TY_mask.colliderect(back):
                colli.append(back)
        for back in BRICK_RECT:
            if self.TY_mask.colliderect(back):
                colli.append(back)
        for back in WATER_RECT:
            if self.TY_mask.colliderect(back):
                colli.append(back)
        for back in EAGLE_Y:
            if self.TY_mask.colliderect(back):
                colli.append(back)
        for back in EAGLE_G:
            if self.TY_mask.colliderect(back):
                colli.append(back)
        for back in BRICK_RECT_MINI:
            if self.TY_mask.colliderect(back):
                colli.append(back)
        return colli
    def draw(self, screen, flag_1, flag_2):
        """Draw the tank unless destroyed (*flag_1*); while invincible
        (*flag_2*) overlay the blinking shield for the first 45 frames."""
        if flag_1 is False:
            screen.blit(self.TY_face,(self.TY_mask.x,self.TY_mask.y))
        if flag_2:
            if (self.frames_inv % 4) == 0 or (self.frames_inv % 4) == 1:
                screen.blit(INVICIBLE_1_IMG,(self.TY_mask.x,self.TY_mask.y))
            elif (self.frames_inv % 4) == 2 or (self.frames_inv % 4) == 3:
                screen.blit(INVICIBLE_2_IMG,(self.TY_mask.x,self.TY_mask.y))
            if self.frames_inv >= 45:
                self.yellow_tank_invicible = False
            self.frames_inv += 1
    def bind_shoot(self, Flag):
        """Fire when the R key is held and shooting is allowed (*Flag*)."""
        if Flag:
            keys = pygame.key.get_pressed()
            if keys[pygame.K_r]:
                flag_temp = True
                self.execute_shoot(flag_temp)
    def execute_shoot(self, Flag):
        """Spawn a bullet facing the tank's direction, register its rect,
        and start the shot cooldown.  A fresh On_Hit_By_Yellow instance
        handles this bullet's collisions."""
        if Flag:
            self.frames = 0
            self.tank_yellow_shoot_cooldown = True
            self.tank_yellow_shoot_allow = False
            self.b_ty = Bullet_TY(self.TY_mask.x, self.TY_mask.y, self.TY_face_txt)
            BULLETS_Y_objects.append(self.b_ty)
            BULLETS_Y_RECT.append(pygame.Rect(self.b_ty.x,self.b_ty.y,self.b_ty.width,self.b_ty.height))
            self.OHBY = On_Hit_By_Yellow(self.b_ty.dir)
            self.bullet_dir = self.b_ty.dir
    def shoot_delay(self, flag):
        """Re-allow shooting once the bullet is gone and at least 20 frames
        have elapsed since the shot."""
        if flag:
            if len(BULLETS_Y_RECT) == 0 and self.frames > 20:
                self.tank_yellow_shoot_allow = True
                self.tank_yellow_shoot_cooldown = False
                self.bullet_dir = None
            self.frames += 1
    def bullets_onhit(self, TG_MASK, TG_CLASS, TY_CLASS, TG_DEST, TG_INVI, MAPPING, screen):
        """Run every collision handler for each live yellow bullet, remove
        the bullet if any handler reported a hit, and draw explosions."""
        if len(BULLETS_Y_RECT) >= 1:
            for i, e in enumerate(BULLETS_Y_RECT):
                self.explosion_h_flag = True
                self.explosion_l_flag = True
                self.brick_on_hit_state = self.OHBY.brick_on_hit(i, e)
                self.background_on_hit_state = self.OHBY.background_on_hit(i, e)
                self.green_tank_on_hit_state = self.OHBY.green_tank_on_hit(i, e, TG_MASK, TG_CLASS, TG_DEST, TG_INVI)
                self.solid_on_hit_state = self.OHBY.solid_on_hit(i, e)
                self.eagle_greens_tank_on_hit_state = self.OHBY.eagle_greens_tank_on_hit(i, e, TG_CLASS, TY_CLASS, MAPPING)
                self.eagle_yellows_tank_on_hit_state = self.OHBY.eagle_yellows_tank_on_hit(i, e, TG_CLASS, TY_CLASS, MAPPING)
                self.enemys_bullet_on_hit_state = self.OHBY.enemys_bullet_on_hit(i, e)
                self.states = [self.brick_on_hit_state,
                                self.background_on_hit_state,
                                self.green_tank_on_hit_state,
                                self.solid_on_hit_state,
                                self.eagle_greens_tank_on_hit_state,
                                self.eagle_yellows_tank_on_hit_state,
                                self.enemys_bullet_on_hit_state]
                for xi in self.states:
                    if xi:
                        self.OHBY.break_bullet(i)
        if self.explosion_l_flag or self.explosion_h_flag:
            self.OHBY.draw_explosion_little(screen, self.explosion_l_flag)
            self.OHBY.draw_explosion_hard(screen, self.explosion_h_flag)
    def yellow_tank_position_relative_with_green_tank(self, TY_mask, TG_mask):
        """Observation: where the green tank lies relative to this tank.
        Returns flags [R,L,U,D]; equal coordinates set both of a pair."""
        #flags [R,L,U,D]
        flags = [False, False, False, False]
        if TY_mask.x <= TG_mask.x:
            flags[0] = True
        if TY_mask.x >= TG_mask.x:
            flags[1] = True
        if TY_mask.y >= TG_mask.y:
            flags[2] = True
        if TY_mask.y <= TG_mask.y:
            flags[3] = True
        return flags
    def yellow_eagle_position_relative_with_yellow_tank(self, TY_mask):
        """Observation: where yellow's own eagle lies relative to the tank.
        Returns flags [R,L,U,D] (last eagle rect wins)."""
        #flags [R,L,U,D]
        flags = [False, False, False, False]
        for i in EAGLE_Y:
            if TY_mask.x <= i.x:
                flags[0] = True
            if TY_mask.x >= i.x:
                flags[1] = True
            if TY_mask.y >= i.y:
                flags[2] = True
            if TY_mask.y <= i.y:
                flags[3] = True
        return flags
    def green_eagle_position_relative_with_yellow_tank(self, TY_mask):
        """Observation: where green's eagle lies relative to the tank.
        Returns flags [R,L,U,D]."""
        #flags [R,L,U,D]
        flags = [False, False, False, False]
        for i in EAGLE_G:
            if TY_mask.x <= i.x:
                flags[0] = True
            if TY_mask.x >= i.x:
                flags[1] = True
            if TY_mask.y >= i.y:
                flags[2] = True
            if TY_mask.y <= i.y:
                flags[3] = True
        return flags
    def yellow_tank_direction(self):
        """Observation: one-hot facing direction [R,L,U,D]."""
        #flags [R,L,U,D]
        flags = [False, False, False, False]
        if self.TY_face_txt == 'right':
            flags[0] = True
        elif self.TY_face_txt == 'left':
            flags[1] = True
        elif self.TY_face_txt == 'up':
            flags[2] = True
        elif self.TY_face_txt == 'down':
            flags[3] = True
        return flags
    def yellow_tank_bullet_presence(self):
        """Observation: [True] while this tank's bullet is in flight
        (shooting disallowed implies a bullet exists)."""
        flag = False
        if self.tank_yellow_shoot_allow is True:
            flag = False
        elif self.tank_yellow_shoot_allow is False:
            flag = True
        return [flag]
    def yellow_tank_own_bullet_direction(self, dir, pres):
        """Observation: one-hot [R,L,U,D] of the in-flight bullet's
        direction, all False when no bullet is present (*pres*)."""
        #flags [R,L,U,D]
        flags = [False, False, False, False]
        if pres:
            if dir == 'right':
                flags[0] = True
            elif dir == 'left':
                flags[1] = True
            elif dir == 'up':
                flags[2] = True
            elif dir == 'down':
                flags[3] = True
        return flags
    def yellow_tank_faced_to_entity_solid(self, dir, TY_MASK, TG_MASK, win):
        """Ray-cast from the tank centre along facing direction *dir*
        ([R,L,U,D] flags), sampling a 1x1 probe every 16 px for 44 steps,
        and report the first thing hit as
        [background, solid, enemy-tank] booleans.  Also draws the probe as
        a red debug pixel on *win*.

        NOTE(review): if no *dir* flag is set the final return references
        self.loop_logic_* from a previous call (AttributeError on the very
        first call) - confirm callers always pass a one-hot dir.
        """
        self.xn = TY_MASK.x + 26
        self.yn = TY_MASK.y + 26
        if dir[0] is True:
            for i in range(44):
                self.xn += 16
                self.sample = pygame.Rect(self.xn,self.yn,1,1)
                pygame.draw.rect(win, (255, 0, 0), self.sample)
                self.loop_logic_background = self.yellow_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
                self.loop_logic_solid = self.yellow_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
                #self.loop_logic_own_eagle= self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_Y)
                #self.loop_logic_enemys_eagle = self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_G)
                self.loop_logic_enemy = self.yellow_tank_faced_to_enemy_loop(self.sample, TG_MASK)
                self.logic_array = np.array([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
                self.logic_array_single = np.where(self.logic_array == True)
                if len(self.logic_array_single[0]) >= 1:
                    return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
        if dir[1] is True:
            for i in range(44):
                self.xn -= 16
                self.sample = pygame.Rect(self.xn,self.yn,1,1)
                pygame.draw.rect(win, (255, 0, 0), self.sample)
                self.loop_logic_background = self.yellow_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
                self.loop_logic_solid = self.yellow_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
                #self.loop_logic_own_eagle= self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_Y)
                #self.loop_logic_enemys_eagle = self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_G)
                self.loop_logic_enemy = self.yellow_tank_faced_to_enemy_loop(self.sample, TG_MASK)
                self.logic_array = np.array([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
                self.logic_array_single = np.where(self.logic_array == True)
                if len(self.logic_array_single[0]) >= 1:
                    return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
        if dir[2] is True:
            for i in range(44):
                self.yn -= 16
                self.sample = pygame.Rect(self.xn,self.yn,1,1)
                pygame.draw.rect(win, (255, 0, 0), self.sample)
                self.loop_logic_background = self.yellow_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
                self.loop_logic_solid = self.yellow_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
                #self.loop_logic_own_eagle= self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_Y)
                #self.loop_logic_enemys_eagle = self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_G)
                self.loop_logic_enemy = self.yellow_tank_faced_to_enemy_loop(self.sample, TG_MASK)
                self.logic_array = np.array([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
                self.logic_array_single = np.where(self.logic_array == True)
                if len(self.logic_array_single[0]) >= 1:
                    return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
        if dir[3] is True:
            for i in range(44):
                self.yn += 16
                self.sample = pygame.Rect(self.xn,self.yn,1,1)
                pygame.draw.rect(win, (255, 0, 0), self.sample)
                self.loop_logic_background = self.yellow_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
                self.loop_logic_solid = self.yellow_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
                #self.loop_logic_own_eagle= self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_Y)
                #self.loop_logic_enemys_eagle = self.yellow_tank_faced_to_entity_loop(self.sample, EAGLE_G)
                self.loop_logic_enemy = self.yellow_tank_faced_to_enemy_loop(self.sample, TG_MASK)
                self.logic_array = np.array([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
                self.logic_array_single = np.where(self.logic_array == True)
                if len(self.logic_array_single[0]) >= 1:
                    return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
        return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
    def yellow_tank_faced_to_entity_loop(self, sample, entity):
        """True when the probe rect *sample* overlaps any rect in *entity*."""
        self.sample = sample
        for ni in entity:
            if self.sample.colliderect(ni):
                return True
        return False
    def yellow_tank_faced_to_enemy_loop(self, sample, TG_MASK):
        """True when the probe rect *sample* overlaps the green tank."""
        self.sample = sample
        if self.sample.colliderect(TG_MASK):
            return True
        return False
    def yellow_tank_stuck(self, colli):
        """Observation: [True] when the last move collided on either axis
        (*colli* is move_tank's collisions_sum)."""
        if len(colli[0]) >= 1 or len(colli[1]) >= 1:
            return [True]
        return [False]
    def green_tank_got_hit(self, flag):
        """Consume-and-report: [True] once after yellow destroyed the green
        tank, then the state flag is cleared.  *flag* is unused."""
        if self.green_tank_on_hit_state:
            self.green_tank_on_hit_state = False
            print('Żółty czołg zniszczył zielony czołg')
            return [True]
        else:
            return [False]
    def yellow_eagle_got_hit_by_yellow(self, flag):
        """Consume-and-report: [True] once after yellow destroyed its own
        eagle.  *flag* is unused."""
        if self.eagle_yellows_tank_on_hit_state:
            self.eagle_yellows_tank_on_hit_state = False
            print('Żółty czołg zniszczył swojego orła')
            return [True]
        else:
            return [False]
    def green_eagle_got_hit_by_yellow(self, flag):
        """Consume-and-report: [True] once after yellow destroyed the
        enemy's eagle.  *flag* is unused."""
        if self.eagle_greens_tank_on_hit_state:
            self.eagle_greens_tank_on_hit_state = False
            print('Żółty czołg zniszczył orła przeciwnika')
            return [True]
        else:
            return [False]
    def yellow_tank_collision_sensor(self, TY_MASK):
        """Observation: [True] when a 56x56 rect (the mask inflated by
        2 px) touches a wall, water or background tile."""
        self.xs = TY_MASK.x - 2
        self.ys = TY_MASK.y - 2
        self.coli_sensor = pygame.Rect(self.xs,self.ys,56,56)
        for n in SOLID_RECT:
            if self.coli_sensor.colliderect(n):
                return [True]
        for n in WATER_RECT:
            if self.coli_sensor.colliderect(n):
                return [True]
        for n in BACKGROUND_RECT:
            if self.coli_sensor.colliderect(n):
                return [True]
        return [False]
    def play_step(self, action, green_tank_got_hit_by_yellow, yellow_tank_got_hit_by_green, yellow_eagle_got_hit_by_yellow, green_eagle_got_hit_by_yellow, yellow_tank_collision_sensor_state, frame_counter_idle):
        """RL step: apply *action* ([R,L,D,U,shoot]) then compute
        (reward, game_over).  Priority order: wall proximity -0.1,
        kill +50, death -50, own eagle lost -150, enemy eagle +150,
        1000 idle frames -10 and episode end."""
        self.move_it(action)
        REWARD = 0
        GAME_OVER = False
        if yellow_tank_collision_sensor_state[0]:
            REWARD = - 0.1
        elif green_tank_got_hit_by_yellow[0]:
            GAME_OVER = True
            REWARD = 50
        elif yellow_tank_got_hit_by_green[0]:
            GAME_OVER = True
            REWARD = -50
        elif yellow_eagle_got_hit_by_yellow[0]:
            GAME_OVER = True
            REWARD = -150
        elif green_eagle_got_hit_by_yellow[0]:
            GAME_OVER = True
            REWARD = 150
        elif frame_counter_idle >= 1000:
            REWARD = - 10
            GAME_OVER = True
        return REWARD, GAME_OVER
    def move_it(self, action):
        """Apply a combined move+shoot action vector [R,L,D,U,shoot]."""
        #[RLUDS]
        self.move_tank(action)
        if action[4] == 1:
            self.execute_shoot(self.tank_yellow_shoot_allow)
    def restart(self):
        """Teleport the tank back to its MAPPING spawn position."""
        self.TY_mask.x = self.ty_pos_x
        self.TY_mask.y = self.ty_pos_y
class Tank_Green:
    """The green tank: movement, shooting, collision handling, and the
    RL-style observation/reward helpers.  Mirror image of Tank_Yellow.

    Two copy-paste bugs are fixed relative to the original:
    * yellow_eagle_position_relative_with_green_tank scanned EAGLE_G
      (green's eagle) instead of EAGLE_Y;
    * yellow_eagle_got_hit_by_green returned [False] even when the enemy
      eagle was destroyed.
    """
    def __init__(self):
        """Scan MAPPING for the '2' marker to find the spawn position and
        reset all per-round state."""
        self.x = 0
        self.y = 0
        # Key-held flags in the order [right, left, down, up] (see bind()).
        self.actions = [False, False, False, False]
        self.TG_face = TANK_GREEN_IMG[2]
        self.TG_face_txt = 'left'
        self.tank_green_shoot_allow = True
        self.tank_green_shoot_cooldown = False
        self.explosion_l_flag = False
        self.explosion_h_flag = False
        self.pos_init_find = True
        self.green_tank_destroyed = False
        # "invicible" misspelling kept: external code reads this name.
        self.green_tank_invicible = True
        self.frames_inv = 0
        self.bullet_dir = None
        self.eagle_greens_tank_on_hit_state = False
        self.yellow_tank_on_hit_state = False
        self.eagle_yellows_tank_on_hit_state = False
        self.AI_player = True
        self.Human_player = True
        for row in MAPPING:
            for col in row:
                if col == '2':
                    self.tg_pos_x = self.x
                    self.tg_pos_y = self.y
                self.x+=SQM
            self.y+=SQM
            self.x=0
        # 52x52 px collision mask; also serves as the tank's position.
        self.TG_mask = pygame.Rect(self.tg_pos_x, self.tg_pos_y, 52, 52)
    def bind(self, event):
        """Translate WSAD key events into the self.actions flags."""
        if event.type == KEYDOWN:
            if event.key == K_d:
                self.actions[0] = True
            elif event.key == K_a:
                self.actions[1] = True
            elif event.key == K_s:
                self.actions[2] = True
            elif event.key == K_w:
                self.actions[3] = True
        if event.type == KEYUP:
            if event.key == K_d:
                self.actions[0] = False
            elif event.key == K_a:
                self.actions[1] = False
            elif event.key == K_s:
                self.actions[2] = False
            elif event.key == K_w:
                self.actions[3] = False
    def move_tank(self, action):
        """Move 8 px in the first active direction of *action*, updating
        the facing sprite, then resolve horizontal and vertical collisions
        axis-by-axis so the mask stays flush against blocking tiles."""
        self.movement = [0,0]
        if action[0]:
            self.movement[0] += 8
            self.TG_face = TANK_GREEN_IMG[3]
            self.TG_face_txt = 'right'
        elif action[1]:
            self.movement[0] -= 8
            self.TG_face = TANK_GREEN_IMG[2]
            self.TG_face_txt = 'left'
        elif action[3]:
            self.movement[1] -= 8
            self.TG_face = TANK_GREEN_IMG[0]
            self.TG_face_txt = 'up'
        elif action[2]:
            self.movement[1] += 8
            self.TG_face = TANK_GREEN_IMG[1]
            self.TG_face_txt = 'down'
        self.TG_mask.x += self.movement[0]
        self.collisions_h = self.collision_test()
        for tile in self.collisions_h:
            if self.movement[0] > 0:
                self.TG_mask.right = tile.left
            if self.movement[0] < 0:
                self.TG_mask.left = tile.right
        self.TG_mask.y += self.movement[1]
        self.collisions_v = self.collision_test()
        for tile in self.collisions_v:
            if self.movement[1] > 0:
                self.TG_mask.bottom = tile.top
            if self.movement[1] < 0:
                self.TG_mask.top = tile.bottom
        self.collisions_sum = [self.collisions_h, self.collisions_v]
    def collision_test(self):
        """Return every impassable tile rect currently overlapping the
        tank mask (walls, bricks, water, both eagles)."""
        colli = []
        for back in BACKGROUND_RECT:
            if self.TG_mask.colliderect(back):
                colli.append(back)
        for back in SOLID_RECT:
            if self.TG_mask.colliderect(back):
                colli.append(back)
        for back in BRICK_RECT:
            if self.TG_mask.colliderect(back):
                colli.append(back)
        for back in WATER_RECT:
            if self.TG_mask.colliderect(back):
                colli.append(back)
        for back in EAGLE_Y:
            if self.TG_mask.colliderect(back):
                colli.append(back)
        for back in EAGLE_G:
            if self.TG_mask.colliderect(back):
                colli.append(back)
        for back in BRICK_RECT_MINI:
            if self.TG_mask.colliderect(back):
                colli.append(back)
        return colli
    def bind_shoot(self, Flag):
        """Fire when SPACE is held and shooting is allowed (*Flag*)."""
        if Flag:
            keys = pygame.key.get_pressed()
            if keys[pygame.K_SPACE]:
                flag_temp = True
                self.execute_shoot(flag_temp)
    def execute_shoot(self, Flag):
        """Spawn a bullet facing the tank's direction, register its rect,
        and start the shot cooldown.  A fresh On_Hit_By_Green instance
        handles this bullet's collisions."""
        if Flag:
            self.frames = 0
            self.tank_green_shoot_cooldown = True
            self.tank_green_shoot_allow = False
            self.b_tg = Bullet_TY(self.TG_mask.x, self.TG_mask.y, self.TG_face_txt)
            BULLETS_G_objects.append(self.b_tg)
            BULLETS_G_RECT.append(pygame.Rect(self.b_tg.x,self.b_tg.y,self.b_tg.width,self.b_tg.height))
            self.OHBG = On_Hit_By_Green(self.b_tg.dir)
            self.bullet_dir = self.b_tg.dir
    def shoot_delay(self, flag):
        """Re-allow shooting once the bullet is gone and at least 20 frames
        have elapsed since the shot."""
        if flag:
            if len(BULLETS_G_RECT) == 0 and self.frames > 20:
                self.tank_green_shoot_allow = True
                self.tank_green_shoot_cooldown = False
                self.bullet_dir = None
            self.frames += 1
    def bullets_onhit(self, TY_MASK, TG_CLASS, TY_CLASS, TY_DEST, TY_INVI, MAPPING,screen):
        """Run every collision handler for each live green bullet, remove
        the bullet if any handler reported a hit, and draw explosions."""
        if len(BULLETS_G_RECT) >= 1:
            for i, e in enumerate(BULLETS_G_RECT):
                self.explosion_l_flag = True
                self.explosion_h_flag = True
                self.brick_on_hit_state = self.OHBG.brick_on_hit(i, e)
                self.background_on_hit_state = self.OHBG.background_on_hit(i, e)
                self.yellow_tank_on_hit_state = self.OHBG.yellow_tank_on_hit(i, e, TY_MASK, TG_CLASS, TY_DEST, TY_INVI)
                self.solid_on_hit_state = self.OHBG.solid_on_hit(i, e)
                self.eagle_greens_tank_on_hit_state = self.OHBG.eagle_greens_tank_on_hit(i, e, TG_CLASS, TY_CLASS, MAPPING)
                self.eagle_yellows_tank_on_hit_state = self.OHBG.eagle_yellows_tank_on_hit(i, e, TG_CLASS, TY_CLASS, MAPPING)
                self.enemys_bullet_on_hit_state = self.OHBG.enemys_bullet_on_hit(i, e)
                self.states = [self.brick_on_hit_state,
                               self.background_on_hit_state,
                               self.yellow_tank_on_hit_state,
                               self.solid_on_hit_state,
                               self.eagle_greens_tank_on_hit_state,
                               self.eagle_yellows_tank_on_hit_state,
                               self.enemys_bullet_on_hit_state]
                for xi in self.states:
                    if xi:
                        self.OHBG.break_bullet(i)
        if self.explosion_l_flag or self.explosion_h_flag:
            self.OHBG.draw_explosion_little(screen, self.explosion_l_flag)
            self.OHBG.draw_explosion_hard(screen, self.explosion_h_flag)
    def draw(self, screen, flag_1, flag_2):
        """Draw the tank unless destroyed (*flag_1*); while invincible
        (*flag_2*) overlay the blinking shield for the first 45 frames."""
        if flag_1 is False:
            screen.blit(self.TG_face,(self.TG_mask.x,self.TG_mask.y))
        if flag_2:
            if (self.frames_inv % 4) == 0 or (self.frames_inv % 4) == 1:
                screen.blit(INVICIBLE_1_IMG,(self.TG_mask.x,self.TG_mask.y))
            elif (self.frames_inv % 4) == 2 or (self.frames_inv % 4) == 3:
                screen.blit(INVICIBLE_2_IMG,(self.TG_mask.x,self.TG_mask.y))
            if self.frames_inv >= 45:
                self.green_tank_invicible = False
            self.frames_inv += 1
    def green_tank_position_relative_with_yellow_tank(self, TY_mask, TG_mask):
        """Observation: where the yellow tank lies relative to this tank.
        Returns flags [R,L,U,D]; equal coordinates set both of a pair."""
        flags = [False, False, False, False]
        if TG_mask.x <= TY_mask.x:
            flags[0] = True
        if TG_mask.x >= TY_mask.x:
            flags[1] = True
        if TG_mask.y >= TY_mask.y:
            flags[2] = True
        if TG_mask.y <= TY_mask.y:
            flags[3] = True
        return flags
    def green_eagle_position_relative_with_green_tank(self, TG_mask):
        """Observation: where green's own eagle lies relative to the tank.
        Returns flags [R,L,U,D] (last eagle rect wins)."""
        flags = [False, False, False, False]
        for i in EAGLE_G:
            if TG_mask.x <= i.x:
                flags[0] = True
            if TG_mask.x >= i.x:
                flags[1] = True
            if TG_mask.y >= i.y:
                flags[2] = True
            if TG_mask.y <= i.y:
                flags[3] = True
        return flags
    def yellow_eagle_position_relative_with_green_tank(self, TG_mask):
        """Observation: where YELLOW's eagle lies relative to this tank.
        BUG FIX: the original iterated EAGLE_G (green's own eagle) -
        corrected to EAGLE_Y to match the method's name and its
        Tank_Yellow counterpart."""
        flags = [False, False, False, False]
        for i in EAGLE_Y:
            if TG_mask.x <= i.x:
                flags[0] = True
            if TG_mask.x >= i.x:
                flags[1] = True
            if TG_mask.y >= i.y:
                flags[2] = True
            if TG_mask.y <= i.y:
                flags[3] = True
        return flags
    def green_tank_direction(self):
        """Observation: one-hot facing direction [R,L,U,D]."""
        flags = [False, False, False, False]
        if self.TG_face_txt == 'right':
            flags[0] = True
        elif self.TG_face_txt == 'left':
            flags[1] = True
        elif self.TG_face_txt == 'up':
            flags[2] = True
        elif self.TG_face_txt == 'down':
            flags[3] = True
        return flags
    def green_tank_bullet_presence(self):
        """Observation: [True] while this tank's bullet is in flight."""
        flag = self.tank_green_shoot_allow is False
        return [flag]
    def green_tank_own_bullet_direction(self, dir, pres):
        """Observation: one-hot [R,L,U,D] of the in-flight bullet's
        direction, all False when no bullet is present (*pres*)."""
        flags = [False, False, False, False]
        if pres:
            if dir == 'right':
                flags[0] = True
            elif dir == 'left':
                flags[1] = True
            elif dir == 'up':
                flags[2] = True
            elif dir == 'down':
                flags[3] = True
        return flags
    def green_tank_faced_to_entity_solid(self, dir, TY_MASK, TG_MASK):
        """Ray-cast from the tank centre along facing direction *dir*
        ([R,L,U,D] flags), sampling a 1x1 probe every 16 px for 44 steps,
        and report the first thing hit as
        [background, solid, enemy-tank] booleans.

        NOTE(review): if no *dir* flag is set the final return references
        self.loop_logic_* from a previous call (AttributeError on the very
        first call) - confirm callers always pass a one-hot dir.
        """
        self.xn = TG_MASK.x + 26
        self.yn = TG_MASK.y + 26
        if dir[0] is True:
            for i in range(44):
                self.xn += 16
                self.sample = pygame.Rect(self.xn,self.yn,1,1)
                self.loop_logic_background = self.green_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
                self.loop_logic_solid = self.green_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
                self.loop_logic_enemy = self.green_tank_faced_to_enemy_loop(self.sample, TY_MASK)
                self.logic_array = np.array([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
                self.logic_array_single = np.where(self.logic_array == True)
                if len(self.logic_array_single[0]) >= 1:
                    return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
        if dir[1] is True:
            for i in range(44):
                self.xn -= 16
                self.sample = pygame.Rect(self.xn,self.yn,1,1)
                self.loop_logic_background = self.green_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
                self.loop_logic_solid = self.green_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
                self.loop_logic_enemy = self.green_tank_faced_to_enemy_loop(self.sample, TY_MASK)
                self.logic_array = np.array([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
                self.logic_array_single = np.where(self.logic_array == True)
                if len(self.logic_array_single[0]) >= 1:
                    return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
        if dir[2] is True:
            for i in range(44):
                self.yn -= 16
                self.sample = pygame.Rect(self.xn,self.yn,1,1)
                self.loop_logic_background = self.green_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
                self.loop_logic_solid = self.green_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
                self.loop_logic_enemy = self.green_tank_faced_to_enemy_loop(self.sample, TY_MASK)
                self.logic_array = np.array([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
                self.logic_array_single = np.where(self.logic_array == True)
                if len(self.logic_array_single[0]) >= 1:
                    return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
        if dir[3] is True:
            for i in range(44):
                self.yn += 16
                self.sample = pygame.Rect(self.xn,self.yn,1,1)
                self.loop_logic_background = self.green_tank_faced_to_entity_loop(self.sample, BACKGROUND_RECT)
                self.loop_logic_solid = self.green_tank_faced_to_entity_loop(self.sample, SOLID_RECT)
                self.loop_logic_enemy = self.green_tank_faced_to_enemy_loop(self.sample, TY_MASK)
                self.logic_array = np.array([self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy])
                self.logic_array_single = np.where(self.logic_array == True)
                if len(self.logic_array_single[0]) >= 1:
                    return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
        return [self.loop_logic_background, self.loop_logic_solid, self.loop_logic_enemy]
    def green_tank_faced_to_entity_loop(self, sample, entity):
        """True when the probe rect *sample* overlaps any rect in *entity*."""
        self.sample = sample
        for ni in entity:
            if self.sample.colliderect(ni):
                return True
        return False
    def green_tank_faced_to_enemy_loop(self, sample, TY_MASK):
        """True when the probe rect *sample* overlaps the yellow tank."""
        self.sample = sample
        if self.sample.colliderect(TY_MASK):
            return True
        return False
    def green_tank_stuck(self, colli):
        """Observation: [True] when the last move collided on either axis
        (*colli* is move_tank's collisions_sum)."""
        if len(colli[0]) >= 1 or len(colli[1]) >= 1:
            return [True]
        return [False]
    def yellow_tank_got_hit(self, flag):
        """Consume-and-report: [True] once after green destroyed the yellow
        tank, then the state flag is cleared.  *flag* is unused."""
        if self.yellow_tank_on_hit_state:
            self.yellow_tank_on_hit_state = False
            print('Zielony czołg zniszczył Żółty czołg')
            return [True]
        return [False]
    def green_eagle_got_hit_by_green(self, flag):
        """Consume-and-report: [True] once after green destroyed its own
        eagle.  *flag* is unused."""
        if self.eagle_greens_tank_on_hit_state:
            self.eagle_greens_tank_on_hit_state = False
            print('Zielony czołg zniszczył swojego orła')
            return [True]
        return [False]
    def yellow_eagle_got_hit_by_green(self, flag):
        """Consume-and-report: [True] once after green destroyed the enemy
        eagle.  BUG FIX: the original returned [False] in the success
        branch, so the +150 reward in play_step could never trigger.
        *flag* is unused."""
        if self.eagle_yellows_tank_on_hit_state:
            self.eagle_yellows_tank_on_hit_state = False
            print('Zielony czołg zniszczył orła przeciwnika')
            return [True]
        return [False]
    def green_tank_collision_sensor(self, TG_MASK):
        """Observation: [True] when a 56x56 rect (the mask inflated by
        2 px) touches a wall, water or background tile."""
        self.xs = TG_MASK.x - 2
        self.ys = TG_MASK.y - 2
        self.coli_sensor = pygame.Rect(self.xs,self.ys,56,56)
        for n in SOLID_RECT:
            if self.coli_sensor.colliderect(n):
                return [True]
        for n in WATER_RECT:
            if self.coli_sensor.colliderect(n):
                return [True]
        for n in BACKGROUND_RECT:
            if self.coli_sensor.colliderect(n):
                return [True]
        return [False]
    def play_step(self, action, yellow_tank_got_hit_by_green, green_tank_got_hit_by_yellow, green_eagle_got_hit_by_green, yellow_eagle_got_hit_by_green, green_tank_collision_sensor_state, frame_counter_idle):
        """RL step: apply *action* ([R,L,D,U,shoot]) then compute
        (reward, game_over).  Priority order: wall proximity -0.1,
        kill +50, death -50, own eagle lost -150, enemy eagle +150,
        1000 idle frames -10 and episode end."""
        self.move_it(action)
        REWARD = 0
        GAME_OVER = False
        if green_tank_collision_sensor_state[0]:
            REWARD = - 0.1
        elif yellow_tank_got_hit_by_green[0]:
            GAME_OVER = True
            REWARD = 50
        elif green_tank_got_hit_by_yellow[0]:
            GAME_OVER = True
            REWARD = -50
        elif green_eagle_got_hit_by_green[0]:
            GAME_OVER = True
            REWARD = -150
        elif yellow_eagle_got_hit_by_green[0]:
            GAME_OVER = True
            REWARD = 150
        elif frame_counter_idle >= 1000:
            REWARD = - 10
            GAME_OVER = True
        return REWARD, GAME_OVER
    def move_it(self, action):
        """Apply a combined move+shoot action vector [R,L,D,U,shoot]."""
        self.move_tank(action)
        if action[4] == 1:
            self.execute_shoot(self.tank_green_shoot_allow)
    def restart(self):
        """Teleport the tank back to its MAPPING spawn position."""
        self.TG_mask.x = self.tg_pos_x
        self.TG_mask.y = self.tg_pos_y
class Main:
    def __init__(self):
        """Initialise pygame, the game window, the map and both tanks
        together with their AI drivers."""
        pygame.init()
        self.frame_counter = 0
        self.frame_counter_idle = 0  # frames since the last scoring event
        # Window sized to the 17x9 tile arena (SQM px per tile).
        self.window = pygame.display.set_mode((SQM*17,SQM*9))
        self.mapping = Mapping()  # populates the global tile rect lists
        self.ty = Tank_Yellow()
        self.tg = Tank_Green()
        self.AI_Y = AI_YELLOW()
        self.AI_G = AI_GREEN()
        self.clock = pygame.time.Clock()
def runtime(self):
self.run = True
while self.run:
self.window.fill((0,0,0))
self.ty.move_tank(self.ty.actions)
self.ty.draw(self.window, self.ty.yellow_tank_destroyed, self.ty.yellow_tank_invicible)
self.ty.bind_shoot(self.ty.tank_yellow_shoot_allow)
self.ty.shoot_delay(self.ty.tank_yellow_shoot_cooldown)
self.tg.move_tank(self.tg.actions)
self.tg.draw(self.window, self.tg.green_tank_destroyed, self.tg.green_tank_invicible)
self.tg.bind_shoot(self.tg.tank_green_shoot_allow)
self.tg.shoot_delay(self.tg.tank_green_shoot_cooldown)
self.mapping.draw_props(self.window)
for event in pygame.event.get():
if event.type == pygame.QUIT:
self.run = False
self.ty.bind(event)
self.tg.bind(event)
for b_ty in BULLETS_Y_objects:
b_ty.draw(self.window)
b_ty.move()
for i in BULLETS_Y_RECT:
b_ty.movehitbox(i)
for b_tg in BULLETS_G_objects:
b_tg.draw(self.window)
b_tg.move()
for i in BULLETS_G_RECT:
b_tg.movehitbox(i)
self.ty.bullets_onhit(self.tg.TG_mask, self.tg, self.ty, self.tg.green_tank_destroyed, self.tg.green_tank_invicible, self.mapping, self.window)
self.tg.bullets_onhit(self.ty.TY_mask, self.ty, self.tg, self.ty.yellow_tank_destroyed, self.ty.yellow_tank_invicible, self.mapping, self.window)
#Generowanie state
#Pozycje dwóch czołgów względem siebie - 4 State
self.yellow_tank_position_relative_with_green_tank_state = self.ty.yellow_tank_position_relative_with_green_tank(self.ty.TY_mask, self.tg.TG_mask)
self.green_tank_position_relative_with_yellow_tank_state = self.tg.green_tank_position_relative_with_yellow_tank(self.ty.TY_mask, self.tg.TG_mask)
#Pozycja własnego orła względem czołgu - 4 State
self.yellow_eagle_position_relative_with_yellow_tank_state = self.ty.yellow_eagle_position_relative_with_yellow_tank(self.ty.TY_mask)
self.green_eagle_position_relative_with_green_tank_state = self.tg.green_eagle_position_relative_with_green_tank(self.tg.TG_mask)
#Pozycja obcego orła względem czołgu - 4 State
self.green_eagle_position_relative_with_yellow_tank_state = self.ty.green_eagle_position_relative_with_yellow_tank(self.ty.TY_mask)
self.yellow_eagle_position_relative_with_green_tank_state = self.tg.yellow_eagle_position_relative_with_green_tank(self.tg.TG_mask)
#Zwrot swojego czołgu - 4 State
self.yellow_tank_direction_state = self.ty.yellow_tank_direction()
self.green_tank_direction_state = self.ty.yellow_tank_direction()
#Obecność swojego pocisku - 1 State
self.yellow_tank_own_bullet_presence_state = self.ty.yellow_tank_bullet_presence()
self.green_tank_own_bullet_presence_state = self.tg.green_tank_bullet_presence()
#Obecność posicsku swojego przeciwnika - 1 State
self.yellow_tank_enemys_bullet_presence_state = self.green_tank_own_bullet_presence_state
self.green_tank_enemys_bullet_presence_state = self.yellow_tank_own_bullet_presence_state
#Kierunek swojego pocisku - 4 State
self.yellow_tank_own_bullet_direction_state = self.ty.yellow_tank_own_bullet_direction(self.ty.bullet_dir, self.yellow_tank_own_bullet_presence_state)
self.green_tank_own_bullet_direction_state = self.tg.green_tank_own_bullet_direction(self.tg.bullet_dir, self.green_tank_own_bullet_presence_state)
#Kierunek pocisku przeciwnika - 4 State
self.yellow_tank_enemys_bullet_direction_state = self.green_tank_own_bullet_direction_state
self.green_tank_enemys_bullet_direction_state = self.yellow_tank_own_bullet_direction_state
#Kierunek zwrotu czołgu do obiektów - Background, Solid, Eagle_own, Eagle_enemy, Enamy_tank - 5 State
#Wyłączono ją Tymaczasowo
self.yellow_tank_faced_to_entity_solid_state = self.ty.yellow_tank_faced_to_entity_solid(self.yellow_tank_direction_state, self.ty.TY_mask, self.tg.TG_mask, self.window)
self.green_tank_faced_to_entity_solid_state = self.tg.green_tank_faced_to_entity_solid(self.green_tank_direction_state, self.ty.TY_mask, self.tg.TG_mask)
#Czy dany czołg utkną - 1 State
#self.yellow_tank_stuck_state = self.ty.yellow_tank_stuck(self.ty.collisions_sum)
#self.green_tank_stuck_state = self.tg.green_tank_stuck(self.tg.collisions_sum)
#Czy czołg otrzymał obrażenia - 1 State
self.green_tank_got_hit_by_yellow_state = self.ty.green_tank_got_hit(self.yellow_tank_own_bullet_presence_state)
self.yellow_tank_got_hit_by_green_state = self.tg.yellow_tank_got_hit(self.green_tank_own_bullet_presence_state)
#Czy orzeł swój otrzymał obrażenia - 1 State
self.yellow_eagle_got_hit_by_yellow_state = self.ty.yellow_eagle_got_hit_by_yellow(self.yellow_tank_own_bullet_presence_state)
self.green_eagle_got_hit_by_green_state = self.tg.green_eagle_got_hit_by_green(self.green_tank_own_bullet_presence_state)
#Czy orzeł przeciwnika otrzymał obrażenia - 1 State
self.green_eagle_got_hit_by_yellow_state = self.ty.green_eagle_got_hit_by_yellow(self.yellow_tank_own_bullet_presence_state)
self.yellow_eagle_got_hit_by_green_state = self.tg.yellow_eagle_got_hit_by_green(self.green_tank_own_bullet_presence_state)
#Sensor kolizyjny 1 State
self.yellow_tank_collision_sensor_state = self.ty.yellow_tank_collision_sensor(self.ty.TY_mask)
self.green_tank_collision_sensor_state = self.tg.green_tank_collision_sensor(self.tg.TG_mask)
#Get State Yellow
yellow_tank_current_state_old = self.AI_Y.get_state(
self.yellow_tank_position_relative_with_green_tank_state,
#self.yellow_eagle_position_relative_with_yellow_tank_state,
#self.green_eagle_position_relative_with_yellow_tank_state,
self.yellow_tank_direction_state,
self.yellow_tank_own_bullet_presence_state,
self.yellow_tank_enemys_bullet_presence_state,
self.yellow_tank_own_bullet_direction_state,
self.yellow_tank_enemys_bullet_direction_state,
self.yellow_tank_faced_to_entity_solid_state,
self.yellow_tank_collision_sensor_state,
self.green_tank_got_hit_by_yellow_state,
self.yellow_tank_got_hit_by_green_state,
#self.yellow_eagle_got_hit_by_yellow_state,
#self.green_eagle_got_hit_by_yellow_state
)
move_calculated = self.AI_Y.get_action(yellow_tank_current_state_old, self.frame_counter)
reward_y, done_y = self.ty.play_step(move_calculated,
self.green_tank_got_hit_by_yellow_state,
self.yellow_tank_got_hit_by_green_state,
self.yellow_eagle_got_hit_by_yellow_state,
self.green_eagle_got_hit_by_yellow_state,
self.yellow_tank_collision_sensor_state,
self.frame_counter_idle
)
yellow_tank_current_state_new = self.AI_Y.get_state(
self.yellow_tank_position_relative_with_green_tank_state,
#self.yellow_eagle_position_relative_with_yellow_tank_state,
#self.green_eagle_position_relative_with_yellow_tank_state,
self.yellow_tank_direction_state,
self.yellow_tank_own_bullet_presence_state,
self.yellow_tank_enemys_bullet_presence_state,
self.yellow_tank_own_bullet_direction_state,
self.yellow_tank_enemys_bullet_direction_state,
self.yellow_tank_faced_to_entity_solid_state,
self.yellow_tank_collision_sensor_state,
self.green_tank_got_hit_by_yellow_state,
self.yellow_tank_got_hit_by_green_state,
#self.yellow_eagle_got_hit_by_yellow_state,
#self.green_eagle_got_hit_by_yellow_state
)
self.AI_Y.train_short_memory(yellow_tank_current_state_old, move_calculated, reward_y, yellow_tank_current_state_new, done_y)
self.AI_Y.remember(yellow_tank_current_state_old, move_calculated, reward_y, yellow_tank_current_state_new, done_y)
final_score_value_y = self.AI_Y.final_score(reward_y)
self.AI_Y.print_state(yellow_tank_current_state_old, self.frame_counter, final_score_value_y)
#Get State Green
green_tank_current_state_old = self.AI_G.get_state(
self.green_tank_position_relative_with_yellow_tank_state,
#self.green_eagle_position_relative_with_green_tank_state,
#self.yellow_eagle_position_relative_with_green_tank_state,
self.green_tank_direction_state,
self.green_tank_own_bullet_presence_state,
self.green_tank_enemys_bullet_presence_state,
self.green_tank_own_bullet_direction_state,
self.green_tank_enemys_bullet_direction_state,
self.green_tank_faced_to_entity_solid_state,
self.green_tank_collision_sensor_state,
self.yellow_tank_got_hit_by_green_state,
self.green_tank_got_hit_by_yellow_state,
#self.yellow_eagle_got_hit_by_yellow_state,
#self.green_eagle_got_hit_by_yellow_state
)
move_calculated = self.AI_G.get_action(green_tank_current_state_old, self.frame_counter)
reward_g, done_g = self.tg.play_step(move_calculated,
self.yellow_tank_got_hit_by_green_state,
self.green_tank_got_hit_by_yellow_state,
self.green_eagle_got_hit_by_green_state,
self.yellow_eagle_got_hit_by_green_state,
self.green_tank_collision_sensor_state,
self.frame_counter_idle
)
green_tank_current_state_new = self.AI_G.get_state(
self.green_tank_position_relative_with_yellow_tank_state,
#self.green_eagle_position_relative_with_green_tank_state,
#self.yellow_eagle_position_relative_with_green_tank_state,
self.green_tank_direction_state,
self.green_tank_own_bullet_presence_state,
self.green_tank_enemys_bullet_presence_state,
self.green_tank_own_bullet_direction_state,
self.green_tank_enemys_bullet_direction_state,
self.green_tank_faced_to_entity_solid_state,
self.green_tank_collision_sensor_state,
self.yellow_tank_got_hit_by_green_state,
self.green_tank_got_hit_by_yellow_state,
#self.yellow_eagle_got_hit_by_yellow_state,
#self.green_eagle_got_hit_by_yellow_state
)
self.AI_G.train_short_memory(green_tank_current_state_old, move_calculated, reward_g, green_tank_current_state_new, done_g)
self.AI_G.remember(green_tank_current_state_old, move_calculated, reward_g, green_tank_current_state_new, done_g)
final_score_value_g = self.AI_G.final_score(reward_g)
self.AI_G.print_state(green_tank_current_state_old, self.frame_counter, final_score_value_g)
if_done_state = self.if_done(done_g, done_y)
if if_done_state:
self.frame_counter_idle = 0
self.restart_game(if_done_state)
self.frame_counter += 1
self.frame_counter_idle += 1
pygame.display.update()
self.clock.tick(FPS)
def restart_game(self,if_done_state):
keys = pygame.key.get_pressed()
if keys[pygame.K_p] or if_done_state:
self.ty.restart()
self.tg.restart()
def if_done(self, dg, dy):
if dg or dy:
return True
else: return False
# Script entry point: build the game/agents and run the main loop.
if __name__ == '__main__':
    main = Main()
    main.runtime()
0b166977697a50b8b973f408ecd8c891246ce69d | 1,077 | py | Python | Scripts/long term monobit test/graph_results/graph_results.py | robseward/Ziffer | 4113bf7a92fa550195c5cef76fc9f20c46f183ff | [
"MIT"
] | 15 | 2016-12-17T02:49:24.000Z | 2020-05-04T22:51:35.000Z | Scripts/long term monobit test/graph_results/graph_results.py | robseward/Ziffer | 4113bf7a92fa550195c5cef76fc9f20c46f183ff | [
"MIT"
] | 1 | 2018-06-26T22:06:23.000Z | 2020-05-25T17:30:16.000Z | Scripts/long term monobit test/graph_results/graph_results.py | robseward/Ziffer | 4113bf7a92fa550195c5cef76fc9f20c46f183ff | [
"MIT"
] | 2 | 2019-10-27T21:34:15.000Z | 2021-09-29T17:40:04.000Z | import matplotlib.pyplot as plot
"""Plot monobit-test deviations over time with a linear trend line.

Fixes over the original:
* the CSV file is closed deterministically (``with`` block),
* values are parsed to float once at read time -- the original kept the
  raw strings and relied on ``np.array(map(float, values))``, which on
  Python 3 wraps the lazy map object in a useless 0-d array and breaks
  ``polyfit``.
"""
import matplotlib.pyplot as plot
import matplotlib.dates as md
from matplotlib.dates import date2num
import datetime
from numpy import polyfit
import numpy as np

values = []
timestamps = []
with open("deviations.csv") as f:
    for (i, line) in enumerate(f):
        if i >= 1:  # skip the header row
            lineArray = line.split(",")
            date = datetime.datetime.strptime(lineArray[0], '%Y-%m-%d %H:%M:%S')
            timestamps.append(date2num(date))
            values.append(float(lineArray[1].strip()))
        if i > 100000:  # safety cap for very large logs
            break

plot.subplots_adjust(bottom=0.2)
plot.xticks(rotation=25)
ax = plot.gca()
xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)

# Least-squares linear fit; fit_fn(x) returns the trend estimate at x.
fit = polyfit(timestamps, np.array(values), 1)
fit_fn = np.poly1d(fit)
plot.plot(timestamps, values, timestamps, fit_fn(timestamps), '--k')
plot.show()
| 28.342105 | 97 | 0.680594 | import matplotlib.pyplot as plot
# Duplicate copy of the deviation-plotting script ('import
# matplotlib.pyplot as plot' sits on the preceding mangled line).
import matplotlib.dates as md
from matplotlib.dates import date2num
import datetime
# from pylab import *
from numpy import polyfit
import numpy as np

f = open("deviations.csv")  # NOTE(review): never closed; prefer 'with'
values = []
timestamps = []
for (i, line) in enumerate(f):
    if i >= 1:  # skip the header row
        lineArray = line.split(",")
        date = datetime.datetime.strptime(lineArray[0], '%Y-%m-%d %H:%M:%S')
        timestamps.append(date2num(date))
        value = lineArray[1].strip()
        values.append(value)
    if i > 100000:  # cap for very large files
        break
plot.subplots_adjust(bottom=0.2)
plot.xticks( rotation=25 )
ax=plot.gca()
xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')
ax.xaxis.set_major_formatter(xfmt)
# countArray = np.arange(0.0, len(timestamps))
# NOTE(review): on Python 3 map() is lazy, so np.array(map(...)) builds a
# 0-d object array and polyfit fails; this works as written on Python 2.
floatValues = np.array(map(float, values))
fit = polyfit(timestamps,floatValues,1)
fit_fn = np.poly1d(fit) # fit_fn is now a function which takes in x and returns an estimate for y
# plot(x,y, 'yo', x, fit_fn(x), '--k')
plot.plot(timestamps, values, timestamps, fit_fn(timestamps), '--k')
#plot.plot(timestamps, values)
plot.show()
| 0 | 0 | 0 |
a7680cf40e16dc12bf0c188958c9d947ce0852c4 | 1,761 | py | Python | run/benchmark_runs.py | BenRLewis/P4-Source-Routing | f91fef8b0b16318bd613ae8c8e43e18a7d780733 | [
"Apache-2.0"
] | 1 | 2021-03-26T10:48:03.000Z | 2021-03-26T10:48:03.000Z | run/benchmark_runs.py | BenRLewis/P4-Source-Routing | f91fef8b0b16318bd613ae8c8e43e18a7d780733 | [
"Apache-2.0"
] | null | null | null | run/benchmark_runs.py | BenRLewis/P4-Source-Routing | f91fef8b0b16318bd613ae8c8e43e18a7d780733 | [
"Apache-2.0"
] | null | null | null | import argparse
# Python 2 benchmark driver: starts N P4 switches and times a table
# insert for each run. ('import argparse' sits on the preceding mangled
# line.)
# NOTE(review): this copy references run_table_insert() without defining
# it; the full version of the script defines a ZeroMQ helper by that name.
import pexpect
import sys
import time
import timeit
import zmq

parser = argparse.ArgumentParser('Run a range of tests and write the results to a file')
parser.add_argument('runs', type=int, help='The number of runs for each approach')
parser.add_argument('min_number', type=int, help='The starting number of switches to run')
parser.add_argument('max_number', type=int, help='The maximum number of switches to run')
parser.add_argument('steps', type=int, help='Steps between starting and max number of switches')
args = parser.parse_args()

for num_switches in range(args.min_number, args.max_number + 1, args.steps):
    # One result file per topology size.
    fout = open("results-%d-%dswitches.txt" % (time.time(), num_switches), 'w')
    for run in range(0, args.runs):
        "Run %d" % run  # no-op expression (probably meant to be printed)
        command = "python start_switches.py %d ../p4src/tiered.json ../p4src/tiered.p4info" % num_switches
        child = pexpect.spawn(command, timeout=300)
        child.logfile = sys.stdout
        child.expect("Everything should be running by now...")
        print "Switches should have started for run %d. Sleeping for 30 seconds for everything to settle" % run
        time.sleep(30)
        # Time a single table insert against the running switches.
        t = timeit.Timer(lambda: run_table_insert(num_switches))
        fout.write(str(t.timeit(1)) + '\n')
        print "Run %d complete" % run
        child.send('\003')  # Ctrl-C the spawned switch process
        child.expect(pexpect.EOF)
        print "Done with this run"
| 41.928571 | 111 | 0.687677 | import argparse
# Python 2 benchmark driver: starts N P4 switches and times a ZeroMQ
# table insert for each run. ('import argparse' sits on the preceding
# mangled line.)
import pexpect
import sys
import time
import timeit
import zmq

def run_table_insert(dstSwitch):
    """Send one table-insert request over ZeroMQ and wait for the reply."""
    context = zmq.Context()
    socket = context.socket(zmq.REQ)
    socket.connect("tcp://localhost:5555")
    message = {'dstIsland': dstSwitch, 'dstMac': "AA:00:00:00:05:01", 'egressPort': 1}
    socket.send_pyobj(message)
    resp = socket.recv_pyobj()
    print resp
    print "Done with table insert"
    return

parser = argparse.ArgumentParser('Run a range of tests and write the results to a file')
parser.add_argument('runs', type=int, help='The number of runs for each approach')
parser.add_argument('min_number', type=int, help='The starting number of switches to run')
parser.add_argument('max_number', type=int, help='The maximum number of switches to run')
parser.add_argument('steps', type=int, help='Steps between starting and max number of switches')
args = parser.parse_args()

for num_switches in range(args.min_number, args.max_number + 1, args.steps):
    # One result file per topology size.
    fout = open("results-%d-%dswitches.txt" % (time.time(), num_switches), 'w')
    for run in range(0, args.runs):
        "Run %d" % run  # no-op expression (probably meant to be printed)
        command = "python start_switches.py %d ../p4src/tiered.json ../p4src/tiered.p4info" % num_switches
        child = pexpect.spawn(command, timeout=300)
        child.logfile = sys.stdout
        child.expect("Everything should be running by now...")
        print "Switches should have started for run %d. Sleeping for 30 seconds for everything to settle" % run
        time.sleep(30)
        # Time a single table insert against the running switches.
        t = timeit.Timer(lambda: run_table_insert(num_switches))
        fout.write(str(t.timeit(1)) + '\n')
        print "Run %d complete" % run
        child.send('\003')  # Ctrl-C the spawned switch process
        child.expect(pexpect.EOF)
        print "Done with this run"
07c14b05077c70e13464049d9d52342a04b1dd15 | 22,514 | py | Python | asn1tools/codecs/xer.py | JoelWilloughby/asn1tools | e495bf2c40089c09c950985eccf9633ba2aa30b6 | [
"MIT"
] | 1 | 2020-03-31T07:36:57.000Z | 2020-03-31T07:36:57.000Z | asn1tools/codecs/xer.py | ryanhz/asn1tools | e495bf2c40089c09c950985eccf9633ba2aa30b6 | [
"MIT"
] | null | null | null | asn1tools/codecs/xer.py | ryanhz/asn1tools | e495bf2c40089c09c950985eccf9633ba2aa30b6 | [
"MIT"
] | null | null | null | """XML Encoding Rules (XER) codec.
"""
import time
import sys
from xml.etree import ElementTree
import binascii
import datetime
from ..parser import EXTENSION_MARKER
from . import EncodeError
from . import DecodeError
from . import compiler
from . import format_or
from . import utc_time_to_datetime
from . import utc_time_from_datetime
from . import generalized_time_to_datetime
from . import generalized_time_from_datetime
from .compiler import enum_values_as_dict
| 27.289697 | 89 | 0.567514 | """XML Encoding Rules (XER) codec.
"""
import time
import sys
from xml.etree import ElementTree
import binascii
import datetime
from ..parser import EXTENSION_MARKER
from . import EncodeError
from . import DecodeError
from . import compiler
from . import format_or
from . import utc_time_to_datetime
from . import utc_time_from_datetime
from . import generalized_time_to_datetime
from . import generalized_time_from_datetime
from .compiler import enum_values_as_dict
def indent_xml(element, indent, level=0):
    """Pretty-print *element* in place by rewriting text/tail whitespace.

    Only blank (or missing) text and tail nodes are touched, so existing
    content is never clobbered. *indent* is the per-level indent string.
    """
    newline = "\n" + level * indent

    def blank(text):
        return not text or not text.strip()

    if len(element):
        if blank(element.text):
            element.text = newline + indent
        if blank(element.tail):
            element.tail = newline
        for child in element:
            indent_xml(child, indent, level + 1)
        # The last child precedes the parent's closing tag, so its tail
        # is de-indented to the parent's level.
        if blank(child.tail):
            child.tail = newline
    elif level and blank(element.tail):
        element.tail = newline
class Type(object):
    """Base class for all compiled XER types.

    Stores the XML tag (the ASN.1 name with spaces replaced by
    underscores), the ASN.1 type name, and the OPTIONAL/DEFAULT flags.
    """

    def __init__(self, name, type_name):
        # Spaces are illegal in XML tags; swap them for underscores.
        self.name = name.replace(' ', '_')
        self.type_name = type_name
        self.default = None
        self.optional = False

    def set_size_range(self, minimum, maximum, has_extension_marker):
        # Size constraints do not affect the XER representation.
        pass

    def set_default(self, value):
        self.default = value

    def get_default(self):
        return self.default

    def has_default(self):
        return self.default is not None

    def encode(self, data):
        raise NotImplementedError('To be implemented by subclasses.')

    def decode(self, element):
        raise NotImplementedError('To be implemented by subclasses.')

    def encode_of(self, data):
        """Untagged encoding used inside SEQUENCE OF / SET OF."""
        return self.encode(data)

    def decode_of(self, element):
        """Untagged decoding used inside SEQUENCE OF / SET OF."""
        return self.decode(element)
class StringType(Type):
    """Shared base for the XER character-string types: the element text
    carries the value verbatim."""

    def __init__(self, name, type_name=None):
        # Default the ASN.1 type name to the subclass name
        # (UTF8String, IA5String, ...).
        if type_name is None:
            type_name = self.__class__.__name__

        super(StringType, self).__init__(name, type_name)

    def encode(self, data):
        """Encode *data* as <name>data</name>; an empty string yields an
        element with no text node."""
        element = ElementTree.Element(self.name)

        if len(data) > 0:
            element.text = data

        return element

    def decode(self, element):
        """Return the element text as a unicode string ('' when absent)."""
        if element.text is None:
            return u''
        else:
            if sys.version_info[0] > 2:
                return element.text
            else:
                # Python 2: ElementTree may hand back a byte string.
                return unicode(element.text)

    def __repr__(self):
        return '{}({})'.format(self.__class__.__name__,
                               self.name)
class MembersType(Type):
    """Shared base for constructed types with named members
    (SEQUENCE / SET)."""

    def __init__(self, name, members, type_name):
        super(MembersType, self).__init__(name, type_name)
        self.members = members

    def encode(self, data):
        """Encode the dict *data*, one child element per present member.

        Raises EncodeError when a mandatory member (neither OPTIONAL nor
        DEFAULT) is missing from *data*.
        """
        element = ElementTree.Element(self.name)

        for member in self.members:
            name = member.name

            if name in data:
                try:
                    member_element = member.encode(data[name])
                except EncodeError as e:
                    # Record the member name on the error's location path.
                    e.location.append(member.name)
                    raise
            elif member.optional or member.has_default():
                continue
            else:
                raise EncodeError(
                    "{} member '{}' not found in {}.".format(
                        self.__class__.__name__,
                        name,
                        data))

            element.append(member_element)

        return element

    def decode(self, element):
        """Decode the children of *element* into a dict.

        Absent OPTIONAL members are omitted; absent DEFAULT members are
        filled in with their default value.
        """
        values = {}

        for member in self.members:
            name = member.name
            member_element = element.find(name)

            if member_element is not None:
                value = member.decode(member_element)
                values[name] = value
            elif member.optional:
                pass
            elif member.has_default():
                values[name] = member.get_default()

        return values

    def __repr__(self):
        return '{}({}, [{}])'.format(
            self.__class__.__name__,
            self.name,
            ', '.join([repr(member) for member in self.members]))
class ArrayType(Type):
    """Shared base for SEQUENCE OF / SET OF: a homogeneous list of
    elements encoded with their own (untagged) representation."""

    def __init__(self, name, element_type, type_name):
        super(ArrayType, self).__init__(name, type_name)
        self.element_type = element_type

    def encode(self, data):
        wrapper = ElementTree.Element(self.name)
        wrapper.extend(self.element_type.encode_of(entry) for entry in data)

        return wrapper

    def decode(self, element):
        return [self.element_type.decode_of(child) for child in element]

    def __repr__(self):
        return '{}({}, {})'.format(self.__class__.__name__,
                                   self.name,
                                   self.element_type)
class Boolean(Type):
    """BOOLEAN: represented by an empty <true/> or <false/> element."""

    def __init__(self, name):
        super(Boolean, self).__init__(name, 'BOOLEAN')

    def encode(self, data):
        wrapper = ElementTree.Element(self.name)
        wrapper.append(self.encode_of(data))

        return wrapper

    def decode(self, element):
        # Anything other than an explicit <true/> decodes as False.
        return element.find('true') is not None

    def encode_of(self, data):
        return ElementTree.Element('true' if data else 'false')

    def decode_of(self, element):
        return element.tag == 'true'

    def __repr__(self):
        return 'Boolean({})'.format(self.name)
class Integer(Type):
    """INTEGER: encoded as its base-10 text representation."""

    def __init__(self, name):
        super(Integer, self).__init__(name, 'INTEGER')

    def encode(self, data):
        node = ElementTree.Element(self.name)
        node.text = str(data)

        return node

    def decode(self, element):
        return int(element.text)

    def __repr__(self):
        return 'Integer({})'.format(self.name)
class Real(Type):
    """REAL: encoded as a decimal mantissa/exponent string, e.g. '2.5E1'."""

    def __init__(self, name):
        super(Real, self).__init__(name, 'REAL')

    def encode(self, data):
        """Encode *data* as <name>mEx</name> with |m| < 10.

        Fix: the original normalization loop (``while abs(data) >= 10``)
        never terminates for ``float('inf')`` and silently produced
        'nanE0' for NaN. Non-finite values now raise EncodeError.
        """
        data = float(data)

        # data != data is the import-free NaN test.
        if data != data or data in (float('inf'), float('-inf')):
            raise EncodeError(
                "Cannot encode non-finite REAL value {}.".format(data))

        exponent = 0

        while abs(data) >= 10:
            data /= 10
            exponent += 1

        element = ElementTree.Element(self.name)
        element.text = '{}E{}'.format(data, exponent)

        return element

    def decode(self, element):
        return float(element.text)

    def __repr__(self):
        return 'Real({})'.format(self.name)
class Null(Type):
    """NULL: the value is implicit; an empty element is enough."""

    def __init__(self, name):
        super(Null, self).__init__(name, 'NULL')

    def encode(self, data):
        # *data* is ignored -- NULL carries no information.
        return ElementTree.Element(self.name)

    def decode(self, element):
        return None

    def __repr__(self):
        return 'Null({})'.format(self.name)
class BitString(Type):
    """BIT STRING: Python value is (bytes, number_of_bits); the XML text
    is the corresponding string of '0'/'1' digits."""

    def __init__(self, name):
        super(BitString, self).__init__(name, 'BIT STRING')

    def encode(self, data):
        element = ElementTree.Element(self.name)

        if data[1] > 0:
            encoded = int(binascii.hexlify(data[0]), 16)
            # OR in a marker byte above the MSB so leading zero bits
            # survive the int round-trip; bin() then yields
            # '0b1' + 7 marker bits + payload, hence the [10:...] slice.
            encoded |= (0x80 << (8 * len(data[0])))
            element.text = bin(encoded)[10:10 + data[1]].upper()

        return element

    def decode(self, element):
        encoded = element.text

        if encoded is None:
            number_of_bits = 0
            decoded = b''
        else:
            number_of_bits = len(encoded)
            decoded = int(encoded, 2)
            # Same marker-byte trick as encode(), in reverse.
            decoded |= (0x80 << number_of_bits)
            rest = (number_of_bits % 8)

            if rest != 0:
                # Pad the final byte with zero bits on the right.
                decoded <<= (8 - rest)

            # hex() gives '0x80....'; [4:] drops '0x' plus the marker byte.
            decoded = binascii.unhexlify(hex(decoded).rstrip('L')[4:])

        return (decoded, number_of_bits)

    def __repr__(self):
        return 'BitString({})'.format(self.name)
class OctetString(Type):
    """OCTET STRING: encoded as upper-case hexadecimal text."""

    def __init__(self, name):
        super(OctetString, self).__init__(name, 'OCTET STRING')

    def encode(self, data):
        node = ElementTree.Element(self.name)

        if data:
            node.text = binascii.hexlify(data).decode('ascii').upper()

        return node

    def decode(self, element):
        if element.text is None:
            return b''

        return binascii.unhexlify(element.text)

    def __repr__(self):
        return 'OctetString({})'.format(self.name)
class ObjectIdentifier(StringType):
    """OBJECT IDENTIFIER: carried as a plain string."""

    def __init__(self, name):
        super(ObjectIdentifier, self).__init__(name, 'OBJECT IDENTIFIER')

    def decode(self, element):
        # Unlike plain strings, an empty value is not a valid OID.
        if element.text is None:
            raise DecodeError("Expected an OBJECT IDENTIFIER, but got ''.")

        return element.text
class Enumerated(Type):
    """ENUMERATED: the chosen value's name becomes an empty child tag.

    With *numeric* enums the Python-side representation is the integer
    value; otherwise it is the name itself.
    """

    def __init__(self, name, values, numeric):
        super(Enumerated, self).__init__(name, 'ENUMERATED')

        if numeric:
            # Python value (int) -> XML tag (name), plus the reverse map.
            self.data_to_value = enum_values_as_dict(values)
            self.value_to_data = {v: k for k, v in self.data_to_value.items()}
        else:
            # Identity mapping: names are used on both sides.
            self.value_to_data = {
                k: k for k in enum_values_as_dict(values).values()
            }
            self.data_to_value = self.value_to_data

        self.has_extension_marker = (EXTENSION_MARKER in values)

    def format_names(self):
        # Human-readable alternative list for error messages.
        return format_or(sorted(list(self.value_to_data.values())))

    def format_values(self):
        return format_or(sorted(list(self.value_to_data)))

    def encode(self, data):
        """Encode *data* as an empty child element named after the value."""
        try:
            value = self.data_to_value[data]
        except KeyError:
            raise EncodeError(
                "Expected enumeration value {}, but got '{}'.".format(
                    self.format_names(),
                    data))

        element = ElementTree.Element(self.name)
        element.append(ElementTree.Element(value))

        return element

    def decode(self, element):
        """Decode the child tag; unknown values decode to None when the
        enumeration is extensible."""
        value = element[0].tag

        if value in self.value_to_data:
            return self.value_to_data[value]
        elif self.has_extension_marker:
            return None
        else:
            raise DecodeError(
                "Expected enumeration value {}, but got '{}'.".format(
                    self.format_values(),
                    value))

    def encode_of(self, data):
        """Untagged variant used inside SEQUENCE OF / SET OF."""
        try:
            value = self.data_to_value[data]
        except KeyError:
            raise EncodeError(
                "Expected enumeration value {}, but got '{}'.".format(
                    self.format_names(),
                    data))

        return ElementTree.Element(value)

    def decode_of(self, element):
        """Untagged variant; unknown values always raise here."""
        value = element.tag

        try:
            return self.value_to_data[value]
        except KeyError:
            raise DecodeError(
                "Expected enumeration value {}, but got '{}'.".format(
                    self.format_values(),
                    value))

    def __repr__(self):
        return 'Enumerated({})'.format(self.name)
class Sequence(MembersType):
    """SEQUENCE: constructed type with named members."""

    def __init__(self, name, members):
        super(Sequence, self).__init__(name, members, 'SEQUENCE')


class SequenceOf(ArrayType):
    """SEQUENCE OF: list of elements of one type."""

    def __init__(self, name, element_type):
        super(SequenceOf, self).__init__(name,
                                         element_type,
                                         'SEQUENCE OF')


class Set(MembersType):
    """SET: named members; XER handling here is identical to SEQUENCE."""

    def __init__(self, name, members):
        super(Set, self).__init__(name, members, 'SET')


class SetOf(ArrayType):
    """SET OF: XER handling here is identical to SEQUENCE OF."""

    def __init__(self, name, element_type):
        super(SetOf, self).__init__(name,
                                    element_type,
                                    'SET OF')
class Choice(Type):
    """CHOICE: exactly one of the named alternatives, represented in
    Python as a (name, value) tuple."""

    def __init__(self, name, members, has_extension_marker):
        super(Choice, self).__init__(name, 'CHOICE')
        self.members = members
        self.name_to_member = {member.name: member for member in self.members}
        self.has_extension_marker = has_extension_marker

    def format_names(self):
        # Human-readable alternative list for error messages.
        return format_or(sorted([member.name for member in self.members]))

    def encode(self, data):
        """Encode ('alternative-name', value) wrapped in this type's tag."""
        try:
            member = self.name_to_member[data[0]]
        except KeyError:
            raise EncodeError(
                "Expected choice {}, but got '{}'.".format(
                    self.format_names(),
                    data[0]))

        element = ElementTree.Element(self.name)

        try:
            element.append(member.encode(data[1]))
        except EncodeError as e:
            # Record the alternative name on the error's location path.
            e.location.append(member.name)
            raise

        return element

    def decode(self, element):
        """Decode the single child element; unknown alternatives yield
        (None, None) when the CHOICE is extensible."""
        member_element = element[0]
        name = member_element.tag

        if name in self.name_to_member:
            member = self.name_to_member[name]
        elif self.has_extension_marker:
            return (None, None)
        else:
            raise DecodeError(
                "Expected choice {}, but got '{}'.".format(
                    self.format_names(),
                    name))

        return (name, member.decode(member_element))

    def encode_of(self, data):
        """Untagged variant used inside SEQUENCE OF / SET OF."""
        try:
            member = self.name_to_member[data[0]]
        except KeyError:
            raise EncodeError(
                "Expected choice {}, but got '{}'.".format(
                    self.format_names(),
                    data[0]))

        return member.encode(data[1])

    def decode_of(self, element):
        """Untagged variant; unknown alternatives always raise here."""
        name = element.tag

        try:
            member = self.name_to_member[name]
        except KeyError:
            raise DecodeError(
                "Expected choice {}, but got '{}'.".format(
                    self.format_names(),
                    name))

        return (name, member.decode(element))

    def __repr__(self):
        return 'Choice({}, [{}])'.format(
            self.name,
            ', '.join([repr(member) for member in self.members]))
# Restricted character-string flavours. All share StringType's plain
# text encoding; the ASN.1 type name is derived from the class name.
class UTF8String(StringType):
    pass


class NumericString(StringType):
    pass


class PrintableString(StringType):
    pass


class IA5String(StringType):
    pass


class VisibleString(StringType):
    pass


class GeneralString(StringType):
    pass


class BMPString(StringType):
    pass


class GraphicString(StringType):
    pass


class UniversalString(StringType):
    pass


class TeletexString(StringType):
    pass


# ObjectDescriptor is defined as a GraphicString in ASN.1.
class ObjectDescriptor(GraphicString):
    pass
class UTCTime(Type):
    """UTCTime: datetime <-> text via the shared utc_time_* helpers."""

    def __init__(self, name):
        super(UTCTime, self).__init__(name, 'UTCTime')

    def encode(self, data):
        node = ElementTree.Element(self.name)
        node.text = utc_time_from_datetime(data)

        return node

    def decode(self, element):
        return utc_time_to_datetime(element.text)

    def __repr__(self):
        return 'UTCTime({})'.format(self.name)
class GeneralizedTime(Type):
    """GeneralizedTime: datetime <-> text via the shared
    generalized_time_* helpers."""

    def __init__(self, name):
        super(GeneralizedTime, self).__init__(name, 'GeneralizedTime')

    def encode(self, data):
        node = ElementTree.Element(self.name)
        node.text = generalized_time_from_datetime(data)

        return node

    def decode(self, element):
        return generalized_time_to_datetime(element.text)

    def __repr__(self):
        return 'GeneralizedTime({})'.format(self.name)
class Date(StringType):
    """DATE: 'YYYY-MM-DD' text <-> datetime.date."""

    def encode(self, data):
        element = ElementTree.Element(self.name)
        element.text = str(data)

        return element

    def decode(self, element):
        return datetime.date(*time.strptime(element.text, '%Y-%m-%d')[:3])


class TimeOfDay(StringType):
    """TIME-OF-DAY: 'HH:MM:SS' text <-> datetime.time."""

    def encode(self, data):
        element = ElementTree.Element(self.name)
        element.text = str(data)

        return element

    def decode(self, element):
        return datetime.time(*time.strptime(element.text, '%H:%M:%S')[3:6])


class DateTime(StringType):
    """DATE-TIME: 'YYYY-MM-DDTHH:MM:SS' text <-> datetime.datetime."""

    def encode(self, data):
        element = ElementTree.Element(self.name)
        # str(datetime) uses a space separator; the encoding uses 'T'.
        element.text = str(data).replace(' ', 'T')

        return element

    def decode(self, element):
        return datetime.datetime(*time.strptime(element.text,
                                                '%Y-%m-%dT%H:%M:%S')[:6])
class Any(Type):
    """ANY: placeholder only -- encode/decode are not implemented."""

    def __init__(self, name):
        super(Any, self).__init__(name, 'ANY')

    def encode(self, data):
        raise NotImplementedError('ANY is not yet implemented.')

    def decode(self, element):
        raise NotImplementedError('ANY is not yet implemented.')

    def __repr__(self):
        return 'Any({})'.format(self.name)
class Recursive(Type, compiler.Recursive):
    """Placeholder for a recursively defined type.

    The real (inner) type is attached later via set_inner_type(), once
    the compiler has resolved the recursion.
    """

    def __init__(self, name, type_name, module_name):
        super(Recursive, self).__init__(name, 'RECURSIVE')
        self.type_name = type_name
        self.module_name = module_name
        self._inner = None

    def set_inner_type(self, inner):
        self._inner = inner

    def encode(self, data):
        encoded = self._inner.encode(data)
        # Re-tag with this member's name; the inner type used its own.
        encoded.tag = self.name

        return encoded

    def decode(self, element):
        return self._inner.decode(element)

    def __repr__(self):
        return 'Recursive({})'.format(self.name)
class CompiledType(compiler.CompiledType):
    """Public facade for one compiled type: XML bytes <-> Python values."""

    def __init__(self, type_):
        super(CompiledType, self).__init__()
        self._type = type_

    @property
    def type(self):
        return self._type

    def encode(self, data, indent=None):
        """Encode *data* to an XML byte string; *indent* (spaces per
        level) enables pretty-printing."""
        element = self._type.encode(data)

        if indent is not None:
            indent_xml(element, indent * " ")

        return ElementTree.tostring(element)

    def decode(self, data):
        """Decode a UTF-8 XML byte string to a Python value."""
        element = ElementTree.fromstring(data.decode('utf-8'))

        return self._type.decode(element)

    def __repr__(self):
        return repr(self._type)
class Compiler(compiler.Compiler):
    """Turns parsed type descriptors into XER codec objects."""

    # Built-in types whose codec constructor takes only the member name.
    _SIMPLE_TYPES = {
        'INTEGER': Integer,
        'REAL': Real,
        'BOOLEAN': Boolean,
        'OBJECT IDENTIFIER': ObjectIdentifier,
        'OCTET STRING': OctetString,
        'TeletexString': TeletexString,
        'NumericString': NumericString,
        'PrintableString': PrintableString,
        'IA5String': IA5String,
        'VisibleString': VisibleString,
        'GeneralString': GeneralString,
        'UTF8String': UTF8String,
        'BMPString': BMPString,
        'GraphicString': GraphicString,
        'UTCTime': UTCTime,
        'UniversalString': UniversalString,
        'GeneralizedTime': GeneralizedTime,
        'DATE': Date,
        'TIME-OF-DAY': TimeOfDay,
        'DATE-TIME': DateTime,
        'BIT STRING': BitString,
        'ANY': Any,
        'ANY DEFINED BY': Any,
        'NULL': Null,
        'ObjectDescriptor': ObjectDescriptor,
    }

    def process_type(self, type_name, type_descriptor, module_name):
        """Compile one top-level type and wrap it for the public API."""
        inner = self.compile_type(type_name,
                                  type_descriptor,
                                  module_name)
        return CompiledType(inner)

    def compile_type(self, name, type_descriptor, module_name):
        """Recursively build the codec for a single type descriptor."""
        module_name = type_descriptor.get('module-name', module_name)
        type_name = type_descriptor['type']

        # Fast path: types that need nothing but the member name.
        simple = self._SIMPLE_TYPES.get(type_name)
        if simple is not None:
            return simple(name)

        # Constructed / parameterised types.
        if type_name == 'SEQUENCE':
            members, _ = self.compile_members(
                type_descriptor['members'],
                module_name)
            return Sequence(name, members)

        if type_name == 'SET':
            members, _ = self.compile_members(
                type_descriptor['members'],
                module_name)
            return Set(name, members)

        if type_name == 'SEQUENCE OF':
            element = type_descriptor['element']
            item = self.compile_type(element['type'],
                                     element,
                                     module_name)
            return SequenceOf(name, item)

        if type_name == 'SET OF':
            element = type_descriptor['element']
            item = self.compile_type(element['type'],
                                     element,
                                     module_name)
            return SetOf(name, item)

        if type_name == 'CHOICE':
            return Choice(name,
                          *self.compile_members(
                              type_descriptor['members'],
                              module_name))

        if type_name == 'ENUMERATED':
            return Enumerated(name,
                              type_descriptor['values'],
                              self._numeric_enums)

        if type_name == 'EXTERNAL':
            members, _ = self.compile_members(
                self.external_type_descriptor()['members'],
                module_name)
            return Sequence(name, members)

        # User-defined type: either break a cycle with a lazy placeholder,
        # or compile the referenced type now.
        if type_name in self.types_backtrace:
            compiled = Recursive(name,
                                 type_name,
                                 module_name)
            self.recursive_types.append(compiled)
            return compiled

        return self.compile_user_type(name,
                                      type_name,
                                      module_name)
def compile_dict(specification, numeric_enums):
    """Compile a parsed ASN.1 specification dict into XER codecs."""
    xer_compiler = Compiler(specification, numeric_enums)
    return xer_compiler.process()
def decode_length(_data):
    """XER frames carry no length prefix, so length cannot be determined."""
    message = 'Decode length is not supported for this codec.'
    raise DecodeError(message)
| 18,104 | 469 | 3,431 |
3745f5e5fa0c77bd411057ea744ee2db6a054fcf | 25 | py | Python | foo.py | spderosso/try-gitless | 1cba16e2cf7c0a1b703c7c8e6960e641ff84561c | [
"MIT"
] | null | null | null | foo.py | spderosso/try-gitless | 1cba16e2cf7c0a1b703c7c8e6960e641ff84561c | [
"MIT"
] | null | null | null | foo.py | spderosso/try-gitless | 1cba16e2cf7c0a1b703c7c8e6960e641ff84561c | [
"MIT"
] | null | null | null | # Test file foo.py
exit
| 6.25 | 18 | 0.68 | # Test file foo.py
exit
| 0 | 0 | 0 |
91462b5892791eb59bc01dbefd00fdb25c428e9c | 3,840 | py | Python | ejercicios/alarma.py | carlosviveros/Soluciones | 115f4fa929c7854ca497e4c994352adc64565456 | [
"MIT"
] | 4 | 2021-12-14T23:51:25.000Z | 2022-03-24T11:14:00.000Z | ejercicios/alarma.py | leugimkm/Soluciones | d71601c8d9b5e86e926f48d9e49462af8a956b6d | [
"MIT"
] | null | null | null | ejercicios/alarma.py | leugimkm/Soluciones | d71601c8d9b5e86e926f48d9e49462af8a956b6d | [
"MIT"
] | 5 | 2021-11-10T06:49:50.000Z | 2022-03-24T01:42:28.000Z | """AyudaEnPython: https://www.facebook.com/groups/ayudapython
Crear una aplicación de consola que permita al usuario programar alarmas
de tiempo. Para realizar esta aplicación deberá presentarle al usuario
las siguientes opciones: ver alarmas activas, agregar nueva alarma,
agregar nueva alarma con tiempo aleatorio, editar alarma existente y
quitar alarma.
Para este ejercicio debe crear una clase llamada Reloj que contenga los
atributos necesarios para almacenar el tiempo (horas, minutos y segundos),
guiarse de las siguientes restricciones y utilizar el diagrama de clase:
- Programe un método constructor vacío que cree objetos con un tiempo
(horas, minutos y segundos) aleatorio.
- Programe un método que reciba las horas, minutos y segundos para la nueva
alarma.
- Cree un método para modificar los segundos.
- Cree un método para modificar los minutos.
- Cree un método para modificar las horas.
- Programe un método que devuelva una cadena de texto que incluya la hora
actual de la variable en formato hh:mm:ss.
* Considere el valor actual y el valor máximo que puede contener cada uno
de los atributos al momento de añadir tiempo.
+----------------------------------------+
| Reloj |
+----------------------------------------+
| - horas: int |
| - minutos: int |
| - segundos: int |
+----------------------------------------+
| + agregar_horas(int horas): void |
| + agregar_minutos(int minutos): void |
| + agregar_segundos(int segundos): void |
| + visualizar(): string |
+----------------------------------------+
"""
from random import randint
from prototools import Menu, int_input
alarma = Reloj()
alarmas = []
if __name__ == "__main__":
menu = Menu("Alarmas")
menu.add_options(
("Ver alarmas activas", ver_alarmas),
("Agregar nueva alarma", nueva_alarma),
("Agregar alarma aleatoria", alarma_aleatorio),
("Editar alarma existente", editar_alarma),
("Quitar alarma", quitar_alarma),
)
menu.run() | 32 | 75 | 0.61224 | """AyudaEnPython: https://www.facebook.com/groups/ayudapython
Crear una aplicación de consola que permita al usuario programar alarmas
de tiempo. Para realizar esta aplicación deberá presentarle al usuario
las siguientes opciones: ver alarmas activas, agregar nueva alarma,
agregar nueva alarma con tiempo aleatorio, editar alarma existente y
quitar alarma.
Para este ejercicio debe crear una clase llamada Reloj que contenga los
atributos necesarios para almacenar el tiempo (horas, minutos y segundos),
guiarse de las siguientes restricciones y utilizar el diagrama de clase:
- Programe un método constructor vacío que cree objetos con un tiempo
(horas, minutos y segundos) aleatorio.
- Programe un método que reciba las horas, minutos y segundos para la nueva
alarma.
- Cree un método para modificar los segundos.
- Cree un método para modificar los minutos.
- Cree un método para modificar las horas.
- Programe un método que devuelva una cadena de texto que incluya la hora
actual de la variable en formato hh:mm:ss.
* Considere el valor actual y el valor máximo que puede contener cada uno
de los atributos al momento de añadir tiempo.
+----------------------------------------+
| Reloj |
+----------------------------------------+
| - horas: int |
| - minutos: int |
| - segundos: int |
+----------------------------------------+
| + agregar_horas(int horas): void |
| + agregar_minutos(int minutos): void |
| + agregar_segundos(int segundos): void |
| + visualizar(): string |
+----------------------------------------+
"""
from random import randint
from prototools import Menu, int_input
class Reloj:
    """Alarm clock holding hours, minutes and seconds.

    A freshly created clock starts at a random time; each ``agregar_*``
    method overwrites one component.
    """

    def __init__(self) -> None:
        # Random initial time. NOTE: hours span 0..24 inclusive, exactly
        # as in the original implementation.
        self._horas = randint(0, 24)
        self._minutos = randint(0, 59)
        self._segundos = randint(0, 59)

    def agregar_horas(self, horas):
        """Set the hours component."""
        self._horas = horas

    def agregar_minutos(self, minutos):
        """Set the minutes component."""
        self._minutos = minutos

    def agregar_segundos(self, segundos):
        """Set the seconds component."""
        self._segundos = segundos

    def visualizar(self):
        """Return the time formatted as ``hh:mm:ss`` (zero padded)."""
        return ":".join(
            f"{valor:02}"
            for valor in (self._horas, self._minutos, self._segundos)
        )
alarma = Reloj()
alarmas = []
def _entradas():
    """Prompt the user for hours, minutes and seconds; return them as a tuple."""
    h = int_input("Ingrese la hora: ", min=0, max=24)
    m = int_input("Ingrese los minutos: ", min=0, max=59)
    s = int_input("Ingrese los segundos: ", min=0, max=59)
    return h, m, s
def _agregar(alarma, horas, minutos, segundos):
alarma.agregar_horas(horas)
alarma.agregar_minutos(minutos)
alarma.agregar_segundos(segundos)
def ver_alarmas():
    """Print every registered alarm, numbered from 1."""
    if not alarmas:
        print("No hay alarmas por el momento")
    for indice, reloj in enumerate(alarmas, 1):
        print(f"{indice}. {reloj.visualizar()}")
def nueva_alarma():
    """Create an alarm from user input and register it."""
    reloj = Reloj()
    _agregar(reloj, *_entradas())
    alarmas.append(reloj)
def alarma_aleatorio():
    """Register a new alarm with a random time."""
    nueva = Reloj()
    alarmas.append(nueva)
    print("Alarma aleatoria agregada")
def editar_alarma():
    """Ask which alarm to edit (1-based) and overwrite its time from input."""
    ver_alarmas()
    print("Seleccionar la alarma a ser editada")
    seleccion = int(input(">>> "))
    objetivo = alarmas[seleccion - 1]
    _agregar(objetivo, *_entradas())
def quitar_alarma():
    """Ask which alarm to delete (1-based) and remove it from the list."""
    ver_alarmas()
    print("Seleccionar la alarma a ser removida")
    seleccion = int(input(">>> "))
    del alarmas[seleccion - 1]
if __name__ == "__main__":
    # Build the interactive menu: each entry is a (label, callback) pair,
    # then hand control to prototools' menu loop.
    menu = Menu("Alarmas")
    menu.add_options(
        ("Ver alarmas activas", ver_alarmas),
        ("Agregar nueva alarma", nueva_alarma),
        ("Agregar alarma aleatoria", alarma_aleatorio),
        ("Editar alarma existente", editar_alarma),
        ("Quitar alarma", quitar_alarma),
    )
    menu.run()
ca0538382008ecacbc38992d84daa88a595e2acd | 200 | py | Python | test.py | c4s4/http1 | ab2610823f060632227f9ca60e98320800b5c5be | [
"Apache-2.0"
] | 1 | 2019-11-30T14:24:25.000Z | 2019-11-30T14:24:25.000Z | test.py | c4s4/http1 | ab2610823f060632227f9ca60e98320800b5c5be | [
"Apache-2.0"
] | 2 | 2015-04-25T08:14:49.000Z | 2015-04-26T09:08:08.000Z | test.py | c4s4/http1 | ab2610823f060632227f9ca60e98320800b5c5be | [
"Apache-2.0"
] | 1 | 2015-04-25T09:12:59.000Z | 2015-04-25T09:12:59.000Z | import http1
response = http1.request('http://www.google.com')
print(f'Status: {response.status} ({response.message})')
print(f'Headers: {response.headers}')
#print(f'Body: {response.body.strip()}')
| 28.571429 | 56 | 0.715 | import http1
response = http1.request('http://www.google.com')
print(f'Status: {response.status} ({response.message})')
print(f'Headers: {response.headers}')
#print(f'Body: {response.body.strip()}')
| 0 | 0 | 0 |
6e8d76559556aad67a766a1b969129888077d6d4 | 275 | py | Python | src/IceRayPy/type/basic.py | dmilos/IceRay | 4e01f141363c0d126d3c700c1f5f892967e3d520 | [
"MIT-0"
] | 2 | 2020-09-04T12:27:15.000Z | 2022-01-17T14:49:40.000Z | src/IceRayPy/type/basic.py | dmilos/IceRay | 4e01f141363c0d126d3c700c1f5f892967e3d520 | [
"MIT-0"
] | null | null | null | src/IceRayPy/type/basic.py | dmilos/IceRay | 4e01f141363c0d126d3c700c1f5f892967e3d520 | [
"MIT-0"
] | 1 | 2020-09-04T12:27:52.000Z | 2020-09-04T12:27:52.000Z | import ctypes
print( '<' + __name__ + ' file=\'' + __file__ + '\'>' )
Scalar = ctypes.c_double
Unsigned = ctypes.c_uint
Integer = ctypes.c_int
Size = ctypes.c_size_t
VoidPtr = ctypes.c_void_p
print( '</' + __name__ + ' file=\'' + __file__ + '\'>' )
| 22.916667 | 59 | 0.578182 | import ctypes
print( '<' + __name__ + ' file=\'' + __file__ + '\'>' )
Scalar = ctypes.c_double
Unsigned = ctypes.c_uint
Integer = ctypes.c_int
Size = ctypes.c_size_t
VoidPtr = ctypes.c_void_p
print( '</' + __name__ + ' file=\'' + __file__ + '\'>' )
| 0 | 0 | 0 |
90531b3709e7fbc87192a474ce0b31ea346b07bd | 328 | py | Python | data/tasks.py | iColdPlayer/kasir | 5c83c201a6e5e3bc0ca402ed4bb824ea85e02e38 | [
"MIT"
] | 29 | 2019-12-04T16:21:14.000Z | 2022-03-02T23:27:59.000Z | data/tasks.py | mejeng/kasir | cc6f9158b61c0cb45078ddf798af9588c8771311 | [
"MIT"
] | 13 | 2019-11-29T18:12:41.000Z | 2021-06-27T02:01:07.000Z | data/tasks.py | mejeng/kasir | cc6f9158b61c0cb45078ddf798af9588c8771311 | [
"MIT"
] | 14 | 2019-12-04T16:21:15.000Z | 2022-02-24T07:05:12.000Z | from __future__ import absolute_import, unicode_literals
from celery import shared_task
from .models import Stock
@shared_task
@shared_task
@shared_task
@shared_task | 17.263158 | 56 | 0.737805 | from __future__ import absolute_import, unicode_literals
from celery import shared_task
from .models import Stock
@shared_task
def add(x, y):
    """Celery task: return the sum of two numbers."""
    return x + y
@shared_task
def mul(x, y):
    """Celery task: return the product of two numbers."""
    return x * y
@shared_task
def xsum(numbers):
    """Celery task: return the sum of an iterable of numbers."""
    return sum(numbers)
@shared_task
def count_stock():
    """Celery task: return the number of Stock rows in the database."""
    return Stock.objects.count()
f460e837d85eacf6d48a5e4585be3a3af165caaf | 357 | py | Python | stable_baselines3/common/pymlmc/__init__.py | atishdixit16/stable-baselines3 | 0188d6a7b0c905693f41a68484d71b02faee6146 | [
"MIT"
] | null | null | null | stable_baselines3/common/pymlmc/__init__.py | atishdixit16/stable-baselines3 | 0188d6a7b0c905693f41a68484d71b02faee6146 | [
"MIT"
] | null | null | null | stable_baselines3/common/pymlmc/__init__.py | atishdixit16/stable-baselines3 | 0188d6a7b0c905693f41a68484d71b02faee6146 | [
"MIT"
] | null | null | null |
__author__ = 'Patrick Farrell'
__credits__ = ['Patrick Farrell', 'Mike Giles']
__license__ = 'GPL-3'
__maintainer__ = 'Patrick Farrell'
__email__ = 'patrick.farrell@maths.ox.ac.uk'
from .mlmc_plot_100 import mlmc_plot_100
from .mlmc_plot import mlmc_plot
from .mlmc_test import mlmc_test
from .mlmc_fn import mlmc_fn
from .mlmc import mlmc
| 27.461538 | 50 | 0.753501 |
__author__ = 'Patrick Farrell'
__credits__ = ['Patrick Farrell', 'Mike Giles']
__license__ = 'GPL-3'
__maintainer__ = 'Patrick Farrell'
__email__ = 'patrick.farrell@maths.ox.ac.uk'
from .mlmc_plot_100 import mlmc_plot_100
from .mlmc_plot import mlmc_plot
from .mlmc_test import mlmc_test
from .mlmc_fn import mlmc_fn
from .mlmc import mlmc
| 0 | 0 | 0 |
e73d97418d8c9a7091446d2eaf9ad35aced734fb | 2,753 | py | Python | player/models.py | DevRx28/dbms-api | 5685555c7c8b6621dc03c4092606c1972b6afda1 | [
"Apache-2.0"
] | null | null | null | player/models.py | DevRx28/dbms-api | 5685555c7c8b6621dc03c4092606c1972b6afda1 | [
"Apache-2.0"
] | null | null | null | player/models.py | DevRx28/dbms-api | 5685555c7c8b6621dc03c4092606c1972b6afda1 | [
"Apache-2.0"
] | null | null | null | from django.db import models
# from dbapi.settings import *
# Create your models here.
| 34.848101 | 60 | 0.682165 | from django.db import models
# from dbapi.settings import *
# Create your models here.
class PlayerManager(models.Manager):
    """Custom manager adding id-lookup and featured-filter helpers."""

    def get_by_id(self, id):
        """Return the player whose ``id`` matches, or ``None``.

        Mirrors the original behaviour exactly: only a queryset holding
        exactly one row yields a result.
        """
        matches = self.get_queryset().filter(id=id)
        return matches.first() if matches.count() == 1 else None

    def featured(self):
        """Queryset of players flagged as featured.

        NOTE(review): the Player model below defines no ``featured``
        field — verify before using this method.
        """
        return self.get_queryset().filter(featured=True)
class Player(models.Model):
    """Football player record: biographic data plus per-skill ratings.

    Field names presumably mirror the source dataset's column headers
    (hence the CamelCase names and the 'GKkicking' spelling) — confirm
    against the import script.
    """

    # --- identity / biographic --------------------------------------
    # ID = models.IntegerField()
    Name = models.CharField(max_length=100)
    Age = models.IntegerField()
    Photo = models.CharField(max_length=100)
    Nationality = models.CharField(max_length=100)
    # --- overall ratings and club info ------------------------------
    Overall = models.IntegerField()
    Potential = models.IntegerField()
    Club = models.CharField(max_length=100)
    Value = models.CharField(max_length=100)
    Wage = models.CharField(max_length=100)
    PreferredFoot = models.CharField(max_length=100)
    InternationalReputation = models.IntegerField()
    WeakFoot = models.IntegerField()
    SkillMoves = models.IntegerField()
    Position = models.CharField(max_length=100)
    JerseyNumber = models.IntegerField()
    Height = models.CharField(max_length=100)
    Weight = models.CharField(max_length=100)
    # --- outfield skill attributes ----------------------------------
    Crossing = models.IntegerField()
    Finishing = models.IntegerField()
    HeadingAccuracy = models.IntegerField()
    ShortPassing = models.IntegerField()
    Volleys = models.IntegerField()
    Dribbling = models.IntegerField()
    Curve = models.IntegerField()
    FKAccuracy = models.IntegerField()
    LongPassing = models.IntegerField()
    BallControl = models.IntegerField()
    Acceleration = models.IntegerField()
    SprintSpeed = models.IntegerField()
    Agility = models.IntegerField()
    Reactions = models.IntegerField()
    Balance = models.IntegerField()
    ShotPower = models.IntegerField()
    Jumping = models.IntegerField()
    Stamina = models.IntegerField()
    Strength = models.IntegerField()
    LongShots = models.IntegerField()
    Aggression = models.IntegerField()
    Interceptions = models.IntegerField()
    Positioning = models.IntegerField()
    Vision = models.IntegerField()
    Penalties = models.IntegerField()
    Composure = models.IntegerField()
    Marking = models.IntegerField()
    StandingTackle = models.IntegerField()
    SlidingTackle = models.IntegerField()
    # --- goalkeeper attributes --------------------------------------
    GKDiving = models.IntegerField()
    GKkicking = models.IntegerField()
    GKPositioning = models.IntegerField()
    GKReflexes = models.IntegerField()
    GKHandling = models.IntegerField()

    # Custom manager providing get_by_id()/featured().
    objects = PlayerManager()

    # def get_absolute_url(self):
    #     return reverse('details', kwargs={"pk": self.pk})
    def __str__(self):
        """Display players by name in admin and shell output."""
        return self.Name
| 229 | 2,325 | 105 |
72d7963c93d1db40d6333648d00986a10425dc47 | 288 | py | Python | speaksee/data/__init__.py | aimagelab/speaksee | 63700a4062e2ae00132a5c77007604fdaf4bd00b | [
"BSD-3-Clause"
] | 29 | 2019-02-28T05:29:53.000Z | 2021-01-25T06:55:48.000Z | speaksee/data/__init__.py | aimagelab/speaksee | 63700a4062e2ae00132a5c77007604fdaf4bd00b | [
"BSD-3-Clause"
] | 2 | 2019-10-26T02:29:59.000Z | 2021-01-15T13:58:53.000Z | speaksee/data/__init__.py | aimagelab/speaksee | 63700a4062e2ae00132a5c77007604fdaf4bd00b | [
"BSD-3-Clause"
] | 11 | 2019-03-12T08:43:09.000Z | 2021-03-15T03:20:43.000Z | from .field import *
from .dataset import *
from torch.utils.data import DataLoader as TorchDataLoader | 41.142857 | 99 | 0.75 | from .field import *
from .dataset import *
from torch.utils.data import DataLoader as TorchDataLoader
class DataLoader(TorchDataLoader):
    """torch DataLoader that takes its collate function from the dataset.

    The wrapped dataset must expose a ``collate_fn()`` method returning
    the callable (or ``None`` for torch's default) used to batch samples.
    """

    def __init__(self, dataset, *args, **kwargs):
        collate = dataset.collate_fn()
        super(DataLoader, self).__init__(dataset, *args, collate_fn=collate, **kwargs)
80401d8a668db9453bfaaf0b916b26e67477ae32 | 5,545 | py | Python | src/ctrm/learning/model/cvae.py | omron-sinicx/ctrm | 83e7fe4abb8ad8559bfb6e64170878575a03fd20 | [
"MIT"
] | 8 | 2022-01-25T08:04:32.000Z | 2022-02-20T10:47:40.000Z | src/ctrm/learning/model/cvae.py | omron-sinicx/ctrm | 83e7fe4abb8ad8559bfb6e64170878575a03fd20 | [
"MIT"
] | null | null | null | src/ctrm/learning/model/cvae.py | omron-sinicx/ctrm | 83e7fe4abb8ad8559bfb6e64170878575a03fd20 | [
"MIT"
] | null | null | null | """implementation of F_CTRM
Author: Keisuke Okumura / Ryo Yonetani
Affiliation: TokyoTech & OSX / OSX
"""
from __future__ import annotations
from dataclasses import dataclass
from functools import reduce
from operator import add
from typing import Optional
import torch
import torch.nn as nn
from torch.distributions.relaxed_categorical import RelaxedOneHotCategorical
from .model import Model
@dataclass(eq=False, repr=False)
class CTRMNet(Model):
"""CVAE to construct CTRMs"""
dim_input: int
dim_output: int
dim_indicators: int = 0 # set automatically in train.py
# hyper parameters
dim_hidden: int = 32
dim_latent: int = 64
temp: float = 2.0
num_mid_layers_encoder: int = 1
num_mid_layers_decoder: int = 1
kl_weight: float = 0.1 # weighting KL divergence
def forward(
self, x: torch.Tensor, y: torch.Tensor
) -> tuple[torch.Tensor, ...]:
"""used in training phase"""
# predict next location
assert self.dim_indicators > 0
# indicator is included in y
ind = y[:, -self.dim_indicators :].reshape(-1, self.dim_indicators)
# encode
augmented_x = torch.cat((x, ind), -1)
log_prob_x = self.encoder_input(augmented_x)
log_prob_y = self.encoder_output(torch.cat([x, y], dim=1))
dist_y = RelaxedOneHotCategorical(
self.temp, probs=torch.exp(log_prob_y)
)
# sampling from the latent space
latent_y = dist_y.rsample()
# decode
y_pred = self.decoder(torch.cat([latent_y, augmented_x], dim=1))
# indicator prediction
ind_pred = self.indicator(x)
# all values are for computing loss
return y_pred, log_prob_x, log_prob_y, ind_pred
def predict_with_loss(
self,
x: torch.Tensor,
y: torch.Tensor,
w: Optional[torch.Tensor] = None,
) -> tuple[torch.Tensor, torch.Tensor, dict[str, torch.Tensor]]:
"""used in training phase"""
y_pred, log_prob_x, log_prob_y, ind_pred = self.forward(x, y)
loss_details = self.loss_fn(y, y_pred, log_prob_x, log_prob_y, w)
loss = reduce(add, loss_details.values())
# indicator
ind_pred = nn.LogSoftmax(dim=-1)(ind_pred)
ind_loss = nn.NLLLoss()(ind_pred, torch.where(y[:, 3:])[1])
loss = loss + ind_loss * 1e-3
return y_pred, loss, loss_details
def sample(self, x: torch.Tensor, ind: torch.Tensor,) -> torch.Tensor:
"""sampling function, used in inference phase"""
x = torch.cat((x, ind), -1)
with torch.no_grad():
log_prob_x = self.encoder_input(x)
dist_x = RelaxedOneHotCategorical(
self.temp, probs=torch.exp(log_prob_x)
)
latent_x = dist_x.rsample()
y = self.decoder(torch.cat([latent_x, x], -1))
return y
def loss_fn(
self,
y: torch.Tensor,
y_pred: torch.Tensor,
log_prob_x: torch.Tensor,
log_prob_y: torch.Tensor,
weight: Optional[torch.Tensor] = None,
) -> dict[str, torch.Tensor]:
"""compute loss of the model, used in training phase"""
if self.dim_indicators > 0:
# indicator is included in y, remove this
y = y[:, : -self.dim_indicators]
if weight is None:
recon_loss = nn.MSELoss()(y_pred, y)
kl_loss = torch.sum(
torch.exp(log_prob_x) * (log_prob_x - log_prob_y), dim=-1
).mean()
else:
weight = weight.reshape(-1)
recon_loss = (torch.sum((y_pred - y) ** 2, dim=-1) * weight).mean()
kl_loss = (
torch.sum(
torch.exp(log_prob_x) * (log_prob_x - log_prob_y), dim=-1
)
* weight
).mean() * self.kl_weight
return {
"recon": recon_loss,
"kl": kl_loss,
}
| 32.052023 | 79 | 0.571867 | """implementation of F_CTRM
Author: Keisuke Okumura / Ryo Yonetani
Affiliation: TokyoTech & OSX / OSX
"""
from __future__ import annotations
from dataclasses import dataclass
from functools import reduce
from operator import add
from typing import Optional
import torch
import torch.nn as nn
from torch.distributions.relaxed_categorical import RelaxedOneHotCategorical
from .model import Model
@dataclass(eq=False, repr=False)
class CTRMNet(Model):
    """CVAE to construct CTRMs.

    A conditional VAE with a discrete (relaxed one-hot) latent space:
    one encoder is conditioned on the input alone (prior), the other on
    input plus target (posterior); the decoder reconstructs the target
    from a latent sample concatenated with the augmented input.
    """

    dim_input: int   # size of the conditioning feature vector x
    dim_output: int  # size of the target vector y (includes the indicator one-hot)
    dim_indicators: int = 0  # set automatically in train.py
    # hyper parameters
    dim_hidden: int = 32
    dim_latent: int = 64
    temp: float = 2.0  # relaxation temperature of RelaxedOneHotCategorical
    num_mid_layers_encoder: int = 1
    num_mid_layers_decoder: int = 1
    kl_weight: float = 0.1  # weighting KL divergence

    def __post_init__(self) -> None:
        """Build the encoder/decoder/indicator MLPs (dataclass init hook)."""
        super().__init__()

        def generate_mlp(
            dim_input: int, dim_output: int, num_mid_layers: int = 1,
        ) -> nn.modules.container.Sequential:
            # Linear/BatchNorm/ReLU stack with `num_mid_layers` hidden blocks.
            return nn.Sequential(
                nn.Linear(dim_input, self.dim_hidden),
                nn.BatchNorm1d(self.dim_hidden),
                nn.ReLU(),
                *(
                    [
                        nn.Linear(self.dim_hidden, self.dim_hidden),
                        nn.BatchNorm1d(self.dim_hidden),
                        nn.ReLU(),
                    ]
                    * num_mid_layers
                ),
                nn.Linear(self.dim_hidden, dim_output),
            )

        def generate_encoder(
            dim_input: int,
        ) -> nn.modules.container.Sequential:
            # Encoder = MLP into the latent space plus a log-softmax, i.e.
            # it outputs categorical log-probabilities over latent classes.
            mlp = generate_mlp(
                dim_input, self.dim_latent, self.num_mid_layers_encoder,
            )
            mlp.add_module("log_softmax", nn.LogSoftmax(dim=-1))
            return mlp

        # prior encoder: conditioned on x (+ indicator one-hot)
        self.encoder_input = generate_encoder(
            self.dim_input + self.dim_indicators
        )
        # posterior encoder: conditioned on x and the full target y
        self.encoder_output = generate_encoder(
            self.dim_input + self.dim_output
        )
        # decoder reconstructs y minus its indicator columns
        self.decoder = generate_mlp(
            self.dim_latent + self.dim_input + self.dim_indicators,
            self.dim_output - self.dim_indicators,
            self.num_mid_layers_decoder,
        )
        # auxiliary head predicting the indicator from x alone
        self.indicator = generate_mlp(
            self.dim_input, self.dim_indicators, self.num_mid_layers_decoder,
        )

    def forward(
        self, x: torch.Tensor, y: torch.Tensor
    ) -> tuple[torch.Tensor, ...]:
        """used in training phase

        Returns ``(y_pred, log_prob_x, log_prob_y, ind_pred)`` — all four
        are consumed by :meth:`loss_fn` / :meth:`predict_with_loss`.
        """
        # predict next location
        assert self.dim_indicators > 0
        # indicator is included in y (its trailing dim_indicators columns)
        ind = y[:, -self.dim_indicators :].reshape(-1, self.dim_indicators)

        # encode
        augmented_x = torch.cat((x, ind), -1)
        log_prob_x = self.encoder_input(augmented_x)
        log_prob_y = self.encoder_output(torch.cat([x, y], dim=1))
        dist_y = RelaxedOneHotCategorical(
            self.temp, probs=torch.exp(log_prob_y)
        )

        # sampling from the latent space (rsample -> reparameterised, so
        # gradients flow through the sample)
        latent_y = dist_y.rsample()

        # decode
        y_pred = self.decoder(torch.cat([latent_y, augmented_x], dim=1))

        # indicator prediction
        ind_pred = self.indicator(x)

        # all values are for computing loss
        return y_pred, log_prob_x, log_prob_y, ind_pred

    def predict_with_loss(
        self,
        x: torch.Tensor,
        y: torch.Tensor,
        w: Optional[torch.Tensor] = None,
    ) -> tuple[torch.Tensor, torch.Tensor, dict[str, torch.Tensor]]:
        """used in training phase

        Returns ``(prediction, total_loss, loss_breakdown)``.
        """
        y_pred, log_prob_x, log_prob_y, ind_pred = self.forward(x, y)
        loss_details = self.loss_fn(y, y_pred, log_prob_x, log_prob_y, w)
        loss = reduce(add, loss_details.values())

        # indicator classification loss (NLL against the one-hot part of y)
        # NOTE(review): assumes the indicator one-hot occupies y[:, 3:] —
        # confirm this matches the dataset layout.
        ind_pred = nn.LogSoftmax(dim=-1)(ind_pred)
        ind_loss = nn.NLLLoss()(ind_pred, torch.where(y[:, 3:])[1])
        loss = loss + ind_loss * 1e-3
        return y_pred, loss, loss_details

    def sample(self, x: torch.Tensor, ind: torch.Tensor,) -> torch.Tensor:
        """sampling function, used in inference phase

        Draws a latent from the prior encoder (given x and the desired
        indicator) and decodes it; runs without gradient tracking.
        """
        x = torch.cat((x, ind), -1)
        with torch.no_grad():
            log_prob_x = self.encoder_input(x)
            dist_x = RelaxedOneHotCategorical(
                self.temp, probs=torch.exp(log_prob_x)
            )
            latent_x = dist_x.rsample()
            y = self.decoder(torch.cat([latent_x, x], -1))
        return y

    def loss_fn(
        self,
        y: torch.Tensor,
        y_pred: torch.Tensor,
        log_prob_x: torch.Tensor,
        log_prob_y: torch.Tensor,
        weight: Optional[torch.Tensor] = None,
    ) -> dict[str, torch.Tensor]:
        """compute loss of the model, used in training phase

        Returns a dict with a reconstruction (squared-error) term and a KL
        term between the two latent categorical distributions; ``weight``
        optionally reweights both per sample.
        """
        if self.dim_indicators > 0:
            # indicator is included in y, remove this
            y = y[:, : -self.dim_indicators]
        if weight is None:
            recon_loss = nn.MSELoss()(y_pred, y)
            # NOTE(review): kl_weight is applied only in the weighted
            # branch below — confirm the asymmetry is intended.
            kl_loss = torch.sum(
                torch.exp(log_prob_x) * (log_prob_x - log_prob_y), dim=-1
            ).mean()
        else:
            weight = weight.reshape(-1)
            recon_loss = (torch.sum((y_pred - y) ** 2, dim=-1) * weight).mean()
            kl_loss = (
                torch.sum(
                    torch.exp(log_prob_x) * (log_prob_x - log_prob_y), dim=-1
                )
                * weight
            ).mean() * self.kl_weight
        return {
            "recon": recon_loss,
            "kl": kl_loss,
        }
| 1,552 | 0 | 27 |
8e461b9f5e80c2dac77fcfd40a02f888de0595da | 246 | py | Python | general/save_float_image.py | miroslavradojevic/python-snippets | 753e1c15dc077d3bcf5de4fd5d3a675daf0da27c | [
"MIT"
] | null | null | null | general/save_float_image.py | miroslavradojevic/python-snippets | 753e1c15dc077d3bcf5de4fd5d3a675daf0da27c | [
"MIT"
] | null | null | null | general/save_float_image.py | miroslavradojevic/python-snippets | 753e1c15dc077d3bcf5de4fd5d3a675daf0da27c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import numpy as np
import cv2
if __name__ == '__main__':
arr = np.random.rand(512, 512) * 255
print("arr {} | {} -- {} | {}".format(arr.shape, np.amin(arr), np.amax(arr), arr.dtype))
cv2.imwrite("arr.jpg", arr)
| 27.333333 | 92 | 0.601626 | #!/usr/bin/env python
import numpy as np
import cv2
if __name__ == '__main__':
    # Random 512x512 grayscale image with float values in [0, 255).
    arr = np.random.rand(512, 512) * 255
    # Log shape, min/max and dtype (arr is float64 here; cv2.imwrite is
    # expected to handle the cast on save — NOTE(review): confirm).
    print("arr {} | {} -- {} | {}".format(arr.shape, np.amin(arr), np.amax(arr), arr.dtype))
    cv2.imwrite("arr.jpg", arr)
| 0 | 0 | 0 |
575e8126c17a3433ed1ce77ca87e613d080d64f9 | 1,179 | py | Python | knn.py | enesdemirag/music-genre-classification | deb3ff729ae159c3a9eb7433ba5f35ac32fbe3d1 | [
"MIT"
] | null | null | null | knn.py | enesdemirag/music-genre-classification | deb3ff729ae159c3a9eb7433ba5f35ac32fbe3d1 | [
"MIT"
] | 1 | 2020-11-30T20:10:01.000Z | 2020-11-30T20:10:01.000Z | knn.py | enesdemirag/music-genre-classification | deb3ff729ae159c3a9eb7433ba5f35ac32fbe3d1 | [
"MIT"
] | null | null | null | # In this script Kth Nearest Neighbor (Knn) machine learning algorithm used on dataset.csv
# This dataset consist of 1000 samples with 26 features each
# https://scikit-learn.org/stable/modules/neighbors.html
import numpy as np
from utils import load_analytic_data, save_sklearn_model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.neighbors import KNeighborsClassifier
dataset = load_analytic_data("dataset.csv")
# Encoding the labels
genres = dataset.iloc[:, -1] # Last column
encoder = LabelEncoder()
labels = encoder.fit_transform(genres)
# Scaling the features
scaler = StandardScaler() # MinMaxScaler() can be also used
features = scaler.fit_transform(np.array(dataset.iloc[:, :-1], dtype=float))
# Dividing dataset into training and testing sets
# 80to20 split
x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.2)
# Create knn model
model = KNeighborsClassifier(n_neighbors=9, weights="distance")
# Training
model.fit(x_train, y_train)
# Testing
accuracy = model.score(x_test, y_test)
print(accuracy)
# Save model
save_sklearn_model(model, "knn.sk") | 31.864865 | 90 | 0.793045 | # In this script Kth Nearest Neighbor (Knn) machine learning algorithm used on dataset.csv
# This dataset consist of 1000 samples with 26 features each
# https://scikit-learn.org/stable/modules/neighbors.html
import numpy as np
from utils import load_analytic_data, save_sklearn_model
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.neighbors import KNeighborsClassifier
# Pipeline: load features -> encode labels -> scale -> split -> fit KNN.
dataset = load_analytic_data("dataset.csv")
# Encode genre names (last column) into integer class labels.
genres = dataset.iloc[:, -1] # Last column
encoder = LabelEncoder()
labels = encoder.fit_transform(genres)
# Standardize the features (zero mean, unit variance).
scaler = StandardScaler() # MinMaxScaler() can be also used
features = scaler.fit_transform(np.array(dataset.iloc[:, :-1], dtype=float))
# Dividing dataset into training and testing sets
# 80to20 split (no random_state, so the split differs between runs)
x_train, x_test, y_train, y_test = train_test_split(features, labels, test_size=0.2)
# KNN with k=9, neighbors weighted by inverse distance.
model = KNeighborsClassifier(n_neighbors=9, weights="distance")
# Training
model.fit(x_train, y_train)
# Testing: mean accuracy on the held-out 20%.
accuracy = model.score(x_test, y_test)
print(accuracy)
# Persist the fitted model for later inference.
save_sklearn_model(model, "knn.sk")
a74dc77ce350ad68add9d49a8b8ee049fe32d6ad | 11,391 | py | Python | experiment/Heuristic_based/pooing_guided.py | predoodl/predoo | 3a0ba0515373744364a0dd9daf4251867b39650c | [
"MIT"
] | 14 | 2021-03-27T06:19:39.000Z | 2022-03-07T01:29:42.000Z | experiment/Heuristic_based/pooing_guided.py | predoodl/predoo | 3a0ba0515373744364a0dd9daf4251867b39650c | [
"MIT"
] | null | null | null | experiment/Heuristic_based/pooing_guided.py | predoodl/predoo | 3a0ba0515373744364a0dd9daf4251867b39650c | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import csv
import time
from queue import Queue
import math
a=[]
a1 = 0.0001 * np.ones((1, 2, 4, 4), np.float64)
a2 = 0.000001 * np.ones((1, 2, 4, 4), np.float64)
a3 = 0.00000001 * np.ones((1, 2, 4, 4), np.float64)
a.append(a1)
a.append(a2)
a.append(a3)
if __name__=='__main__':
corpus = createCorpus(1000)
# Max_guided(corpus, "E:\Dtype_test\Max_guided2\\tf_cpu_2.0.0\\tf_pooling.csv","E:\Dtype_test\Max_guided2\\tf_cpu_2.0.0\\tf_pooling_count.csv")
# Mean_guided(corpus,"E:\Dtype_test\Mean_guided2\\tf_cpu_2.0.0\\tf_pooling.csv","E:\Dtype_test\Mean_guided2\\tf_cpu_2.0.0\\tf_pooling_count.csv")
Max_guided(corpus,"/home/ise/opTest/data/Max_guided2/tf_gpu_2.0.0/pooling.csv","/home/ise/opTest/data/Max_guided2/tf_gpu_2.0.0/pooling_count.csv")
Mean_guided(corpus,"/home/ise/opTest/data/Mean_guided2/tf_gpu_2.0.0/pooling.csv","/home/ise/opTest/data/Mean_guided2/tf_gpu_2.0.0/pooling_count.csv") | 32.269122 | 153 | 0.595558 | import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import csv
import time
from queue import Queue
import math
a=[]
a1 = 0.0001 * np.ones((1, 2, 4, 4), np.float64)
a2 = 0.000001 * np.ones((1, 2, 4, 4), np.float64)
a3 = 0.00000001 * np.ones((1, 2, 4, 4), np.float64)
a.append(a1)
a.append(a2)
a.append(a3)
def input_withDiffDype(x, dtype):
    """Convert an NCHW numpy array to an NHWC TensorFlow tensor of *dtype*."""
    nhwc = x.transpose((0, 2, 3, 1))
    return tf.convert_to_tensor(nhwc, dtype=dtype)
def tf_poolingWithDiffDype(dtype):
    """Build a 2x2, stride-1, valid-padding MaxPooling2D layer of *dtype*."""
    return layers.MaxPooling2D(2, 1, padding='valid', dtype=dtype)
def createCorpus(n):
    """Return a queue of *n* random inputs, each shaped (1, 2, 4, 4)."""
    corpus = Queue()
    for _ in range(n):
        corpus.put(np.random.randn(1, 2, 4, 4))
    return corpus
def Max_guided(corpus,f,g):
    """Fuzz the pooling op, guided by the per-element maximum error.

    Pops inputs from ``corpus``, measures float16/32/64 output differences
    via ``getMaxdiff`` and appends one CSV row per input to file ``f``.
    Whenever the largest elementwise difference exceeds 5e-4, three mutated
    copies of the input (offset by the globals ``a1``/``a2``/``a3``) are
    pushed back into the corpus.  Every 999 iterations a summary row
    (current/global max errors and the input indices that produced them)
    is appended to file ``g``.  Stops when the corpus is empty or after
    20000 iterations.
    """
    out=open(file=f,mode='a',newline='')
    csv_writer=csv.writer(out)
    out1 = open(file=g, mode="a", newline='')
    csv_writer1 = csv.writer(out1)
    csv_writer.writerow(["No.", "16_32(16)", "16_64(16)", "32_16(32)", "32_64(32)", "64_16(64)", "64_32(64)",
                         "time1", "32_16(16)", "64_16(16)", "16_32(32)", "64_32(32)", "16_64(64)", "32_64(64)", "time2",
                         "isNaN"])
    csv_writer1.writerow(
        ["No.", "当前最大误差(同输入)", "全局最大误差(同输入)", "引起最大误差的输入编号1", "当前最大误差(同算子)", "全局最大误差(同算子)", "引起最大误差的输入编号2"])
    h_error1 = 0
    h_error2 = 0
    maxine1 = 0
    maxine2 = 0
    j = 0
    index1 = 0
    index2 = 0
    while not corpus.empty() and j<20000:
        x=corpus.get()
        maxse,maxe1,maxe2=getMaxdiff(x, csv_writer, j)
        if maxe1 > maxine1:
            index1 = j
            maxine1 = maxe1  # largest error so far in this 999-input window
        if maxe2 > maxine2:
            index2 = j
            maxine2 = maxe2  # largest error so far in this 999-input window
        if maxse > 0.0005:
            corpus.put(x + a1)
            corpus.put(x + a2)
            corpus.put(x + a3)
        if j%999==0:
            r = []
            h_error1 = max(h_error1, maxine1)
            h_error2 = max(h_error2, maxine2)
            r.append(j // 999)
            r.append(maxine1)
            r.append(h_error1)
            r.append(index1)
            r.append(maxine2)
            r.append(h_error2)
            r.append(index2)
            csv_writer1.writerow(r)
            maxine1 = 0
            maxine2 = 0
            index1 = 0
            index2 = 0
        j+=1
        print(j)
    out.close()
    out1.close()
def getMaxdiff(x,csv_writer,j):
    """Run input j through MaxPooling2D under float16/32/64 and log the errors.

    Twelve comparisons are made: six across input/layer dtype casts
    (diff1-6) and six across layer dtypes for same-dtype inputs (diff7-12);
    for each, both the mean and the elementwise max absolute difference are
    computed.  One CSV row is written via ``csv_writer``:
    [j, diff1..diff6, elapsed1, diff7..diff12, elapsed2, ("NAN" appended if
    the float32 output contains NaN)].

    Returns:
        tuple: (largest elementwise max difference over all 12 comparisons,
                largest mean difference among diff1..diff6 (res[1:7]),
                largest mean difference among diff7..diff12 (res[8:14])).
    """
    res = []
    maxe=[]
    res.append(j)
    # weights = torch.empty(3, 3, 3, 8)
    # torch.nn.init.constant_(weights, 5e-2)
    # Tensorflow padding behavior. Assuming that kH == kW to keep this simple.
    x_32 = input_withDiffDype(x, tf.float32)
    x_16 = input_withDiffDype(x, tf.float16)
    x_64 = input_withDiffDype(x, tf.float64)
    s=time.time()
    tf_pooling_16 = tf_poolingWithDiffDype('float16')
    tf_pooling_32 = tf_poolingWithDiffDype('float32')
    tf_pooling_64 = tf_poolingWithDiffDype('float64')
    out_16_16_1 = tf_pooling_16(x_16).numpy().astype(np.float32)
    out_16_16_2 = tf_pooling_16(x_16).numpy().astype(np.float64)
    out_16_32 = tf_pooling_32(x_16)
    out_16_64 = tf_pooling_64(x_16)
    diff1 = np.mean(np.abs(out_16_32 - out_16_16_1))  # low precision cast up to high precision
    diff2 = np.mean(np.abs(out_16_64 - out_16_16_2))  # low precision cast up to high precision
    dif1 = np.max(np.abs(out_16_32 - out_16_16_1))  # low precision cast up to high precision
    dif2 = np.max(np.abs(out_16_64 - out_16_16_2) )  # low precision cast up to high precision
    out_32_32_1 = tf_pooling_32(x_32)
    out_32_32_2 = tf_pooling_32(x_32).numpy().astype(np.float64)
    out_32_16 = tf_pooling_16(x_32).numpy().astype(np.float32)
    out_32_64 = tf_pooling_64(x_32)
    diff3 = np.mean(np.abs(out_32_16 - out_32_32_1))  # high precision cast down to low precision
    diff4 = np.mean(np.abs(out_32_64 - out_32_32_2))  # low precision cast up to high precision
    dif3 = np.max(np.abs(out_32_16 - out_32_32_1))  # high precision cast down to low precision
    dif4 = np.max(np.abs(out_32_64 - out_32_32_2))  # low precision cast up to high precision
    out_64_16 = tf_pooling_16(x_64).numpy().astype(np.float64)
    out_64_32 = tf_pooling_32(x_64).numpy().astype(np.float64)
    out_64_64 = tf_pooling_64(x_64)
    diff5 = np.mean(np.abs(out_64_16 - out_64_64))  # high precision cast down to low precision
    diff6 = np.mean(np.abs(out_64_32 - out_64_64))  # low precision cast up to high precision
    dif5 = np.max(np.abs(out_64_16 - out_64_64))  # high precision cast down to low precision
    dif6 = np.max(np.abs(out_64_32 - out_64_64))  # low precision cast up to high precision
    e=time.time()
    res.append(diff1)
    res.append(diff2)
    res.append(diff3)
    res.append(diff4)
    res.append(diff5)
    res.append(diff6)
    res.append(e-s)
    s = time.time()
    out_16_16 = tf_pooling_16(x_16)
    out_32_16_1 = tf_pooling_16(x_32)
    out_64_16_1 = tf_pooling_16(x_64)
    diff7 = np.mean(np.abs(out_32_16_1 - out_16_16))
    diff8 = np.mean(np.abs(out_64_16_1 - out_16_16))
    dif7 = np.max(np.abs(out_32_16_1 - out_16_16))
    dif8 = np.max(np.abs(out_64_16_1 - out_16_16))
    out_64_32_1 = tf_pooling_32(x_64)
    diff9 = np.mean(np.abs(out_16_32 - out_32_32_1))
    diff10 = np.mean(np.abs(out_64_32_1 - out_32_32_1))
    dif9 = np.max(np.abs(out_16_32 - out_32_32_1))
    dif10 = np.max(np.abs(out_64_32_1 - out_32_32_1))
    diff11 = np.mean(np.abs(out_16_64 - out_64_64))
    diff12 = np.mean(np.abs(out_32_64 - out_64_64))
    dif11 = np.max(np.abs(out_16_64 - out_64_64))
    dif12 = np.max(np.abs(out_32_64 - out_64_64))
    e = time.time()
    res.append(diff7)
    res.append(diff8)
    res.append(diff9)
    res.append(diff10)
    res.append(diff11)
    res.append(diff12)
    res.append(e - s)
    # Flag NaNs in the float32 output (only checked for the 32-bit layer).
    for n in out_32_32_1.numpy().ravel():
        if math.isnan(n):
            res.append("NAN")
            break
    maxe.append(dif1)
    maxe.append(dif2)
    maxe.append(dif3)
    maxe.append(dif4)
    maxe.append(dif5)
    maxe.append(dif6)
    maxe.append(dif7)
    maxe.append(dif8)
    maxe.append(dif9)
    maxe.append(dif10)
    maxe.append(dif11)
    maxe.append(dif12)
    csv_writer.writerow(res)
    return max(maxe[:]), max(res[1:7]), max(res[8:14])
def Mean_guided(corpus,f,g):
    """Fuzz the pooling op, guided by the mean error.

    Same loop structure as ``Max_guided`` but driven by ``getMeandiff``:
    an input is mutated (offset by ``a1``/``a2``/``a3``) and re-queued when
    either of its two mean-difference maxima exceeds 1e-4.  Per-input rows
    go to file ``f``; a summary row every 999 iterations goes to file ``g``.
    Stops when the corpus is empty or after 20000 iterations.
    """
    out=open(file=f,mode='a',newline='')
    csv_writer=csv.writer(out)
    out1 = open(file=g, mode="a", newline='')
    csv_writer1 = csv.writer(out1)
    csv_writer.writerow(["No.", "16_32(16)", "16_64(16)", "32_16(32)", "32_64(32)", "64_16(64)", "64_32(64)",
                         "time1", "32_16(16)", "64_16(16)", "16_32(32)", "64_32(32)", "16_64(64)", "32_64(64)", "time2",
                         "isNaN"])
    csv_writer1.writerow(
        ["No.", "当前最大误差(同输入)", "全局最大误差(同输入)", "引起最大误差的输入编号1", "当前最大误差(同算子)", "全局最大误差(同算子)", "引起最大误差的输入编号2"])
    h_error1 = 0
    h_error2 = 0
    maxine1 = 0
    maxine2 = 0
    j = 0
    index1 = 0
    index2 = 0
    while not corpus.empty() and j<20000:
        x=corpus.get()
        maxe1,maxe2=getMeandiff(x, csv_writer, j)
        if max(maxe1, maxe2) > 1e-4:
            corpus.put(x + a1)
            corpus.put(x + a2)
            corpus.put(x + a3)
        if maxe1 > maxine1:
            index1 = j
            maxine1 = maxe1  # largest error so far in this 999-input window
        if maxe2 > maxine2:
            index2 = j
            maxine2 = maxe2  # largest error so far in this 999-input window
        if j % 999 == 0:
            r = []
            h_error1 = max(h_error1, maxine1)
            h_error2 = max(h_error2, maxine2)
            r.append(j // 999)
            r.append(maxine1)
            r.append(h_error1)
            r.append(index1)
            r.append(maxine2)
            r.append(h_error2)
            r.append(index2)
            csv_writer1.writerow(r)
            maxine1 = 0
            maxine2 = 0
            index1 = 0
            index2 = 0
        j+=1
        print(j)
    out.close()
    out1.close()
def getMeandiff(x,csv_writer,j):
    """Run input j through MaxPooling2D under float16/32/64 and log mean errors.

    Same twelve comparisons as ``getMaxdiff``, but only the mean absolute
    differences are recorded for the first six (dif7..dif12 are computed but
    unused here).  One CSV row is written via ``csv_writer``:
    [j, diff1..diff6, elapsed1, diff7..diff12, elapsed2, ("NAN" appended if
    the float32 output contains NaN)].

    Returns:
        tuple: (largest mean difference among diff1..diff6 (res[1:7]),
                largest mean difference among diff7..diff12 (res[8:14])).
    """
    res = []
    res.append(j)
    # weights = torch.empty(3, 3, 3, 8)
    # torch.nn.init.constant_(weights, 5e-2)
    # Tensorflow padding behavior. Assuming that kH == kW to keep this simple.
    x_32 = input_withDiffDype(x, tf.float32)
    x_16 = input_withDiffDype(x, tf.float16)
    x_64 = input_withDiffDype(x, tf.float64)
    s = time.time()
    tf_pooling_16 = tf_poolingWithDiffDype('float16')
    tf_pooling_32 = tf_poolingWithDiffDype('float32')
    tf_pooling_64 = tf_poolingWithDiffDype('float64')
    out_16_16_1 = tf_pooling_16(x_16).numpy().astype(np.float32)
    out_16_16_2 = tf_pooling_16(x_16).numpy().astype(np.float64)
    out_16_32 = tf_pooling_32(x_16)
    out_16_64 = tf_pooling_64(x_16)
    diff1 = np.mean(np.abs(out_16_32 - out_16_16_1))  # low precision cast up to high precision
    diff2 = np.mean(np.abs(out_16_64 - out_16_16_2))  # low precision cast up to high precision
    out_32_32_1 = tf_pooling_32(x_32)
    out_32_32_2 = tf_pooling_32(x_32).numpy().astype(np.float64)
    out_32_16 = tf_pooling_16(x_32).numpy().astype(np.float32)
    out_32_64 = tf_pooling_64(x_32)
    diff3 = np.mean(np.abs(out_32_16 - out_32_32_1))  # high precision cast down to low precision
    diff4 = np.mean(np.abs(out_32_64 - out_32_32_2))  # low precision cast up to high precision
    out_64_16 = tf_pooling_16(x_64).numpy().astype(np.float64)
    out_64_32 = tf_pooling_32(x_64).numpy().astype(np.float64)
    out_64_64 = tf_pooling_64(x_64)
    diff5 = np.mean(np.abs(out_64_16 - out_64_64))  # high precision cast down to low precision
    diff6 = np.mean(np.abs(out_64_32 - out_64_64))  # low precision cast up to high precision
    e = time.time()
    res.append(diff1)
    res.append(diff2)
    res.append(diff3)
    res.append(diff4)
    res.append(diff5)
    res.append(diff6)
    res.append(e-s)
    s = time.time()
    out_16_16 = tf_pooling_16(x_16)
    out_32_16_1 = tf_pooling_16(x_32)
    out_64_16_1 = tf_pooling_16(x_64)
    diff7 = np.mean(np.abs(out_32_16_1 - out_16_16))
    diff8 = np.mean(np.abs(out_64_16_1 - out_16_16))
    dif7 = np.max(np.abs(out_32_16_1 - out_16_16))
    dif8 = np.max(np.abs(out_64_16_1 - out_16_16))
    out_64_32_1 = tf_pooling_32(x_64)
    diff9 = np.mean(np.abs(out_16_32 - out_32_32_1))
    diff10 = np.mean(np.abs(out_64_32_1 - out_32_32_1))
    dif9 = np.max(np.abs(out_16_32 - out_32_32_1))
    dif10 = np.max(np.abs(out_64_32_1 - out_32_32_1))
    diff11 = np.mean(np.abs(out_16_64 - out_64_64))
    diff12 = np.mean(np.abs(out_32_64 - out_64_64))
    dif11 = np.max(np.abs(out_16_64 - out_64_64))
    dif12 = np.max(np.abs(out_32_64 - out_64_64))
    e = time.time()
    res.append(diff7)
    res.append(diff8)
    res.append(diff9)
    res.append(diff10)
    res.append(diff11)
    res.append(diff12)
    res.append(e - s)
    # Flag NaNs in the float32 output (only checked for the 32-bit layer).
    for n in out_32_32_1.numpy().ravel():
        if math.isnan(n):
            res.append("NAN")
            break
    csv_writer.writerow(res)
    return max(res[1:7]), max(res[8:14])
if __name__=='__main__':
corpus = createCorpus(1000)
# Max_guided(corpus, "E:\Dtype_test\Max_guided2\\tf_cpu_2.0.0\\tf_pooling.csv","E:\Dtype_test\Max_guided2\\tf_cpu_2.0.0\\tf_pooling_count.csv")
# Mean_guided(corpus,"E:\Dtype_test\Mean_guided2\\tf_cpu_2.0.0\\tf_pooling.csv","E:\Dtype_test\Mean_guided2\\tf_cpu_2.0.0\\tf_pooling_count.csv")
Max_guided(corpus,"/home/ise/opTest/data/Max_guided2/tf_gpu_2.0.0/pooling.csv","/home/ise/opTest/data/Max_guided2/tf_gpu_2.0.0/pooling_count.csv")
Mean_guided(corpus,"/home/ise/opTest/data/Mean_guided2/tf_gpu_2.0.0/pooling.csv","/home/ise/opTest/data/Mean_guided2/tf_gpu_2.0.0/pooling_count.csv") | 10,684 | 0 | 179 |
7fc1b69712b48076e1340be47e7045a99ba6ac20 | 1,813 | py | Python | defs_regression/xgb.py | BrutishGuy/hyperband-astcvs | d52562d64d2b0ba63f4bba7022114da7bb9e0281 | [
"BSD-2-Clause"
] | null | null | null | defs_regression/xgb.py | BrutishGuy/hyperband-astcvs | d52562d64d2b0ba63f4bba7022114da7bb9e0281 | [
"BSD-2-Clause"
] | null | null | null | defs_regression/xgb.py | BrutishGuy/hyperband-astcvs | d52562d64d2b0ba63f4bba7022114da7bb9e0281 | [
"BSD-2-Clause"
] | null | null | null | "function (and parameter space) definitions for hyperband"
"binary classification with XGBoost"
from common_defs import *
# a dict with x_train, y_train, x_test, y_test
from load_data_for_regression import data
from xgboost import XGBRegressor as XGB
#
trees_per_iteration = 5
space = {
'learning_rate': hp.choice( 'lr', [
'default',
hp.uniform( 'lr_', 0.01, 0.2 )
]),
'max_depth': hp.choice( 'md', [
'default',
hp.quniform( 'md_', 2, 10, 1 )
]),
'min_child_weight': hp.choice( 'mcw', [
'default',
hp.quniform( 'mcw_', 1, 10, 1 )
]),
'subsample': hp.choice( 'ss', [
'default',
hp.uniform( 'ss_', 0.5, 1.0 )
]),
'colsample_bytree': hp.choice( 'cbt', [
'default',
hp.uniform( 'cbt_', 0.5, 1.0 )
]),
'colsample_bylevel': hp.choice( 'cbl', [
'default',
hp.uniform( 'cbl_', 0.5, 1.0 )
]),
'gamma': hp.choice( 'g', [
'default',
hp.uniform( 'g_', 0, 1 )
]),
'reg_alpha': hp.choice( 'ra', [
'default',
hp.loguniform( 'ra_', log( 1e-10 ), log( 1 ))
]),
'reg_lambda': hp.choice( 'rl', [
'default',
hp.uniform( 'rl_', 0.1, 10 )
]),
'base_score': hp.choice( 'bs', [
'default',
hp.uniform( 'bs_', 0.1, 0.9 )
]),
'scale_pos_weight': hp.choice( 'spw', [
'default',
hp.uniform( 'spw', 0.1, 10 )
])
}
#
| 22.382716 | 68 | 0.594043 | "function (and parameter space) definitions for hyperband"
"binary classification with XGBoost"
from common_defs import *
# a dict with x_train, y_train, x_test, y_test
from load_data_for_regression import data
from xgboost import XGBRegressor as XGB
#
trees_per_iteration = 5
# Hyperopt search space for XGBRegressor. Each hyperparameter is a choice
# between the library default (the sentinel string 'default', stripped out
# later in get_params) and a sampled value. NOTE: hyperopt labels must be
# globally unique, which is why every inner distribution uses a
# trailing-underscore label ('lr_', 'md_', ...).
space = {
    'learning_rate': hp.choice( 'lr', [
        'default',
        hp.uniform( 'lr_', 0.01, 0.2 )
    ]),
    'max_depth': hp.choice( 'md', [
        'default',
        hp.quniform( 'md_', 2, 10, 1 )
    ]),
    'min_child_weight': hp.choice( 'mcw', [
        'default',
        hp.quniform( 'mcw_', 1, 10, 1 )
    ]),
    'subsample': hp.choice( 'ss', [
        'default',
        hp.uniform( 'ss_', 0.5, 1.0 )
    ]),
    'colsample_bytree': hp.choice( 'cbt', [
        'default',
        hp.uniform( 'cbt_', 0.5, 1.0 )
    ]),
    'colsample_bylevel': hp.choice( 'cbl', [
        'default',
        hp.uniform( 'cbl_', 0.5, 1.0 )
    ]),
    'gamma': hp.choice( 'g', [
        'default',
        hp.uniform( 'g_', 0, 1 )
    ]),
    'reg_alpha': hp.choice( 'ra', [
        'default',
        hp.loguniform( 'ra_', log( 1e-10 ), log( 1 ))
    ]),
    'reg_lambda': hp.choice( 'rl', [
        'default',
        hp.uniform( 'rl_', 0.1, 10 )
    ]),
    'base_score': hp.choice( 'bs', [
        'default',
        hp.uniform( 'bs_', 0.1, 0.9 )
    ]),
    'scale_pos_weight': hp.choice( 'spw', [
        'default',
        # fixed: the inner label was 'spw', colliding with the hp.choice
        # label above — hyperopt rejects duplicate labels
        hp.uniform( 'spw_', 0.1, 10 )
    ])
}
def get_params():
    """Sample one hyperparameter configuration from `space`.

    Entries whose sampled value is the sentinel 'default' are dropped so
    that XGBoost falls back to its built-in defaults for them.
    """
    params = sample( space )
    # Use value equality, not `is not`: identity comparison with a str
    # literal is fragile (relies on interning) and raises a SyntaxWarning
    # on modern Python.
    params = { k: v for k, v in params.items() if v != 'default' }
    return handle_integers( params )
#
def try_params( n_iterations, params, get_predictions = False ):
    """Train and evaluate an XGBRegressor for the given hyperband budget.

    Args:
        n_iterations: hyperband resource budget; converted to a tree count
            via `trees_per_iteration`.
        params: hyperparameters for XGBRegressor.
        get_predictions: accepted for interface compatibility with the other
            defs modules; not used here.
    """
    n_estimators = int( round( n_iterations * trees_per_iteration ))
    # fixed: was `rint(...)`, a NameError at call time
    print("n_estimators: "+ str(n_estimators))
    pprint( params )
    model = XGB( n_estimators = n_estimators, nthread = -1, **params )
    return train_and_eval_sklearn_regressor( model, data )
| 435 | 0 | 50 |
6f6c6d03e03b36532023680f50fe9b1064b11e82 | 672 | py | Python | src/trains/urls.py | LikimiaD/LikimiaD_project | 4967aeb5f513002abd57ae54849020f089fb0bec | [
"MIT"
] | null | null | null | src/trains/urls.py | LikimiaD/LikimiaD_project | 4967aeb5f513002abd57ae54849020f089fb0bec | [
"MIT"
] | null | null | null | src/trains/urls.py | LikimiaD/LikimiaD_project | 4967aeb5f513002abd57ae54849020f089fb0bec | [
"MIT"
] | null | null | null | from django.urls import path
from trains.views import *
urlpatterns = [
#path('', home, name = 'home'),
path('', TrainListView.as_view(), name = 'home'),
# The name of the function that allows you to generate the address dynamically
path('detail/<int:pk>/', TrainDetailView.as_view(), name = 'detail'),
path('detail/<int:pk>/', TrainDetailView.as_view(), name = 'detail'),
path('update/<int:pk>/', TrainUpdateView.as_view(), name = 'update'),
path('delete/<int:pk>/', TrainDeleteView.as_view(), name = 'delete'),
# Can get an integer representation as "pk" and pass it
path('add/', TrainCreateView.as_view(), name = 'create'),
] | 39.529412 | 82 | 0.650298 | from django.urls import path
from trains.views import *
urlpatterns = [
    #path('', home, name = 'home'),
    path('', TrainListView.as_view(), name = 'home'),
    # The name of the function that allows you to generate the address dynamically
    path('detail/<int:pk>/', TrainDetailView.as_view(), name = 'detail'),
    # (removed a duplicate 'detail' entry — the identical route/name was registered twice)
    path('update/<int:pk>/', TrainUpdateView.as_view(), name = 'update'),
    path('delete/<int:pk>/', TrainDeleteView.as_view(), name = 'delete'),
    # Can get an integer representation as "pk" and pass it
    path('add/', TrainCreateView.as_view(), name = 'create'),
]
1a91ddab7ed60e55d202c1cd84ef4067715b3371 | 2,300 | py | Python | pycrust/tools/mako.py | alertedsnake/pycrust | ceb5da9ff92b892adc9a3057afdfe84f3e529313 | [
"MIT"
] | 1 | 2019-03-25T05:33:30.000Z | 2019-03-25T05:33:30.000Z | pycrust/tools/mako.py | iwannaPython/pycrust | ceb5da9ff92b892adc9a3057afdfe84f3e529313 | [
"MIT"
] | null | null | null | pycrust/tools/mako.py | iwannaPython/pycrust | ceb5da9ff92b892adc9a3057afdfe84f3e529313 | [
"MIT"
] | 1 | 2019-03-25T05:33:11.000Z | 2019-03-25T05:33:11.000Z | """
Mako Templates
--------------
Mako templating code was based on the code and discussion at
http://tools.cherrypy.org/wiki/Mako
To use the Mako renderer:
cherrypy.tools.mako = cherrypy.Tool('on_start_resource',
MakoLoader(directories=['/path/to/templates']))
Then in your handler:
@cherrypy.tools.mako(filename='index.html')
def index(self):
return {}
"""
from mako.lookup import TemplateLookup
import cherrypy
try:
import simplejson as json
except ImportError:
import json
from pycrust import url
class MakoHandler(cherrypy.dispatch.LateParamPageHandler):
"""Callable which sets response.body."""
class MakoLoader(object):
"""Template loader for Mako"""
| 28.75 | 94 | 0.593043 | """
Mako Templates
--------------
Mako templating code was based on the code and discussion at
http://tools.cherrypy.org/wiki/Mako
To use the Mako renderer:
cherrypy.tools.mako = cherrypy.Tool('on_start_resource',
MakoLoader(directories=['/path/to/templates']))
Then in your handler:
@cherrypy.tools.mako(filename='index.html')
def index(self):
return {}
"""
from mako.lookup import TemplateLookup
import cherrypy
try:
import simplejson as json
except ImportError:
import json
from pycrust import url
class MakoHandler(cherrypy.dispatch.LateParamPageHandler):
    """Callable which sets response.body."""

    def __init__(self, template, next_handler):
        self.template = template
        self.next_handler = next_handler

    def __call__(self):
        # Template namespace: module globals, then the wrapped handler's
        # result, then a couple of always-available helpers.
        context = dict(globals())
        context.update(self.next_handler())
        ## Add any default session globals
        context['session'] = cherrypy.session
        context['url'] = url
        return self.template.render_unicode(**context)
class MakoLoader(object):
    """Template loader for Mako.

    Caches one TemplateLookup per (directories, module_directory) pair and,
    when invoked as a CherryPy tool, wraps the current request handler in a
    MakoHandler that renders the requested template.
    """

    def __init__(self, directories=None):
        """
        Args:
            directories: default template search paths, used when a tool
                invocation does not pass its own ``directories``.
        """
        self.lookups = {}
        # Fix: `directories=[]` was a shared mutable default argument, so
        # every instance created without the kwarg aliased the same list.
        self.directories = [] if directories is None else directories

    def __call__(self, filename, directories=None, module_directory=None, collection_size=-1):
        """Install a MakoHandler for `filename` on the current request."""
        if not directories:
            directories = self.directories
        # Find (or lazily create and cache) the appropriate template lookup.
        key = (tuple(directories), module_directory)
        try:
            lookup = self.lookups[key]
        except KeyError:
            lookup = TemplateLookup(directories=directories,
                                    module_directory=module_directory,
                                    collection_size=collection_size,
                                    input_encoding='utf-8',
                                    output_encoding='utf-8',
                                    encoding_errors='replace'
                                    )
            self.lookups[key] = lookup
        cherrypy.request.lookup = lookup
        # Replace the current handler so the template gets rendered.
        cherrypy.request.template = t = lookup.get_template(filename)
        cherrypy.request.handler = MakoHandler(t, cherrypy.request.handler)
| 1,451 | 0 | 108 |
a50f82ff1fb6df2ed0a4489725b3568ae23502d0 | 8,950 | py | Python | offline/assistant.py | iseaboy/ok_google | f87bf62b012f8d186da7e4575064ae8e986216e9 | [
"Apache-2.0"
] | 8 | 2017-05-29T03:11:59.000Z | 2018-08-28T02:35:37.000Z | offline/assistant.py | iseaboy/ok_google | f87bf62b012f8d186da7e4575064ae8e986216e9 | [
"Apache-2.0"
] | 2 | 2017-05-12T23:43:56.000Z | 2018-05-31T13:19:37.000Z | offline/assistant.py | iseaboy/ok_google | f87bf62b012f8d186da7e4575064ae8e986216e9 | [
"Apache-2.0"
] | 3 | 2017-09-20T01:53:13.000Z | 2018-04-24T04:54:03.000Z | # Copyright (C) 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ctypes import (CFUNCTYPE,
cdll,
c_bool, c_char_p, c_int, c_uint, c_void_p)
#from auth_helpers import CredentialsRefresher
from event import Event, IterableEventQueue
LISTENER = CFUNCTYPE(None, c_int, c_char_p)
class UnsupportedPlatformError(Exception):
"""Raised if the OS is unsupported by the Assistant."""
pass
class Assistant(object):
"""Client for the Google Assistant Library.
Provides basic control functionality and lifecycle handling for the Google
Assistant. It is best practice to use the Assistant as a ContextManager:
with Assistant(credentials) as assistant:
This allows the underlying native implementation to properly handle memory
management. Once started, the Assistant generates a stream of Events
relaying the various states the Assistant is currently in, for example:
ON_CONVERSATION_TURN_STARTED
ON_END_OF_UTTERANCE
ON_RECOGNIZING_SPEECH_FINISHED:
{'text': 'what time is it'}
ON_RESPONDING_STARTED:
{'is_error_response': False}
ON_RESPONDING_FINISHED
ON_CONVERSATION_TURN_FINISHED:
{'with_follow_on_turn': False}
See google.assistant.event.EventType for details on all events and their
arguments.
Glossary:
Hotword: The phrase the Assistant listens for when not muted:
"OK Google" OR "Hey Google"
Turn: A single user request followed by a response from the Assistant.
Conversation: One or more turns which result in a desired final result
from the Assistant:
"What time is it?" -> "The time is 6:24 PM" OR
"Set a timer" -> "Okay, for how long?" ->
"5 minutes" -> "Sure, 5 minutes, starting now!"
"""
def __init__(self, credentials):
"""Initializes a new Assistant with OAuth2 credentials.
If the user has not yet logged into the Assistant, then a new
authentication flow will be started asking the user to login. Once
initialized, the Assistant will be ready to start (see self.start()).
Args:
credentials(google.oauth2.credentials.Credentials): The user's
Google OAuth2 credentials.
Raises:
UnsupportedPlatformError: If the current processor/operating system
is not supported by the Google Assistant.
"""
self._event_queue = IterableEventQueue()
self._load_lib()
self._credentials_refresher = None
self._event_callback = LISTENER(self)
self._inst = c_void_p(
self._lib.assistant_new(self._event_callback))
# self._credentials_refresher = CredentialsRefresher(
# credentials, self._set_credentials)
# self._credentials_refresher.start()
def __enter__(self):
"""Returns self."""
return self
def __exit__(self, exception_type, exception_value, traceback):
"""Frees allocated memory belonging to the Assistant."""
if self._credentials_refresher:
self._credentials_refresher.stop()
self._credentials_refresher = None
self._lib.assistant_free(self._inst)
def __call__(self, event_type, event_data):
"""Adds a new event to the event queue returned from start().
Args:
event_type(int): A numeric id corresponding to an event in
google.assistant.event.EventType.
event_data(str): A serialized JSON string with key/value pairs
for event arguments.
"""
self._event_queue.offer(Event(event_type, event_data))
def start(self):
"""Starts the Assistant, which includes listening for a hotword.
Once start() is called, the Assistant will begin processing data from
the 'default' ALSA audio source, listening for the hotword. This will
also start other services provided by the Assistant, such as
timers/alarms. This method can only be called once. Once called, the
Assistant will continue to run until __exit__ is called.
Returns:
google.assistant.event.IterableEventQueue: A queue of events
that notify of changes to the Assistant state.
"""
self._lib.assistant_start(self._inst)
return self._event_queue
def set_mic_mute(self, is_muted):
"""Stops the Assistant from listening for the hotword.
Allows for disabling the Assistant from listening for the hotword.
This provides functionality similar to the privacy button on the back
of Google Home.
This method is a no-op if the Assistant has not yet been started.
Args:
is_muted(bool): True stops the Assistant from listening and False
allows it to start again.
"""
self._lib.assistant_set_mic_mute(self._inst, is_muted)
def start_conversation(self):
"""Manually starts a new conversation with the Assistant.
Starts both recording the user's speech and sending it to Google,
similar to what happens when the Assistant hears the hotword.
This method is a no-op if the Assistant is not started or has been
muted.
"""
self._lib.assistant_start_conversation(self._inst)
def stop_conversation(self):
"""Stops any active conversation with the Assistant.
The Assistant could be listening to the user's query OR responding. If
there is no active conversation, this is a no-op.
"""
self._lib.assistant_stop_conversation(self._inst)
def _set_credentials(self, credentials):
"""Sets Google account OAuth2 credentials for the current user.
Args:
credentials(google.oauth2.credentials.Credentials): OAuth2
Google account credentials for the current user.
"""
# The access_token should always be made up of only ASCII
# characters so this encoding should never fail.
access_token = credentials.token.encode('ascii')
self._lib.assistant_set_access_token(self._inst,
access_token, len(access_token))
def _load_lib(self):
"""Dynamically loads the Google Assistant Library.
Automatically selects the correct shared library for the current
platform and sets up bindings to its C interface.
Raises:
UnsupportedPlatformError: If the current processor or OS
is not supported by the Google Assistant.
"""
os_name = os.uname()[0]
platform = os.uname()[4]
lib_name = 'libassistant_embedder_' + platform + '.so'
lib_path = os.path.join(os.path.dirname(__file__), lib_name)
if os_name != 'Linux' or not os.path.isfile(lib_path):
raise UnsupportedPlatformError(platform + ' is not supported.')
self._lib = cdll.LoadLibrary(lib_path)
# void* assistant_new(EventCallback listener);
self._lib.assistant_new.arg_types = [LISTENER]
self._lib.assistant_new.restype = c_void_p
# void assistant_free(void* instance);
self._lib.assistant_free.argtypes = [c_void_p]
self._lib.assistant_free.restype = None
# void assistant_start(void* assistant);
self._lib.assistant_start.arg_types = [c_void_p]
self._lib.assistant_start.res_type = None
# void assistant_set_access_token(
# void* assistant, const char* access_token, size_t length);
self._lib.assistant_set_access_token.arg_types = [
c_void_p, c_char_p, c_uint
]
self._lib.assistant_set_access_token.res_type = None
# void assistant_set_mic_mute(void* assistant, bool is_muted);
self._lib.assistant_set_mic_mute.arg_types = [c_void_p, c_bool]
self._lib.assistant_set_mic_mute.res_type = None
# void assistant_start_conversation(void* assistant);
self._lib.assistant_start_conversation.arg_types = [c_void_p]
self._lib.assistant_start_conversation.res_type = None
# void assistant_stop_conversation(void* assistant);
self._lib.assistant_stop_conversation.arg_types = [c_void_p]
self._lib.assistant_stop_conversation.res_type = None
| 38.247863 | 79 | 0.67095 | # Copyright (C) 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from ctypes import (CFUNCTYPE,
cdll,
c_bool, c_char_p, c_int, c_uint, c_void_p)
#from auth_helpers import CredentialsRefresher
from event import Event, IterableEventQueue
LISTENER = CFUNCTYPE(None, c_int, c_char_p)
class UnsupportedPlatformError(Exception):
    """Raised if the OS is unsupported by the Assistant."""
class Assistant(object):
    """Client for the Google Assistant Library.

    Provides basic control functionality and lifecycle handling for the Google
    Assistant. It is best practice to use the Assistant as a ContextManager:

        with Assistant(credentials) as assistant:

    This allows the underlying native implementation to properly handle memory
    management. Once started, the Assistant generates a stream of Events
    relaying the various states the Assistant is currently in, for example:

        ON_CONVERSATION_TURN_STARTED
        ON_END_OF_UTTERANCE
        ON_RECOGNIZING_SPEECH_FINISHED:
            {'text': 'what time is it'}
        ON_RESPONDING_STARTED:
            {'is_error_response': False}
        ON_RESPONDING_FINISHED
        ON_CONVERSATION_TURN_FINISHED:
            {'with_follow_on_turn': False}

    See google.assistant.event.EventType for details on all events and their
    arguments.

    Glossary:
        Hotword: The phrase the Assistant listens for when not muted:
            "OK Google" OR "Hey Google"
        Turn: A single user request followed by a response from the Assistant.
        Conversation: One or more turns which result in a desired final result
            from the Assistant:
            "What time is it?" -> "The time is 6:24 PM" OR
            "Set a timer" -> "Okay, for how long?" ->
            "5 minutes" -> "Sure, 5 minutes, starting now!"
    """

    def __init__(self, credentials):
        """Initializes a new Assistant with OAuth2 credentials.

        If the user has not yet logged into the Assistant, then a new
        authentication flow will be started asking the user to login. Once
        initialized, the Assistant will be ready to start (see self.start()).

        Args:
            credentials(google.oauth2.credentials.Credentials): The user's
                Google OAuth2 credentials.

        Raises:
            UnsupportedPlatformError: If the current processor/operating system
                is not supported by the Google Assistant.
        """
        self._event_queue = IterableEventQueue()
        self._load_lib()
        self._credentials_refresher = None
        # Keep a reference to the ctypes callback so it is not garbage
        # collected while the native library still holds a pointer to it.
        self._event_callback = LISTENER(self)
        self._inst = c_void_p(
            self._lib.assistant_new(self._event_callback))
        # self._credentials_refresher = CredentialsRefresher(
        #     credentials, self._set_credentials)
        # self._credentials_refresher.start()

    def __enter__(self):
        """Returns self."""
        return self

    def __exit__(self, exception_type, exception_value, traceback):
        """Frees allocated memory belonging to the Assistant."""
        if self._credentials_refresher:
            self._credentials_refresher.stop()
            self._credentials_refresher = None
        self._lib.assistant_free(self._inst)

    def __call__(self, event_type, event_data):
        """Adds a new event to the event queue returned from start().

        Args:
            event_type(int): A numeric id corresponding to an event in
                google.assistant.event.EventType.
            event_data(str): A serialized JSON string with key/value pairs
                for event arguments.
        """
        self._event_queue.offer(Event(event_type, event_data))

    def start(self):
        """Starts the Assistant, which includes listening for a hotword.

        Once start() is called, the Assistant will begin processing data from
        the 'default' ALSA audio source, listening for the hotword. This will
        also start other services provided by the Assistant, such as
        timers/alarms. This method can only be called once. Once called, the
        Assistant will continue to run until __exit__ is called.

        Returns:
            google.assistant.event.IterableEventQueue: A queue of events
                that notify of changes to the Assistant state.
        """
        self._lib.assistant_start(self._inst)
        return self._event_queue

    def set_mic_mute(self, is_muted):
        """Stops the Assistant from listening for the hotword.

        Allows for disabling the Assistant from listening for the hotword.
        This provides functionality similar to the privacy button on the back
        of Google Home.

        This method is a no-op if the Assistant has not yet been started.

        Args:
            is_muted(bool): True stops the Assistant from listening and False
                allows it to start again.
        """
        self._lib.assistant_set_mic_mute(self._inst, is_muted)

    def start_conversation(self):
        """Manually starts a new conversation with the Assistant.

        Starts both recording the user's speech and sending it to Google,
        similar to what happens when the Assistant hears the hotword.

        This method is a no-op if the Assistant is not started or has been
        muted.
        """
        self._lib.assistant_start_conversation(self._inst)

    def stop_conversation(self):
        """Stops any active conversation with the Assistant.

        The Assistant could be listening to the user's query OR responding. If
        there is no active conversation, this is a no-op.
        """
        self._lib.assistant_stop_conversation(self._inst)

    def _set_credentials(self, credentials):
        """Sets Google account OAuth2 credentials for the current user.

        Args:
            credentials(google.oauth2.credentials.Credentials): OAuth2
                Google account credentials for the current user.
        """
        # The access_token should always be made up of only ASCII
        # characters so this encoding should never fail.
        access_token = credentials.token.encode('ascii')
        self._lib.assistant_set_access_token(self._inst,
                                             access_token, len(access_token))

    def _load_lib(self):
        """Dynamically loads the Google Assistant Library.

        Automatically selects the correct shared library for the current
        platform and sets up bindings to its C interface.

        Raises:
            UnsupportedPlatformError: If the current processor or OS
                is not supported by the Google Assistant.
        """
        os_name = os.uname()[0]
        platform = os.uname()[4]
        lib_name = 'libassistant_embedder_' + platform + '.so'
        lib_path = os.path.join(os.path.dirname(__file__), lib_name)
        if os_name != 'Linux' or not os.path.isfile(lib_path):
            raise UnsupportedPlatformError(platform + ' is not supported.')
        self._lib = cdll.LoadLibrary(lib_path)
        # NOTE: ctypes only honors the exact attribute names `argtypes` and
        # `restype`. The previous `arg_types`/`res_type` spellings were
        # silently ignored, so most of these bindings had no argument or
        # return type declarations at all. Fixed below.
        # void* assistant_new(EventCallback listener);
        self._lib.assistant_new.argtypes = [LISTENER]
        self._lib.assistant_new.restype = c_void_p
        # void assistant_free(void* instance);
        self._lib.assistant_free.argtypes = [c_void_p]
        self._lib.assistant_free.restype = None
        # void assistant_start(void* assistant);
        self._lib.assistant_start.argtypes = [c_void_p]
        self._lib.assistant_start.restype = None
        # void assistant_set_access_token(
        #     void* assistant, const char* access_token, size_t length);
        self._lib.assistant_set_access_token.argtypes = [
            c_void_p, c_char_p, c_uint
        ]
        self._lib.assistant_set_access_token.restype = None
        # void assistant_set_mic_mute(void* assistant, bool is_muted);
        self._lib.assistant_set_mic_mute.argtypes = [c_void_p, c_bool]
        self._lib.assistant_set_mic_mute.restype = None
        # void assistant_start_conversation(void* assistant);
        self._lib.assistant_start_conversation.argtypes = [c_void_p]
        self._lib.assistant_start_conversation.restype = None
        # void assistant_stop_conversation(void* assistant);
        self._lib.assistant_stop_conversation.argtypes = [c_void_p]
        self._lib.assistant_stop_conversation.restype = None
| 0 | 0 | 0 |
0aed6119e19574f31d9d53692882645b330f888a | 9,360 | py | Python | pyautonifty/renderer.py | Markichu/PythonAutoNifty | ab646601058297b6bfe14332f17b836ee3dfbe69 | [
"MIT"
] | null | null | null | pyautonifty/renderer.py | Markichu/PythonAutoNifty | ab646601058297b6bfe14332f17b836ee3dfbe69 | [
"MIT"
] | 6 | 2021-11-24T00:48:57.000Z | 2022-03-17T07:51:36.000Z | pyautonifty/renderer.py | Markichu/PythonAutoNifty | ab646601058297b6bfe14332f17b836ee3dfbe69 | [
"MIT"
] | null | null | null | import datetime
import os
import time
import numpy as np
from PIL import Image
# Hide the Pygame support message
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = str()
import pygame
from .constants import BLACK, WHITE, DRAWING_SIZE, TITLE_BAR_HEIGHT, BORDER_WIDTH
from .helper_fns import get_bezier_curve, alpha_blend
# Render the lines to preview in Pygame | 45.658537 | 123 | 0.588248 | import datetime
import os
import time
import numpy as np
from PIL import Image
# Hide the Pygame support message
os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = str()
import pygame
from .constants import BLACK, WHITE, DRAWING_SIZE, TITLE_BAR_HEIGHT, BORDER_WIDTH
from .helper_fns import get_bezier_curve, alpha_blend
class Renderer:
    """Pygame-based renderer for a drawing.

    A "drawing" is an iterable of line dicts, each carrying a 'brushRadius',
    a CSS-style 'brushColor' string ("rgb(...)" or "rgba(...)"), and a list of
    'points' objects exposing .x and .y in DRAWING_SIZE coordinates.
    The renderer draws to a (possibly headless) Pygame window and saves the
    result as a PNG file.
    """

    def __init__(self, headless=False, pygame_scale=None):
        """Initialise Pygame and create the render window.

        headless: if True, use SDL's 'dummy' video driver (no visible window).
        pygame_scale: explicit scale factor from drawing units to pixels;
            if None, it is computed to fit the screen (or 1 when headless).
        """
        self.pygame_scale = pygame_scale
        self.headless = headless
        # Set a fake video driver to hide output
        if headless:
            os.environ['SDL_VIDEODRIVER'] = 'dummy'
            # No screen to get the dimensions, just render at normal size
            if pygame_scale is None:
                self.pygame_scale = 1
        # Init pygame
        pygame.init()
        # Reposition and change size of surface to account for title bar
        if not headless:
            info_object = pygame.display.Info()
            smallest_dimension = min(info_object.current_w, info_object.current_h)
            # Centre the window horizontally, offset below the title bar
            x = round((info_object.current_w - (smallest_dimension - TITLE_BAR_HEIGHT - (BORDER_WIDTH * 2))) / 2)
            y = TITLE_BAR_HEIGHT + BORDER_WIDTH
            os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (x, y)
            # Scale the window and drawing to the maximum square size
            if pygame_scale is None:
                self.pygame_scale = (smallest_dimension - TITLE_BAR_HEIGHT - (BORDER_WIDTH * 2)) / DRAWING_SIZE
        # Initialise the window with dimensions
        self.pygame_x = round(DRAWING_SIZE * self.pygame_scale)
        self.pygame_y = round(DRAWING_SIZE * self.pygame_scale)
        self.screen = pygame.display.set_mode((self.pygame_x, self.pygame_y))
        pygame.display.set_caption("Drawing Render")

    # Render the lines to preview in Pygame
    def render(self, drawing, filename="output.png", simulate=False, speed=None,
               allow_transparency=False, fake_transparency=False, proper_line_thickness=False, draw_as_bezier=False,
               step_size=10, save_transparent_bg=False, green_screen_colour=(0, 177, 64, 255)):
        """Draw `drawing` to the window and save it to `filename`.

        drawing: iterable of line dicts (see class docstring).
        filename: output path; '%s' is replaced by the current unix timestamp
            and strftime format codes are expanded.
        simulate: update the display after each line to animate the drawing.
        speed: per-line delay in hundredths of a second when simulating.
        allow_transparency: composite semi-transparent colours via per-line
            alpha surfaces instead of drawing them opaque.
        fake_transparency: alpha-blend semi-transparent colours against white
            (only used when the colour is drawn opaque).
        proper_line_thickness: draw polygon-based strokes with round end caps
            instead of pygame's built-in circles/lines.
        draw_as_bezier: smooth the stroke with quadratic bezier segments
            (requires proper_line_thickness).
        step_size: number of interpolation steps per bezier segment (min 2).
        save_transparent_bg: render on a green-screen colour and key it out to
            produce a transparent-background PNG.
        green_screen_colour: RGBA backdrop used when save_transparent_bg.
        """
        if step_size < 2:
            step_size = 2
        self.screen.fill(green_screen_colour) if save_transparent_bg else self.screen.fill(WHITE)
        pygame.display.update()  # Show the background, (so the screen isn't black on drawings that are slow to process)

        def draw_line(surface, colour, start_point, end_point, width, end_caps=False):
            """Draw one thick segment as a filled quad (plus optional round caps)."""
            if end_caps:
                pygame.draw.circle(surface, colour, start_point, width / 2)
                pygame.draw.circle(surface, colour, end_point, width / 2)
            # Zero-length segment: the caps (if any) are all there is to draw
            if start_point == end_point:
                return
            # Silence divide-by-zero warnings from the normalisation below
            np.seterr(divide='ignore', invalid='ignore')
            vec_start_point = np.array(start_point)
            vec_end_point = np.array(end_point)
            move_point = vec_end_point - vec_start_point
            norm_move = move_point / np.linalg.norm(move_point)
            # Perpendicular unit vector scaled to half the stroke width
            rotated_vec = np.array((-norm_move[1], norm_move[0])) * width / 2
            start_point_1 = vec_start_point + rotated_vec
            start_point_2 = vec_start_point - rotated_vec
            end_point_1 = vec_end_point + rotated_vec
            end_point_2 = vec_end_point - rotated_vec
            pygame.draw.polygon(surface, colour, [start_point_1, start_point_2, end_point_2, end_point_1], width=0)

        def draw_lines(surface, colour, pts, width, end_caps=False):
            """Draw a polyline as consecutive thick segments."""
            last_point = None
            for pt in pts:
                if last_point:
                    draw_line(surface, colour, last_point, pt, width, end_caps=end_caps)
                last_point = pt

        def get_midpoint(p1, p2):
            """Return the midpoint of two 2D points as [x, y]."""
            x = (p1[0] + p2[0]) / 2
            y = (p1[1] + p2[1]) / 2
            return [x, y]

        def draw_quadratic_bezier_curve_line(surface, colour, pts, width, end_caps=False, step_size=40):
            """Draw a smoothed polyline: each input point becomes the control
            point of a quadratic bezier between neighbouring midpoints."""
            if pts:
                last_midpoint = pts[0]
                midpoint = last_midpoint
                p2 = last_midpoint
                for i in range(len(pts)):
                    p1 = pts[i]
                    try:
                        p2 = pts[i + 1]
                        midpoint = get_midpoint(p1, p2)
                        # TODO: Write some code to create an appropriate step_size, likely based on the bezier curve length
                        bezier_curve_points = get_bezier_curve((last_midpoint, p1, midpoint), step_size=step_size,
                                                               end_point=True)
                        draw_lines(surface, colour, bezier_curve_points, width, end_caps=end_caps)
                        last_midpoint = midpoint
                    except IndexError:  # Draw the last point as a straight line to finish
                        draw_line(surface, colour, midpoint, p2, width, end_caps=end_caps)

        for line in drawing:
            brush_radius = line["brushRadius"] * self.pygame_scale
            # Parse the CSS-style colour string into [r, g, b, a] with a in 0-255
            if "rgba" in line["brushColor"]:
                colour = [float(cell) for cell in list(line["brushColor"][5:-1].split(","))]
                colour[3] *= 255
            else:
                colour = [float(cell) for cell in list(line["brushColor"][4:-1].split(","))]
                colour.append(255)
            points = []
            if colour[3] != 255 and allow_transparency:  # If the brushColour is transparent, draw with transparency
                # Draw onto a throwaway surface so the whole stroke gets one alpha blit
                target_surface = pygame.Surface((self.pygame_x, self.pygame_y), 0, 32)
                if colour[:-1] != [0, 0, 0]:
                    target_surface.set_colorkey(BLACK)
                else:  # Handle the black edge case
                    target_surface.set_colorkey(WHITE)
                    target_surface.fill((255, 255, 255, 0))
                target_surface.set_alpha(round(colour[3]))
            else:  # If the brushColour is opaque, draw with no transparency
                if fake_transparency:
                    colour = alpha_blend(colour[3] / 255, colour[:-1], [255, 255, 255])
                target_surface = self.screen
            for index, point in enumerate(line["points"]):
                this_point = (point.x * self.pygame_scale, point.y * self.pygame_scale)
                points.append(this_point)
                if not proper_line_thickness:
                    pygame.draw.circle(target_surface, colour, this_point, int(brush_radius))
            if proper_line_thickness:
                if draw_as_bezier:
                    draw_quadratic_bezier_curve_line(target_surface, colour, points, brush_radius * 2,
                                                     end_caps=True, step_size=step_size)
                else:
                    draw_lines(target_surface, colour, points, brush_radius * 2, end_caps=True)
            else:
                pygame.draw.lines(target_surface, colour, False, points, int(brush_radius * 2))
            # Required for transparency
            if colour[3] != 255 and allow_transparency:
                self.screen.blit(target_surface, (0, 0))
            # Update the drawing line by line to see the drawing process
            if simulate:
                pygame.display.update()
                if speed and speed != 0:
                    time.sleep(speed / 100)
            # Ensure that no events, such as pygame being closed are ignored.
            ev = pygame.event.get()
            for event in ev:
                if event.type == pygame.QUIT:
                    # Exits before the image is finished, does not take screenshot.
                    return
        # update screen to render the final result of the drawing
        pygame.display.update()
        # format the filename to include the time how the user chooses
        current_time = datetime.datetime.now()
        filename = filename.replace('%s', str(int(current_time.timestamp())))
        formatted_filename = current_time.strftime(filename)
        # TODO: Figure out if Pygame has a method to save a surface with a transparent background
        if save_transparent_bg:
            # Save the image with a transparent background
            image_string = pygame.image.tostring(self.screen, 'RGBA', False)
            img = Image.frombytes("RGBA", self.screen.get_size(), image_string)
            # https://stackoverflow.com/a/69814643
            def convert_png_transparent(image, dst_file, bg_color=(255, 255, 255)):
                """Key out bg_color: set alpha to 0 wherever the pixel matches it."""
                array = np.array(image, dtype=np.ubyte)
                mask = (array[:, :, :3] == bg_color).all(axis=2)
                alpha = np.where(mask, 0, 255)
                array[:, :, -1] = alpha
                Image.fromarray(np.ubyte(array)).save(dst_file, "PNG")
            convert_png_transparent(img, formatted_filename, [*green_screen_colour[:-1]])
        else:
            # Save the image without a transparent background
            pygame.image.save(self.screen, formatted_filename)
        # enter a loop to prevent pygame from ending
        running = True
        while running and not self.headless:
            ev = pygame.event.get()
            for event in ev:
                if event.type == pygame.QUIT:
                    running = False
                    break
            time.sleep(0.2)  # Sleep for a short time. Prevents continual use of CPU.
70768d686e0a1a459230775315038d99092f820f | 59,179 | py | Python | XGB_Model.py | whitelightning450/Machine-Learning-Water-Systems-Model | 9e07dd2402ef614dcf404cd28bee518ced7047ad | [
"MIT"
] | null | null | null | XGB_Model.py | whitelightning450/Machine-Learning-Water-Systems-Model | 9e07dd2402ef614dcf404cd28bee518ced7047ad | [
"MIT"
] | 6 | 2022-03-28T17:47:04.000Z | 2022-03-28T20:49:51.000Z | XGB_Model.py | whitelightning450/Machine-Learning-Water-Systems-Model | 9e07dd2402ef614dcf404cd28bee518ced7047ad | [
"MIT"
] | 2 | 2022-02-22T19:48:46.000Z | 2022-03-28T03:51:03.000Z | #Script developed by Ryan C. Johnson, University of Alabama for the
#Salt Lake City Climate Vulnerability Project.
#Date: 3/4/2022
# coding: utf-8
import xgboost as xgb
from xgboost.sklearn import XGBRegressor
from xgboost import cv
import time
import pickle
import joblib
from pickle import dump
import numpy as np
import copy
from collinearity import SelectNonCollinear
from sklearn.feature_selection import f_regression
import pandas as pd
import seaborn as sns
from sklearn.feature_selection import RFE
import xgboost as xgb
from xgboost.sklearn import XGBRegressor
from xgboost import cv
from sklearn.pipeline import Pipeline
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import cross_val_score
from numpy import mean
from numpy import std
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
from progressbar import ProgressBar
from collections import defaultdict
import jenkspy
from matplotlib.dates import MonthLocator, DateFormatter
#Make a plot of predictions
#Developing the XGBoost_Tuning package
# evaluate a given model using cross-validation
#These are the top features for XBoost
#RFE feature selection is a good starting point, but these features optimize predictive performance
#Model Training Function
#XGB Prediction Engine
#Data Processing needed to make a prediction
#This uses the XGB model to make predictions for each water system component at a daily time step.
#A function to calculate the daily mean values for each water system component
#Perform a historical analysis of each WSC to compare performance of current scenario
#Create historical RRV Analysis to define historical RRV thresholds to compare predictions with
#we need to calculate the RRV metrics
| 40.31267 | 248 | 0.6102 | #Script developed by Ryan C. Johnson, University of Alabama for the
#Salt Lake City Climate Vulnerability Project.
#Date: 3/4/2022
# coding: utf-8
import xgboost as xgb
from xgboost.sklearn import XGBRegressor
from xgboost import cv
import time
import pickle
import joblib
from pickle import dump
import numpy as np
import copy
from collinearity import SelectNonCollinear
from sklearn.feature_selection import f_regression
import pandas as pd
import seaborn as sns
from sklearn.feature_selection import RFE
import xgboost as xgb
from xgboost.sklearn import XGBRegressor
from xgboost import cv
from sklearn.pipeline import Pipeline
from sklearn.model_selection import RepeatedKFold
from sklearn.model_selection import cross_val_score
from numpy import mean
from numpy import std
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
from progressbar import ProgressBar
from collections import defaultdict
import jenkspy
from matplotlib.dates import MonthLocator, DateFormatter
class XGB_model():
    """Wrapper around an XGBoost regressor for one water-system component.

    Handles training (fit), single-row prediction (predict), sequential
    April-October prediction with previous-timestep feedback (XGB_Predict),
    and a diagnostic plot (PredictionPerformancePlot).
    """
    def __init__(self, Target, cwd):
        """Target: name of the target column; cwd: project root for model I/O."""
        self = self  # no-op assignment (kept as-is; has no effect)
        self.Target = Target
        # Derived column names used throughout the prediction/analysis steps
        self.Prediction = self.Target+'_Pred'
        self.Prediction_Rolling = self.Prediction+'_Rolling'
        self.T_initial = self.Target+'_Initial'
        self.cwd = cwd
    def fit(self,param, X,y, M_save_filepath):
        """Train an XGBoost booster on features X / targets y and pickle it.

        param: xgboost parameter dict; must contain 'num_boost_round'.
        M_save_filepath: path (relative to self.cwd) for the pickled model.
        """
        self.param=param
        self.num_round=param['num_boost_round']
        start_time = time.time()
        print('Model Training')
        # Keep only the single target column
        y = y[self.Target]
        feature_names = list(X.columns)
        dtrain = xgb.DMatrix(np.array(X), label=np.array(y),feature_names=feature_names)
        model = xgb.Booster(self.param, [dtrain])
        model = xgb.train(self.param,dtrain,num_boost_round=self.num_round, xgb_model=model)
        c_time = round(time.time() - start_time,2)
        print('Calibration time', round(c_time), 's')
        print('Saving Model')
        #adjust this to match changing models
        pickle.dump(model, open(self.cwd + M_save_filepath, "wb"))
        self.model_=model
    def predict(self,X, model):
        """Return model predictions for the feature frame X."""
        self.model_=model
        dtest=xgb.DMatrix(X)
        return self.model_.predict(dtest)
    def XGB_Predict(self, test_feat, test_targ):
        """Predict the full test period one day at a time.

        Each day's prediction is written back into the next row's
        '<Target>_Initial' feature (when present), so predictions feed forward.
        Stores a DataFrame of observed vs predicted (plus a 5-day rolling
        mean) on self.Analysis.
        """
        #Make predictions with the model
        model = pickle.load(open(self.cwd+"/Model_History/V2/XGBoost_"+self.Target+".dat", "rb"))
        start_time = time.time()
        #since the previous timestep is being used, we need to predict this value
        predict = []
        featcol = test_feat.columns
        for i in range(0,(len(test_feat)-1),1):
            # Reshape one row back into a 1xN DataFrame for DMatrix
            t_feat = np.array(test_feat.iloc[i])
            t_feat = t_feat.reshape(1,len(t_feat))
            t_feat = pd.DataFrame(t_feat, columns = featcol)
            p = self.predict(t_feat, model)
            # Feed today's prediction into tomorrow's "initial" feature
            if self.T_initial in featcol:
                test_feat[self.T_initial].iloc[(i+1)] = p
            predict.append(p[0])
        #need to manually add one more prediction
        predict.append(predict[-1])
        #add physical limitations to predictions
        if self.Target =='SLCDPU_GW':
            predict = np.array(predict)
            # Cap groundwater at the physical capacity (89.49 -- units per model config)
            predict[predict > 89.49] = 89.49
        #Use this line for PCA
        #predict = Targ_scaler.inverse_transform(predict.reshape(len(predict),1))
        c_time = round(time.time() - start_time,8)
        print('prediction time', round(c_time), 's')
        #Analyze model performance
        #use this line for PCA
        #Targ_scaler.inverse_transform(test_targ)
        Analysis = pd.DataFrame(test_targ, columns = [self.Target])
        Analysis[self.Prediction] = predict
        Analysis[self.Prediction_Rolling] = Analysis[self.Prediction].rolling(5).mean()
        # Backfill the first rolling-window days so the column has no leading NaNs
        Analysis[self.Prediction_Rolling] = Analysis[self.Prediction_Rolling].interpolate(method='linear',
                                                                                          limit_direction='backward',
                                                                                          limit=5)
        RMSEpred = mean_squared_error(Analysis[self.Target],Analysis[self.Prediction], squared=False)
        RMSErolling = mean_squared_error(Analysis[self.Target],Analysis[self.Prediction_Rolling], squared=False)
       # print('RMSE for predictions: ', RMSEpred, 'af/d. RMSE for rolling prediction mean: ', RMSErolling, 'af/d')
        self.Analysis = Analysis
    #Make a plot of predictions
    def PredictionPerformancePlot(self):
        """Plot observed vs predicted time series and a 1:1 scatter, then print RMSEs.

        Requires XGB_Predict to have populated self.Analysis first.
        """
        #predicted and observed
        labelsize = 14
        # better control over ax
        fig, ax = plt.subplots(2, 1)
        fig.set_size_inches(9,8)
        maxGW = max(max(self.Analysis[self.Target]), max(self.Analysis[self.Prediction]))*1.2
        self.Analysis.plot( y = self.Target, ax=ax[0], color = 'blue', label = self.Target)
        self.Analysis.plot(y = self.Prediction , ax=ax[0], color = 'orange', label = self.Prediction)
        self.Analysis.plot(y = self.Prediction_Rolling , ax=ax[0], color = 'green', label = self.Prediction_Rolling)
        ax[0].set_xlabel('Time ', size = labelsize)
        ax[0].set_ylabel(self.Target, size = labelsize)
        #plt.xlim(0,370)
        ax[0].set_ylim(0,maxGW*1.4)
        ax[0].legend(loc="upper left",title = 'Prediction/Target')
        self.Analysis.plot.scatter(x = self.Target, y = self.Prediction_Rolling , ax=ax[1], color = 'green', label = self.Prediction_Rolling)
        self.Analysis.plot.scatter(x = self.Target, y = self.Prediction , ax=ax[1], color = 'orange', label = self.Prediction)
        # 1:1 reference line for the scatter panel
        ax[1].plot((0,maxGW),(0,maxGW), linestyle = '--', color = 'red')
        #plt.title('Production Simulations', size = labelsize+2)
        #fig.savefig(O_path + 'Figures/MLP/MLP_Prod.png', dpi = 300)
        plt.show()
        RMSEpred = mean_squared_error(self.Analysis[self.Target],self.Analysis[self.Prediction], squared=False)
        RMSErolling = mean_squared_error(self.Analysis[self.Target],self.Analysis[self.Prediction_Rolling], squared=False)
        print('RMSE for predictions: ', RMSEpred, '. RMSE for rolling prediction mean: ', RMSErolling)
#Developing the XGBoost_Tuning package
class XGB_Tuning():
    """Feature-engineering and hyper-parameter tuning pipeline for the XGB-WSM.

    Typical workflow:
        ProcessData -> CollinearityRemoval -> FeatureSelection
        (or Feature_Optimization) -> GridSearch -> Train

    Fix vs. original: ProcessData's allData==False branch dropped WY2000 rows
    from `test_targ`, an undefined name, which raised NameError; it now drops
    them from `train_targ` as intended (mirroring the train_feat drop).
    """
    def __init__(self, cwd):
        # cwd: project root used to resolve model/feature save paths.
        self.cwd = cwd
    def ProcessData(self, df, sim, feat, targ, test_yr, scaling, allData):
        """Build train/test features and targets for one target variable.

        df: dict of scenario DataFrames; each must have a 'Time' column plus
            the raw water-system columns referenced below.
        sim: key into df selecting the scenario to process.
        feat: list of feature column names.
        targ: single-element list containing the target column name.
        test_yr: year whose April-October period is held out for testing.
        scaling: when True, min-max scale features/targets and RETURN the
            splits plus the target scaler instead of storing them on self.
            NOTE(review): MinMaxScaler is not imported in this module, so the
            scaling=True path needs "from sklearn.preprocessing import
            MinMaxScaler" added -- confirm before using it.
        allData: when True keep winter months in the training set; when False
            drop whole water years around the held-out test years.
        """
        print('Processing data to tune XGBoost model for ', targ[0])
        print('This may take a few moments depending on computational power and data size')
        self.targ = targ[0]
        data = copy.deepcopy(df)
        # Derive calendar features (month, day, day-of-year) from the Time column
        dflen = len(data[sim])
        months = []
        days = []
        years = []
        data[sim]['DOY'] = 0
        for t in range(0,dflen,1):
            y = data[sim]['Time'][t].year
            m = data[sim]['Time'][t].month
            d = data[sim]['Time'][t].day
            months.append(m)
            days.append(d)
            years.append(y)
            data[sim]['DOY'].iloc[t] = data[sim]['Time'].iloc[t].day_of_year
        years = list( dict.fromkeys(years) )
        # Remove the first and last years (incomplete water years)
        years = years[1:-1]
        data[sim]['Month'] = months
        data[sim]['Day'] = days
        data[sim].index = data[sim]['Time']
        # Previous-timestep ("initial") conditions for each water system component
        data[sim]['Mtn_Dell_Percent_Full_Initial'] = 0
        data[sim]['LittleDell_Percent_Full_Initial'] = 0
        data[sim]['SLCDPU_GW_Initial'] = 0
        data[sim]['SLCDPU_DC_Water_Use_Initial'] = 0
        timelen = len(data[sim])
        for t in range(0,timelen, 1):
            data[sim]['Mtn_Dell_Percent_Full_Initial'].iloc[t] = data[sim]['Mtn_Dell_Percent_Full'].iloc[(t-1)]
            data[sim]['LittleDell_Percent_Full_Initial'].iloc[t] = data[sim]['LittleDell_Percent_Full'].iloc[(t-1)]
            data[sim]['SLCDPU_GW_Initial'].iloc[t] = data[sim]['SLCDPU_GW'].iloc[(t-1)]
            data[sim]['SLCDPU_DC_Water_Use_Initial'].iloc[t] = data[sim]['SLCDPU_DC_Water_Use'].iloc[(t-1)]
        # Aggregated streamflow metric across all surface supplies
        data[sim]['SLCDPU_Surface_Supplies'] = data[sim]['BCC_Streamflow']+data[sim]['LCC_Streamflow']+data[sim]['CC_Streamflow']+data[sim]['Dell_Streamflow']+data[sim]['Lambs_Streamflow']
        features = data[sim][feat]
        targets = data[sim][targ]
        f_col = list(features.columns)
        t_col = list(targets.columns)
        if scaling == True:
            del data[sim]['Time']
            Feat_scaler = MinMaxScaler()
            Targ_scaler = MinMaxScaler()
            Feat_scaler.fit(features)
            Targ_scaler.fit(targets)
            features = Feat_scaler.transform(features)
            targets = Targ_scaler.transform(targets)
            f = pd.DataFrame(features, columns = f_col)
            t = pd.DataFrame(targets, columns = t_col)
            f.index = data[sim].index
            t.index = data[sim].index
        else:
            f = features
            t = targets
        # Adding more data can help train models; extend the training period
        # through March of the test year.
        train_feat = f.loc['2000-10-1':str(test_yr)+'-3-31']
        train_targ = t.loc['2000-10-1':str(test_yr)+'-3-31']
        test_feat = f.loc[str(test_yr)+'-4-1':str(test_yr)+'-10-31']
        test_targs = t.loc[str(test_yr)+'-4-1':str(test_yr)+'-10-31']
        if allData == True:
            # Remove 2008, 2015, 2017 April-October periods: these are the
            # held-out streamflow test conditions.
            testyrs = [2008,2015,2017]
            trainyrs = list(np.arange(2001, 2021, 1))
            for t in testyrs:
                trainyrs.remove(t)
                train_feat.drop(train_feat.loc[str(t)+'-4-1':str(t)+'-10-31'].index, inplace=True)
                train_targ.drop(train_targ.loc[str(t)+'-4-1':str(t)+'-10-31'].index, inplace=True)
        if allData == False:
            # Remove the full water years of 2008, 2015, 2017 (Nov-Oct):
            # these are the held-out streamflow test conditions.
            testyrs = [2008,2015,2017]
            trainyrs = list(np.arange(2001, 2021, 1))
            for t in testyrs:
                trainyrs.remove(t)
                train_feat.drop(train_feat.loc[str(t-1)+'-11-1':str(t)+'-10-31'].index, inplace=True)
                train_targ.drop(train_targ.loc[str(t-1)+'-11-1':str(t)+'-10-31'].index, inplace=True)
            # Model is focused on April to October water use; remove December
            # and January for the remaining training years.
            for t in trainyrs:
                train_feat.drop(train_feat.loc[str(t-1)+'-12-1':str(t)+'-1-31'].index, inplace=True)
                train_targ.drop(train_targ.loc[str(t-1)+'-12-1':str(t)+'-1-31'].index, inplace=True)
            # Drop WY2000 from BOTH frames.
            # BUG FIX: the original dropped from `test_targ` (undefined name,
            # raising NameError); the intended frame is train_targ.
            train_feat.drop(train_feat.loc['2000-1-1':'2001-3-30'].index, inplace=True)
            train_targ.drop(train_targ.loc['2000-1-1':'2001-3-30'].index, inplace=True)
        if scaling == True:
            return train_feat, train_targ, test_feat, test_targs, Targ_scaler
        else:
            self.train_feat, self.train_targ, self.test_feat, self.test_targs = train_feat, train_targ, test_feat, test_targs
    def CollinearityRemoval(self, col_threshold):
        """Drop features whose pairwise correlation exceeds col_threshold.

        Uses SelectNonCollinear scored by f_regression; stores the surviving
        feature frame/names on self.Col_Check_feat / self.Col_Check_features
        and draws a heatmap of the remaining correlations.
        """
        print('Calculating collinearity matrix and removing features > ', str(col_threshold))
        start_time = time.time()
        # Look at correlations among features
        features = self.train_feat.columns
        X = np.array(self.train_feat)
        y = np.array(self.train_targ)
        selector = SelectNonCollinear(correlation_threshold=col_threshold,scoring=f_regression)
        selector.fit(X,y)
        mask = selector.get_support()
        Col_Check_feat = pd.DataFrame(X[:,mask],columns = np.array(features)[mask])
        Col_Check_features = Col_Check_feat.columns
        sns.heatmap(Col_Check_feat.corr().abs(),annot=True)
        self.Col_Check_feat, self.Col_Check_features = Col_Check_feat, Col_Check_features
        c_time = round(time.time() - start_time,8)
        print('Feature development time', round(c_time), 's')
    # get a list of models to evaluate
    def get_models(self):
        """Build RFE+XGBRegressor pipelines keyed by feature count (2..n-1)."""
        models = dict()
        for i in range(2, len(self.X.columns)):
            rfe = RFE(estimator=XGBRegressor(), n_features_to_select=i)
            model = XGBRegressor()
            models[str(i)] = Pipeline(steps=[('s',rfe),('m',model)])
        self.models = models
    # evaluate a given model using cross-validation
    def evaluate_model(self, model):
        """Score one pipeline with repeated 3-fold CV (neg MAE); stores self.scores."""
        pipeline = model
        # evaluate model
        cv = RepeatedKFold(n_splits=3, n_repeats=3, random_state=1)
        n_scores = cross_val_score(pipeline, self.X, self.y, scoring='neg_mean_absolute_error', cv=cv, n_jobs=-1, error_score='raise')
        self.scores = n_scores
    def FeatureSelection(self):
        """Pick the best RFE feature count by cross-validated MAE, then fit
        RFE with it and store the selected features on
        self.Final_FeaturesDF / self.Final_Features.
        """
        start_time = time.time()
        # define dataset
        X = self.Col_Check_feat
        self.X = X
        y = self.train_targ
        self.y = y
        # get the models to evaluate
        self.get_models()
        # evaluate the models and store results
        results, names = list(), list()
        print('Using RFE to determine optimial features, scoring is:')
        for name, model in self.models.items():
            self.evaluate_model(model)
            results.append(self.scores)
            names.append(name)
            print('>%s %.3f (%.3f)' % (name, mean(self.scores), std(self.scores)))
        score_cols = ['n_feat' , 'mean_MAE', 'std_MAE']
        Feat_Eval = pd.DataFrame(columns = score_cols)
        for i in range(0,len(results)):
            feats = i+2
            meanMAE = mean(results[i])
            stdMAE = std(results[i])
            s = [feats, abs(meanMAE), stdMAE]
            Feat_Eval.loc[len(Feat_Eval)] = s
        # Mean and std MAE are both applicable; std works well when fewer
        # features are used.
        Feat_Eval = Feat_Eval.sort_values(by=['std_MAE', 'n_feat'])
        Feat_Eval = Feat_Eval.reset_index()
        print(Feat_Eval)
        n_feat = int(Feat_Eval['n_feat'][0])
        # create pipeline with the winning feature count
        rfe = RFE(estimator=XGBRegressor(), n_features_to_select=n_feat)
        rfe = rfe.fit(X, y)
        # summarize the selection of the attributes
        print(rfe.support_)
        print(rfe.ranking_)
        RFE_Feat = pd.DataFrame(self.Col_Check_features, columns = ['Features'])
        RFE_Feat['Selected'] = rfe.support_
        RFE_Feat = RFE_Feat[RFE_Feat['Selected']==True]
        RFE_Feat = RFE_Feat['Features']
        RFE_Features = self.Col_Check_feat[RFE_Feat]
        print('The Recursive Feature Elimination identified features are: ')
        print(list(RFE_Feat))
        self.Final_FeaturesDF, self.Final_Features = RFE_Features, list(RFE_Feat)
        c_time = round(time.time() - start_time,8)
        print('Feature selection time: ', round(c_time), 's')
    # These are the top features for XGBoost. RFE feature selection is a good
    # starting point, but these features optimize predictive performance.
    def Feature_Optimization(self):
        """Override RFE results with hand-optimized feature sets per target
        and pickle the final feature list for later prediction runs.
        """
        print(' ')
        print('Features optimization identifies the following features best fit for the XGB-WSM')
        if self.targ == 'LittleDell_Percent_Full':
            self.Final_Features = ['Month', 'Dell_Streamflow', 'Mtn_Dell_Percent_Full_Initial', 'LittleDell_Percent_Full_Initial']
            self.Final_FeaturesDF = self.Col_Check_feat[self.Final_Features]
        if self.targ == 'Mtn_Dell_Percent_Full':
            self.Final_Features = ['SLCDPU_Surface_Supplies', 'Dell_Streamflow', 'Lambs_Streamflow',
                                   'SLCDPU_GW_Initial', 'Mtn_Dell_Percent_Full_Initial']
            self.Final_FeaturesDF = self.Col_Check_feat[self.Final_Features]
        if self.targ == 'SLCDPU_GW':
            # Keep the RFE-selected features for groundwater
            self.Final_FeaturesDF = self.Col_Check_feat[self.Final_Features]
        if self.targ == 'SLCDPU_DC_Water_Use':
            self.Final_Features = ['BCC_Streamflow', 'SLCDPU_Prod_Demands', 'SLCDPU_DC_Water_Use_Initial',
                                   'Mtn_Dell_Percent_Full_Initial', 'LittleDell_Percent_Full_Initial']
            self.Final_FeaturesDF = self.Col_Check_feat[self.Final_Features]
        # Save the features list for the prediction engine
        pickle.dump(self.Final_Features, open(self.cwd + "/Model_History/V2/"+self.targ+"_features.pkl", "wb"))
        print('The final features for ', self.targ, 'are: ')
        print(self.Final_FeaturesDF.columns)
    # gridsearch hyper parameter function
    def GridSearch(self, parameters):
        """Run a 3-fold GridSearchCV over `parameters` on the final features;
        stores the fitted search object on self.xgb_grid.
        """
        start_time = time.time()
        print('Performing a Grid Search to identify the optimial model hyper-parameters')
        xgb1 = XGBRegressor()
        xgb_grid = GridSearchCV(xgb1,
                                parameters,
                                cv = 3,
                                n_jobs = -1,
                                verbose=3)
        xgb_grid.fit(self.Final_FeaturesDF, self.train_targ[self.targ])
        print('The best hyperparameter three-fold cross validation score is: ')
        print(xgb_grid.best_score_)
        print(' ')
        print('The optimal hyper-parameters are: ')
        print(xgb_grid.best_params_)
        print(' ')
        c_time = round(time.time() - start_time,8)
        print('Hyper-parameter Optimization time', round(c_time), 's')
        self.xgb_grid = xgb_grid
    # Model Training Function
    def Train(self, M_save_filepath):
        """Train the final XGB_model with GridSearch-optimal hyper-parameters
        and save it to M_save_filepath (relative to self.cwd); also plots
        feature importances.
        """
        # Assemble the optimal hyper-parameters found by GridSearch
        params = {"objective":"reg:squarederror",
                  'booster' : "gbtree" ,
                  'eta': self.xgb_grid.best_params_['learning_rate'],
                  "max_depth":self.xgb_grid.best_params_['max_depth'],
                  "subsample":self.xgb_grid.best_params_['subsample'],
                  "colsample_bytree":self.xgb_grid.best_params_['colsample_bytree'],
                  "reg_lambda":self.xgb_grid.best_params_['reg_lambda'],
                  'reg_alpha':self.xgb_grid.best_params_['reg_alpha'],
                  "min_child_weight":self.xgb_grid.best_params_['min_child_weight'],
                  'num_boost_round':self.xgb_grid.best_params_['n_estimators'],
                  'verbosity':0,
                  'nthread':-1
                 }
        # Train the model
        model = XGB_model(self.targ, self.cwd)
        model.fit(params,self.Final_FeaturesDF, self.train_targ, M_save_filepath)
        xgb.plot_importance(model.model_, max_num_features=20)
#XGB Prediction Engine
class XGB_Prediction():
def __init__(self, MDell_Thresh, LDell_Thresh, units):
self = self
#set reservoir level thresholds as global vars
self.MDell_Thresh = MDell_Thresh
self.LDell_Thresh = LDell_Thresh
self.units = units
#Data Processing needed to make a prediction
def ProcessData(self, Sim, scenario, test_yr):
#define global variables
self.scenario = scenario
self.Sim = Sim
self.test_yr = test_yr
print('Processing data into features/targets for ', self.scenario, ' scenario')
#Input optimial features from XGBoost_WSM_Tuning.
LittleDell_Percent_Full = pickle.load(open("Models/V2/LittleDell_Percent_Full_features.pkl", "rb"))
Mtn_Dell_Percent_Full = pickle.load(open("Models/V2/Mtn_Dell_Percent_Full_features.pkl", "rb"))
SLCDPU_GW = pickle.load(open("Models/V2/SLCDPU_GW_features.pkl", "rb"))
SLCDPU_DC_Water_Use = pickle.load(open("Models/V2/SLCDPU_DC_Water_Use_features.pkl", "rb"))
feat = {
'LittleDell_Percent_Full':LittleDell_Percent_Full,
'Mtn_Dell_Percent_Full':Mtn_Dell_Percent_Full,
'SLCDPU_GW': SLCDPU_GW,
'SLCDPU_DC_Water_Use': SLCDPU_DC_Water_Use
}
#make a DF with some additional features (from GS)
data = copy.deepcopy(self.Sim)
dflen = len(data[self.scenario])
months = []
days = []
years = []
data[self.scenario]['DOY'] = 0
for t in range(0,dflen,1):
y = data[self.scenario]['Time'][t].year
m = data[self.scenario]['Time'][t].month
d = data[self.scenario]['Time'][t].day
months.append(m)
days.append(d)
years.append(y)
data[self.scenario]['DOY'].iloc[t] = data[self.scenario]['Time'].iloc[t].day_of_year
years = list( dict.fromkeys(years) )
#remove yr 2000 and 2022 as it is not a complete year
years = years[1:-1]
data[self.scenario]['Month'] = months
data[self.scenario]['Day'] = days
data[self.scenario].index = data[self.scenario]['Time']
#input each year's initial reservoir conditions./ previous timestep conditions.
data[self.scenario]['Mtn_Dell_Percent_Full_Initial'] = 0
data[self.scenario]['LittleDell_Percent_Full_Initial'] = 0
data[self.scenario]['SLCDPU_GW_Initial'] = 0
data[self.scenario]['SLCDPU_DC_Water_Use_Initial'] = 0
timelen = len(data[self.scenario])
for t in range(0,timelen, 1):
data[self.scenario]['Mtn_Dell_Percent_Full_Initial'].iloc[t] = data[self.scenario]['Mtn_Dell_Percent_Full'].iloc[(t-1)]
data[self.scenario]['LittleDell_Percent_Full_Initial'].iloc[t] = data[self.scenario]['LittleDell_Percent_Full'].iloc[(t-1)]
data[self.scenario]['SLCDPU_GW_Initial'].iloc[t] = data[self.scenario]['SLCDPU_GW'].iloc[(t-1)]
data[self.scenario]['SLCDPU_DC_Water_Use_Initial'].iloc[t] = data[self.scenario]['SLCDPU_DC_Water_Use'].iloc[(t-1)]
#make an aggregated streamflow metric
data[self.scenario]['SLCDPU_Surface_Supplies'] = data[self.scenario]['BCC_Streamflow']+data[self.scenario]['LCC_Streamflow']+data[self.scenario]['CC_Streamflow']+data[self.scenario]['Dell_Streamflow']+data[self.scenario]['Lambs_Streamflow']
#Make dictionary of acutal features
features = { 'LittleDell_Percent_Full':data[self.scenario][feat['LittleDell_Percent_Full']],
'Mtn_Dell_Percent_Full':data[self.scenario][feat['Mtn_Dell_Percent_Full']],
'SLCDPU_GW': data[self.scenario][feat['SLCDPU_GW']],
'SLCDPU_DC_Water_Use': data[self.scenario][feat['SLCDPU_DC_Water_Use']]
}
#set up Targets
targ = ['SLCDPU_GW', 'Mtn_Dell_Percent_Full', 'LittleDell_Percent_Full','SLCDPU_DC_Water_Use']
targets = data[self.scenario][targ]
for i in features:
features[i] = features[i].loc[str(self.test_yr)+'-4-1':str(self.test_yr)+'-10-30']
Hist_targs = targets.loc[:str(self.test_yr)+'-3-31'].copy()
targets = targets.loc[str(self.test_yr)+'-4-1':str(self.test_yr)+'-10-30']
self.features, self.targets, self.Hist_targs =features, targets, Hist_targs
#This uses the XGB model to make predictions for each water system component at a daily time step.
def WSM_Predict(self):
#Set up the target labels
#Mountain Dell
self.MDell = 'Mtn_Dell_Percent_Full'
self.MDell_Pred = self.MDell+'_Pred'
self.MDell_Pred_Rol = self.MDell_Pred+'_Rolling'
self.MDell_Initial = self.MDell+'_Initial'
#Little Dell
self.LDell = 'LittleDell_Percent_Full'
self.LDell_Pred = self.LDell+'_Pred'
self.LDell_Pred_Rol = self.LDell_Pred+'_Rolling'
self.LDell_Initial = self.LDell+'_Initial'
#GW
self.GW = 'SLCDPU_GW'
self.GW_Pred = self.GW+'_Pred'
self.GW_Pred_Rol = self.GW_Pred+'_Rolling'
self.GW_Initial = self.GW+'_Initial'
#GW
self.DC = 'SLCDPU_DC_Water_Use'
self.DC_Pred = self.DC+'_Pred'
self.DC_Pred_Rol = self.DC_Pred+'_Rolling'
self.DC_Initial = self.DC+'_Initial'
#Grab features/targets for the respective target
MDell_feat = copy.deepcopy(self.features[self.MDell])
MDell_targ = copy.deepcopy(self.targets[self.MDell])
LDell_feat = copy.deepcopy(self.features[self.LDell])
LDell_targ = copy.deepcopy(self.targets[self.LDell])
GW_feat = copy.deepcopy(self.features[self.GW])
GW_targ = copy.deepcopy(self.targets[self.GW])
DC_feat = copy.deepcopy(self.features[self.DC])
DC_targ = copy.deepcopy(self.targets[self.DC])
#Make predictions with the model, load model from XGBoost_WSM_Tuning
MDell_model = pickle.load(open("Models/V1/XGBoost_"+self.MDell+".dat", "rb"))
LDell_model = pickle.load(open("Models/V2/XGBoost_"+self.LDell+".dat", "rb"))
GW_model = pickle.load(open("Models/V2/XGBoost_"+self.GW+".dat", "rb"))
DC_model = pickle.load(open("Models/V2/XGBoost_"+self.DC+".dat", "rb"))
start_time = time.time()
#since the previous timestep is being used, we need to predict this value
#Mtn dell
MDell_predict = []
MDell_col = MDell_feat.columns
#lil Dell
LDell_predict = []
LDell_col = LDell_feat.columns
#GW
GW_predict = []
GW_col = GW_feat.columns
#GW
DC_predict = []
DC_col = DC_feat.columns
#Make Predictions by row, update DF intitials to make new row prediction based on the current
for i in range(0,(len(LDell_feat)-1),1):
#MOuntain Dell
MDell_t_feat = np.array(MDell_feat.iloc[i])
MDell_t_feat = MDell_t_feat.reshape(1,len(MDell_t_feat))
MDell_t_feat = pd.DataFrame(MDell_t_feat, columns = MDell_col)
M = XGB_model.predict(MDell_model, MDell_t_feat, MDell_model)
#Little Dell
LDell_t_feat = np.array(LDell_feat.iloc[i])
LDell_t_feat = LDell_t_feat.reshape(1,len(LDell_t_feat))
LDell_t_feat = pd.DataFrame(LDell_t_feat, columns = LDell_col)
L = XGB_model.predict(LDell_model, LDell_t_feat, LDell_model)
#GW
GW_t_feat = np.array(GW_feat.iloc[i])
GW_t_feat = GW_t_feat.reshape(1,len(GW_t_feat))
GW_t_feat = pd.DataFrame(GW_t_feat, columns = GW_col)
G = XGB_model.predict(GW_model, GW_t_feat, GW_model)
#add physical limitations to predictions
G = np.array(G)
#DC
DC_t_feat = np.array(DC_feat.iloc[i])
DC_t_feat = DC_t_feat.reshape(1,len(DC_t_feat))
DC_t_feat = pd.DataFrame(DC_t_feat, columns = DC_col)
D = XGB_model.predict(DC_model, DC_t_feat, DC_model)
#This updates each DF with the predictions
#Mountain Dell Features
if self.LDell_Initial in MDell_col:
MDell_feat[self.LDell_Initial].iloc[(i+1)] = L
if self.MDell_Initial in MDell_col:
MDell_feat[self.MDell_Initial].iloc[(i+1)] = M
if self.GW_Initial in MDell_col:
MDell_feat[self.GW_Initial].iloc[(i+1)] = G
if self.DC_Initial in MDell_col:
MDell_feat[self.DC_Initial].iloc[(i+1)] = D
#Little Dell Features
if self.LDell_Initial in LDell_col:
LDell_feat[self.LDell_Initial].iloc[(i+1)] = L
if self.MDell_Initial in LDell_col:
LDell_feat[self.MDell_Initial].iloc[(i+1)] = M
if self.GW_Initial in LDell_col:
LDell_feat[self.GW_Initial].iloc[(i+1)] = G
if self.DC_Initial in LDell_col:
LDell_feat[self.DC_Initial].iloc[(i+1)] = D
#Gw Features
if self.LDell_Initial in GW_col:
GW_feat[self.LDell_Initial].iloc[(i+1)] = L
if self.MDell_Initial in GW_col:
GW_feat[self.MDell_Initial].iloc[(i+1)] = M
if self.GW_Initial in GW_col:
GW_feat[self.GW_Initial].iloc[(i+1)] = G
if self.DC_Initial in GW_col:
GW_feat[self.DC_Initial].iloc[(i+1)] = D
#DC Features
if self.LDell_Initial in DC_col:
DC_feat[self.LDell_Initial].iloc[(i+1)] = L
if self.MDell_Initial in DC_col:
DC_feat[self.MDell_Initial].iloc[(i+1)] = M
if self.GW_Initial in DC_col:
DC_feat[self.GW_Initial].iloc[(i+1)] = G
if self.DC_Initial in DC_col:
DC_feat[self.DC_Initial].iloc[(i+1)] = D
#Append predictions
MDell_predict.append(M[0])
LDell_predict.append(L[0])
GW_predict.append(G[0])
DC_predict.append(D[0])
#need to manually add one more prediction
MDell_predict.append(MDell_predict[-1])
LDell_predict.append(LDell_predict[-1])
GW_predict.append(GW_predict[-1])
DC_predict.append(DC_predict[-1])
#Use this line for PCA
c_time = round(time.time() - start_time,8)
print('prediction time', round(c_time), 's')
#Analyze model performance
#Add Little Dell
Analysis = pd.DataFrame(LDell_predict,index =self.features[self.LDell].index, columns = [self.LDell_Pred])
#non-zero values cannot occur
Analysis[self.LDell_Pred][Analysis[self.LDell_Pred]<0] = 0
#Add Mountain Dell
Analysis[self.MDell_Pred] = MDell_predict
#non-zero values cannot occur
Analysis[self.MDell_Pred][Analysis[self.MDell_Pred]<0] = 0
#Add GW
Analysis[self.GW_Pred] = np.float32(GW_predict)
#non-zero values cannot occur
Analysis[self.GW_Pred][Analysis[self.GW_Pred]<0] = 0
#Add DC
Analysis[self.DC_Pred] = np.float32(DC_predict)
#non-zero values cannot occur
Analysis[self.DC_Pred][Analysis[self.DC_Pred]<0] = 0
print('Predictions Complete')
#input physical limitations to components. the 0.000810714 is a conversion from m3 to af
Analysis[self.GW_Pred].loc[Analysis[self.GW_Pred]<0] =0
Analysis[self.GW_Pred].loc[Analysis[self.GW_Pred]>11.038412*8.10714] = 11.038412*8.10714
Analysis[self.GW_Pred].loc['2021-7-10':'2021-8-30'][Analysis[self.GW_Pred]<11.038416*8.10714]=11.038416*8.10714
Analysis[self.DC_Pred].loc[Analysis[self.DC_Pred]<0.05*8.10714] =0.05*8.10714
Analysis[self.MDell_Pred].loc[Analysis[self.MDell_Pred]<25] =25
Analysis[self.LDell_Pred].loc[Analysis[self.LDell_Pred]<10] =10
#calculate 5day rolling means
Analysis[self.DC_Pred_Rol] = Analysis[self.DC_Pred].rolling(5).mean()
Analysis[self.DC_Pred_Rol] = Analysis[self.DC_Pred_Rol].interpolate(method='linear',
limit_direction='backward',
limit=5)
Analysis[self.GW_Pred_Rol] = Analysis[self.GW_Pred].rolling(5).mean()
Analysis[self.GW_Pred_Rol] = Analysis[self.GW_Pred_Rol].interpolate(method='linear',
limit_direction='backward',
limit=5)
Analysis[self.MDell_Pred_Rol] = Analysis[self.MDell_Pred].rolling(5).mean()
Analysis[self.MDell_Pred_Rol] = Analysis[self.MDell_Pred_Rol].interpolate(method='linear',
limit_direction='backward',
limit=5)
Analysis[self.LDell_Pred_Rol] = Analysis[self.LDell_Pred].rolling(5).mean()
Analysis[self.LDell_Pred_Rol] = Analysis[self.LDell_Pred_Rol].interpolate(method='linear',
limit_direction='backward',
limit=5)
self.Analysis = Analysis
self.HistoricalAnalysis()
self.RRV_Assessment()
print('Plotting results for visual analysis:')
self.WSM_Pred_RRV_Plot()
#A function to calculate the daily mean values for each water system component
    def DailyMean(self,component, month, yrs, days, monthnumber, inputyr):
        """Build one-row frames of the historical daily mean for every day of a month.

        For each calendar day in `days`, collects `component` values across the
        historical years `yrs` from `self.Histyrs` (set by HistoricalAnalysis),
        averages them, and stamps the result with `inputyr` so it aligns with the
        prediction-year index.

        component -- column name of the water system component to average.
        month -- month label used as the dict-key prefix (e.g. 'Apr').
        yrs -- iterable of historical years to average over.
        days -- iterable of day-of-month numbers for this month.
        monthnumber -- zero-padded month string used in date lookups (e.g. '04').
        inputyr -- year assigned to the output dates (the prediction year).
        Returns a dict mapping 'MonDD' keys to single-row DataFrames indexed by
        date with one 'Hist_Mean_<component>' column.
        """
        # Per-day accumulators: raw values per year, and a frame scaffold per day.
        Daylist = defaultdict(list)
        DayFrame= defaultdict(list)
        timecol = ['Year', 'Month' , 'Day']
        for i in days:
            Daylist[month+ str(i)]= []
            DayFrame[month + str(i)] = pd.DataFrame(yrs, columns=['Year'])
        # Gather the component value for each (year, day) from the historical index.
        # Assumes self.Histyrs is indexed by dates parseable as 'YYYY-MM-D' — TODO confirm.
        for i in yrs:
            for j in days:
                Daylist[month+str(j)].append(self.Histyrs.loc[str(i)+'-'+ monthnumber +'-'+str(j)][component])
                DayFrame[month+str(j)]['Day']=j
                DayFrame[month+str(j)]['Month'] = int(monthnumber)
        for i in DayFrame:
            DayFrame[i][component] = Daylist[i]
        histcomponent = 'Hist_Mean_' + component
        # Collapse each per-day frame to a single mean row dated in inputyr.
        for i in DayFrame:
            DayFrame[i][histcomponent]= np.mean(DayFrame[i][component])
            del DayFrame[i][component]
            ##put into year of choice
            DayFrame[i]['Year']=inputyr
            #create the date for input into figure DF
            DayFrame[i].insert(loc=0, column='Date', value=pd.to_datetime(DayFrame[i][['Year', 'Month', 'Day']]))
            DayFrame[i] = DayFrame[i].drop(columns = timecol)
            DayFrame[i]=DayFrame[i].set_index('Date')
            # Keep only the first row (all rows are identical after the mean), as a 1-row frame.
            DayFrame[i]=DayFrame[i].iloc[0]
            DayFrame[i] = pd.DataFrame(DayFrame[i]).T
        return DayFrame
#Perform a historical analysis of each WSC to compare performance of current scenario
    def HistoricalAnalysis(self):
        """Attach historical daily-mean baselines to the prediction frame.

        For each water system component, computes the 2001-2020 historical daily
        mean (April-October) via DailyMean, writes it into `self.Analysis` as
        'Hist_Mean_<component>', and adds a '<component>_Pred_diff' column:
        threshold minus prediction for reservoirs, prediction minus historical
        mean for GW/DC use. Ends by calling Prediction_Comparative_Analysis().
        """
        print('Calculating historical water system component means to create baseline for comparison with prediction')
        targets = ['SLCDPU_GW', 'Mtn_Dell_Percent_Full', 'LittleDell_Percent_Full','SLCDPU_DC_Water_Use']
        pbar = ProgressBar()
        for component in pbar(targets):
            histcomponent = 'Hist_Mean_' + component
            predcomponent = component+'_Pred'
            #Use historical data, prior to WY2021
            Histyrs=self.Hist_targs.copy()
            Histyrs = Histyrs[:"2020-10-31"]
            #Select time of importance 2021, 2022
            # NOTE: re-filters self.Analysis on every loop pass; idempotent after the first.
            self.Analysis = self.Analysis[self.Analysis.index.year.isin([self.test_yr])].copy()
            #remove months that are not if interst in historical dataset
            self.Histyrs = Histyrs[~Histyrs.index.month.isin([1,2,3,11,12])]
            '''
            Using the historical daily DC water usage, Find the mean daily DC usage and add it to the
            Main DF to compare 2021 and 2022 water usage.
            '''
            # Historical averaging window (2001-2020) and day ranges per month.
            yrs = np.arange(2001,2021,1)
            Aprdays = np.arange(1,31,1)
            Maydays = np.arange(1,32,1)
            Jundays = np.arange(1,31,1)
            Juldays = np.arange(1,32,1)
            Augdays = np.arange(1,32,1)
            Sepdays = np.arange(1,31,1)
            Octdays = np.arange(1,32,1)
            #Set up DF for mean daily DC water usage for WY 2021
            Apr = self.DailyMean(component,'Apr', yrs, Aprdays, '04', self.test_yr)
            May = self.DailyMean(component,'May', yrs, Maydays, '05', self.test_yr)
            Jun = self.DailyMean(component,'Jun', yrs, Jundays, '06', self.test_yr)
            Jul = self.DailyMean(component,'Jul', yrs, Juldays, '07', self.test_yr)
            Aug = self.DailyMean(component,'Aug', yrs, Augdays, '08', self.test_yr)
            Sep = self.DailyMean(component,'Sep', yrs, Sepdays, '09', self.test_yr)
            Oct = self.DailyMean(component,'Oct', yrs, Octdays, '10', self.test_yr)
            # Stack all one-row daily-mean frames into a single April-October series.
            DC_Mean = pd.DataFrame()
            for i in Apr:
                DC_Mean = DC_Mean.append(Apr[i])
            for i in May:
                DC_Mean = DC_Mean.append(May[i])
            for i in Jun:
                DC_Mean = DC_Mean.append(Jun[i])
            for i in Jul:
                DC_Mean = DC_Mean.append(Jul[i])
            for i in Aug:
                DC_Mean = DC_Mean.append(Aug[i])
            for i in Sep:
                DC_Mean = DC_Mean.append(Sep[i])
            for i in Oct:
                DC_Mean = DC_Mean.append(Oct[i])
            #create an empty column for mean delivery
            self.Analysis[histcomponent] = 0
            #Update the Output2021 with historical period daily DC usage
            self.Analysis.update(DC_Mean)
            predcomponent_diff = predcomponent+'_diff'
            res = ['Mtn_Dell_Percent_Full', 'LittleDell_Percent_Full', 'Mtn_Dell_Percent_Full_Pred',
                   'LittleDell_Percent_Full','LittleDell_Percent_Full_Pred']
            #we want to mark the reservoirs at a concern if they go below a certain level
            if component in res:
                if component == 'Mtn_Dell_Percent_Full':
                    #Dead pool for mtn dell is ~25, mark as vulnerable when it gets to 35%
                    self.Analysis[predcomponent_diff] = self.MDell_Thresh-self.Analysis[predcomponent]
                if component == 'LittleDell_Percent_Full':
                    #Dead pool for lil dell is ~5%, mark as vulnerable when it gets to 15%
                    self.Analysis[predcomponent_diff] = self.LDell_Thresh-self.Analysis[predcomponent]
            else:
                # Demand components: positive diff means prediction exceeds the historical mean.
                self.Analysis[predcomponent_diff] = self.Analysis[predcomponent]-self.Analysis[histcomponent]
        self.Prediction_Comparative_Analysis()
#Create historical RRV Analysis to define historical RRV thresholds to compare predictions with
    def Prediction_Comparative_Analysis(self):
        """Build a 20-year reference frame and historical maximum daily severities.

        Tiles the one-year historical daily means over the 2001-2020 simulation
        record from `self.Sim[self.scenario]`, computes per-component 'diff'
        columns (threshold minus level for reservoirs; usage minus historical
        mean for GW/DC), zeroes sub-unit diffs, and stores the resulting frame
        plus each component's historical maximum daily severity on `self`.
        """
        print('Processing predictions and historical means for comparative performance analysis.')
        #Find the historical daily values for the water system.
        #This creates a baseline to gage reliability, resilience, vulnerability
        self.years = [2001,2002,2003,2004, 2005, 2006, 2007,2008, 2009, 2010,
                     2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019,2020]
        #Determine the maximum historical system severity
        df = self.Analysis.copy()
        #Daily2021 = ForecastDataPrep(Analysis, Hist_targs, 2021)
        #Get the historical mean DC deliverity values for one year
        Hist_Mean_MDell = list(df['Hist_Mean_Mtn_Dell_Percent_Full'].copy())
        Hist_Mean_LDell = list(df['Hist_Mean_LittleDell_Percent_Full'].copy())
        Hist_Mean_GW = list(df['Hist_Mean_SLCDPU_GW'].copy())
        Hist_Mean_DC = list(df['Hist_Mean_SLCDPU_DC_Water_Use'].copy())
        #Get the reference perid simulation results
        SimDF = self.Sim[self.scenario].copy()
        SimDF.index = SimDF['Time']
        del SimDF['Time']
        #Convert GS output from AF to M3
        #Select the first 20 years
        Hist = pd.DataFrame(columns = SimDF.columns)
        for y in self.years :
            # April-October slice of each simulation year.
            Hist = Hist.append(SimDF.loc[str(y)+'-4-01':str(y)+'-10-30'])
        #Make the data to input into long term TS
        yearlen = len(Hist_Mean_MDell)
        # Repeat the single-year mean series once per reference year (20 years).
        # Assumes each yearly slice has the same length as the mean series — TODO confirm.
        Hist_Mean_MDell = Hist_Mean_MDell*20
        Hist_Mean_LDell = Hist_Mean_LDell*20
        Hist_Mean_GW = Hist_Mean_GW*20
        Hist_Mean_DC = Hist_Mean_DC*20
        #Hist_Mean_DC = Oct_Dec_Hist_Mean_DC+Hist_Mean_DC
        Hist['Hist_Mean_Mtn_Dell_Percent_Full'] = Hist_Mean_MDell
        Hist['Hist_Mean_LittleDell_Percent_Full'] = Hist_Mean_LDell
        Hist['Hist_Mean_SLCDPU_GW'] = Hist_Mean_GW
        Hist['Hist_Mean_SLCDPU_DC_Water_Use'] = Hist_Mean_DC
        #Find above/below specific reservoir levels
        Hist['Mtn_Dell_Percent_Full_diff'] = self.MDell_Thresh-Hist['Mtn_Dell_Percent_Full']
        Hist['LittleDell_Percent_Full_diff'] = self.LDell_Thresh-Hist['LittleDell_Percent_Full']
        #Find above/below historical DC/GW and
        Hist['SLCDPU_GW_diff'] = Hist['SLCDPU_GW']-Hist['Hist_Mean_SLCDPU_GW']
        Hist['SLCDPU_DC_Water_Use_diff'] = Hist['SLCDPU_DC_Water_Use']-Hist['Hist_Mean_SLCDPU_DC_Water_Use']
        # Clamp diffs below 1 to zero: only exceedances of at least one unit count as failures.
        for i in np.arange(0,len(Hist),1):
            if Hist['Mtn_Dell_Percent_Full_diff'].iloc[i] <1:
                Hist['Mtn_Dell_Percent_Full_diff'].iloc[i] = 0
            if Hist['LittleDell_Percent_Full_diff'].iloc[i] <1:
                Hist['LittleDell_Percent_Full_diff'].iloc[i] = 0
            if Hist['SLCDPU_GW_diff'].iloc[i] <1:
                Hist['SLCDPU_GW_diff'].iloc[i] = 0
            if Hist['SLCDPU_DC_Water_Use_diff'].iloc[i] <1:
                Hist['SLCDPU_DC_Water_Use_diff'].iloc[i] = 0
        # Historical maxima used later to normalize predicted severities.
        Historical_Max_Daily_MDell = max(Hist['Mtn_Dell_Percent_Full_diff'])
        Historical_Max_Daily_LDell = max(Hist['LittleDell_Percent_Full_diff'])
        Historical_Max_Daily_GW = max(Hist['SLCDPU_GW_diff'])
        Historical_Max_Daily_DC = max(Hist['SLCDPU_DC_Water_Use_diff'])
        self.Hist, self.Historical_Max_Daily_MDell, self.Historical_Max_Daily_LDell = Hist, Historical_Max_Daily_MDell, Historical_Max_Daily_LDell
        self.Historical_Max_Daily_GW, self.Historical_Max_Daily_DC = Historical_Max_Daily_GW, Historical_Max_Daily_DC
def RRV_Assessment(self):
print('Initiating water system component RRV analysis.')
#Make a dictionary to store each targets RRV information
Target_RRV = [ 'SLCDPU_GW', 'SLCDPU_DC_Water_Use',
'Mtn_Dell_Percent_Full', 'LittleDell_Percent_Full']
Target_RRV= dict.fromkeys(Target_RRV)
self.RRV_DF =pd.DataFrame(columns =['Model', 'Climate', 'Target', 'Reliability',
'Resilience', 'Vulnerability', 'MaxSeverity', 'Maximum_Severity'])
#Find the historical RRV, the jenks breaks will use this
self.TargetRRV(self.Hist,'Hist', 'Mtn_Dell_Percent_Full',
self.Historical_Max_Daily_MDell, self.years)
self.TargetRRV(self.Hist,'Hist', 'LittleDell_Percent_Full',
self.Historical_Max_Daily_LDell, self.years)
self.TargetRRV(self.Hist,'Hist', 'SLCDPU_GW',
self.Historical_Max_Daily_GW, self.years)
self.TargetRRV(self.Hist,'Hist', 'SLCDPU_DC_Water_Use',
self.Historical_Max_Daily_DC, self.years)
#XGB-WSM
self.TargetRRV(self.Analysis, 'XGB_WSM','Mtn_Dell_Percent_Full_Pred', self.Historical_Max_Daily_MDell, [self.test_yr])
self.TargetRRV(self.Analysis, 'XGB_WSM','LittleDell_Percent_Full_Pred', self.Historical_Max_Daily_LDell, [self.test_yr])
self.TargetRRV(self.Analysis, 'XGB_WSM', 'SLCDPU_GW_Pred', self.Historical_Max_Daily_GW, [self.test_yr])
self.TargetRRV(self.Analysis, 'XGB_WSM', 'SLCDPU_DC_Water_Use_Pred',self.Historical_Max_Daily_DC, [self.test_yr])
print('Setting up an RRV dataframe and calculating each water system component RRV')
print('Finalizing analysis and placing into Jenks classification categories.')
for target in Target_RRV:
Target_RRV[target] = self.RRVanalysis(target)
#Make Target_RRV a global variable
self.Target_RRV =Target_RRV
def TargetRRV(self, DF, Sim, Target, Max, years):
preds = ['SLCDPU_GW_Pred', 'SLCDPU_DC_Water_Use_Pred',
'Mtn_Dell_Percent_Full_Pred', 'LittleDell_Percent_Full_Pred']
RRV_Data_D =pd.DataFrame(columns = ['SLCDPU_Prod_Demands', 'SLCDPU_Population', 'BCC_Streamflow',
'LCC_Streamflow', 'CC_Streamflow', 'Dell_Streamflow',
'Lambs_Streamflow', 'SLCDPU_GW', 'SLCDPU_DC_Water_Use',
'Mtn_Dell_Percent_Full', 'LittleDell_Percent_Full',
'Hist_Mean_Mtn_Dell_Percent_Full', 'Hist_Mean_LittleDell_Percent_Full',
'Hist_Mean_SLCDPU_GW', 'Hist_Mean_SLCDPU_DC_Water_Use', 'Mtn_Dell_Percent_Full_diff',
'LittleDell_Percent_Full_diff', 'SLCDPU_GW_diff', 'SLCDPU_DC_Water_Use_diff', 'Clim',
Target+'_Zt', Target+'_Wt',Target+'_WSCI_s', Target+'_Sev',
Target+'_Vul'])
Extra_Targ = Target+'_diff'
for y in years:
DF2 = DF.loc[str(y)+'-04-01':str(y)+'-10-31'].copy()
self.RRV(DF2,Extra_Targ, Target, Max,y)
if Target in preds:
Target = Target[:-5]
RRVass = list([Sim,self.scenario, Target, self.Rel, self.Res, self.Vul, self.Max_Severity, self.MaxSevNorm])
self.RRV_DF.loc[len(self.RRV_DF)] = RRVass
#we need to calculate the RRV metrics
    def RRV(self, DF2, Extra_target, target, maxseverity, yr):
        """Compute reliability, resilience, and vulnerability for one component-year.

        Works on the April-October slice of `DF2`, using the precomputed
        `Extra_target` ('<target>_diff') exceedance column. Results are stored
        on `self` (Rel, Res, Vul, df, Max_Severity, MaxSevSI, MaxSevNorm)
        rather than returned.

        DF2 -- component frame for one year (date-indexed).
        Extra_target -- name of the diff column measuring exceedance.
        target -- component column name (may carry a '_Pred' suffix).
        maxseverity -- historical maximum daily severity used for normalization.
        yr -- year being evaluated.
        """
        preds = ['SLCDPU_GW_Pred', 'SLCDPU_DC_Water_Use_Pred',
                'Mtn_Dell_Percent_Full_Pred', 'LittleDell_Percent_Full_Pred']
        # The historical-mean column never carries the '_Pred' suffix.
        if target in preds:
            hist_target = 'Hist_Mean_'+ target[:-5]
        else:
            hist_target = 'Hist_Mean_'+ target
        df = DF2.copy()
        df['Clim'] = self.scenario
        #period of interest is from April to October
        df = df.loc[str(yr)+'-04-01':str(yr)+'-10-31']
        #length of study period
        T = len(df)
        #make sure ExtraDC is never less than 0
        for i in np.arange(1,T,1):
            if df[Extra_target].iloc[i] < 0:
                df[Extra_target].iloc[i] = 0
        '''
        Reliability
        Reliability = sum of timesteps Zt/T
        Zt is 0 if the target exceeds (U) the historical average and 1 if it does not (S)
        '''
        Zt = target+'_Zt'
        df[Zt] = 1
        # A day is unsatisfactory (Zt=0) when the exceedance is more than 1 unit;
        # sub-unit exceedances are zeroed so they do not contribute to severity.
        for i in np.arange(0,T,1):
            if df[Extra_target].iloc[i] > 1:
                df[Zt].iloc[i] = 0
            if df[Extra_target].iloc[i] < 1:
                df[Extra_target].iloc[i] = 0
        Rel = df[Zt].sum()/T
        '''
        Resilience
        Resilience = sum of timesteps Wt/(T-sum(Zt))
        Wt is 1 if Xt is U and Xt+1 is S
        '''
        Wt = target+'_Wt'
        df[Wt]=0
        # Count recoveries: transitions from unsatisfactory (Zt=0) to satisfactory (Zt=1).
        for i in np.arange(1,T,1):
            if df[Zt].iloc[i-1] == 0 and df[Zt].iloc[i] == 1:
                df[Wt].iloc[i] = 1
        #To get in days do 1/Res
        Res = 1/((1+df[Wt].iloc[0:T-1].sum())/(T+1-df[Zt].sum()))
        '''
        Vulnerability
        We use Exposure and severity to determine Vulnerability
        Exposure DCwater requests > hist ave, WRI_s) is an index from 0-1, WRI_s =1- WR_s/WR_h
        Severity is the amount of ExtraDC water, and then normalized based on the
        largest value to provide values from 0-1
        '''
        #Exposure
        WSCI_s = target+ '_WSCI_s'
        # +1 in the denominator guards against division by a zero historical mean.
        df[WSCI_s] = df[target]/(df[hist_target]+1)
        for i in np.arange(0,T,1):
            if df[WSCI_s].iloc[i] > 1:
                df[WSCI_s].iloc[i] = 1
        #This is average exposure
        Exp = df[WSCI_s].sum()/T
        #Severity
        Max_Severity = df[Extra_target].max()
        #if MaxSeverity == 0:
        #    MaxSeverity = 1
        #This is the maximum found for all simulations
        MaxSeverity = maxseverity
        Severity = target+'_Sev'
        df[Severity] = df[Extra_target]/MaxSeverity
        #This is average severity
        Sev = df[Severity].sum()/(T+1-df[Zt].sum())
        MaxSevSI = df[Severity].max()*MaxSeverity
        MaxSevNorm = df[Severity].max()
        Vulnerability = target + '_Vul'
        df[Vulnerability] = (0.5*df[WSCI_s])+(0.5*df[Severity])
        #Vulerability = Exposure +Severity
        Vul = (0.5*Exp) + (0.5*Sev)
        # Publish all metrics on self for TargetRRV to collect.
        self.Rel, self.Res, self.Vul, self.df, self.Max_Severity, self.MaxSevSI, self.MaxSevNorm = Rel, Res, Vul, df, Max_Severity, MaxSevSI, MaxSevNorm
def RRVanalysis(self, Target):
#Get the historical RRV for each target
Breaks_Data = self.RRV_DF.loc[(self.RRV_DF['Model'] == 'Hist') & (self.RRV_DF['Target'] == Target)]
Cat_Data = self.RRV_DF.loc[(self.RRV_DF['Target'] == Target)]
#Find the natural breaks in the RRV
#The eval data set has values greater than the historical and are identified as Nan in the
#eval dataframe. These values will be marked as extreme
VBreaks = jenkspy.jenks_breaks(Breaks_Data['Vulnerability'], nb_class=3)
VBreaks[0] = 0.0
Cat_Data['Jenks_Vul'] = pd.cut(Cat_Data['Vulnerability'],
bins=VBreaks,
labels=['low', 'medium', 'high'],
include_lowest=True)
self.VBreaks = [ np.round(v,2) for v in VBreaks ]
# print(Target, ' Vulnerability Breaks')
# print('Low, Medium, High: ', VBreaks)
SBreaks = jenkspy.jenks_breaks(Breaks_Data['Maximum_Severity'], nb_class=3)
SBreaks[0] = 0.0
Cat_Data['Jenks_Sev'] = pd.cut(Cat_Data['Maximum_Severity'],
bins=SBreaks,
labels=[ 'low', 'medium', 'high'],
include_lowest=True)
self.SBreaks = [np.round(s,2) for s in SBreaks]
# print(Target, ' Severity Breaks')
# print('Low, Medium, High: ' ,SBreaks)
return Cat_Data
    def WSM_Pred_RRV_Plot(self):
        """Plot predictions vs. historical baselines with RRV bar summaries.

        Produces a 4x2 figure (one row per component: Mountain Dell, Little
        Dell, groundwater, Deer Creek): left column shows the predicted time
        series shaded green/red for satisfactory/unsatisfactory conditions,
        right column shows Reliability/Vulnerability/Max-Severity bars. Saves
        the figure to 'Figures/<scenario>_Analysis.pdf' and shows it.
        """
        print('Using the ', self.MDell_Thresh,'% & ', self.LDell_Thresh, '% capacities for Mountain & Little Dell Reservoirs')
        print('and the historical daily mean municipal groundwater withdrawal and Deer Creek Reservoir use:')
        print('\033[0;32;48m Green \033[0;0m shading suggests satisfactory conditions.')
        print('\033[0;31;48m Red \033[0;0m shading suggests unsatisfactory conditions.')
        print( ' ')
        print('Total volume of Groundwater withdrawal is ', round(sum(self.Analysis[self.GW_Pred])), 'acre-feet')
        print('Total volume of Deer Creek water requests is ', round(sum(self.Analysis[self.DC_Pred])), 'acre-feet')
        #Set up the target labels
        #Mountain Dell
        MDell_Hist = 'Hist_Mean_'+ self.MDell
        #Little Dell
        LDell_Hist = 'Hist_Mean_'+ self.LDell
        #GW
        GW_Hist = 'Hist_Mean_'+ self.GW
        #GW
        DC_Hist = 'Hist_Mean_'+ self.DC
        # NOTE(review): acre-feet -> MGD conversion factor; verify this value
        # (mutates self.Analysis in place, so repeated calls re-scale).
        af_to_MGD = 271328
        if self.units == 'MGD':
            self.Analysis[self.GW_Pred] = self.Analysis[self.GW_Pred]*af_to_MGD
            self.Analysis[self.DC_Pred] = self.Analysis[self.DC_Pred]*af_to_MGD
            self.Analysis[GW_Hist] = self.Analysis[GW_Hist]*af_to_MGD
            self.Analysis[DC_Hist] = self.Analysis[DC_Hist]*af_to_MGD
        #Define max values
        # Axis limits: 40% headroom above the larger of prediction/historical.
        max_LDell = max(max(self.Analysis[self.LDell_Pred]), max(self.Analysis[LDell_Hist]))*1.4
        max_MDell = max(max(self.Analysis[self.MDell_Pred]), max(self.Analysis[MDell_Hist]))*1.4
        max_GW = max(max(self.Analysis[self.GW_Pred]), max(self.Analysis[GW_Hist]))*1.4
        max_DC = max(max(self.Analysis[self.DC_Pred]), max(self.Analysis[DC_Hist]))*1.4
        # Collect the most recent RRV rows for every component into one frame.
        All_RRV = pd.DataFrame()
        for targs in self.Target_RRV:
            targ = pd.DataFrame(self.Target_RRV[targs][-6:])
            All_RRV = All_RRV.append(targ)
        #Sort DF to make plots more comprehendable
        All_RRV.sort_values(['Climate', 'Target'], ascending=[True, True], inplace=True)
        All_RRV = All_RRV.reset_index()
        self.All_RRV = All_RRV
        # One bar-chart frame per component, XGB_WSM rows only.
        components ={'Mtn_Dell_Percent_Full':pd.DataFrame(All_RRV.loc[(All_RRV['Model'] == 'XGB_WSM') & (All_RRV['Target']=='Mtn_Dell_Percent_Full')].copy()),
                    'LittleDell_Percent_Full':pd.DataFrame(All_RRV.loc[(All_RRV['Model'] == 'XGB_WSM') & (All_RRV['Target']=='LittleDell_Percent_Full')].copy()),
                    'SLCDPU_GW' : pd.DataFrame(All_RRV.loc[(All_RRV['Model'] == 'XGB_WSM') & (All_RRV['Target']=='SLCDPU_GW')].copy()),
                    'SLCDPU_DC_Water_Use': pd.DataFrame(All_RRV.loc[(All_RRV['Model'] == 'XGB_WSM') & (All_RRV['Target']=='SLCDPU_DC_Water_Use')].copy())
                    }
        delcols = ['index', 'Climate', 'Target', 'Resilience', 'MaxSeverity', 'Jenks_Vul', 'Jenks_Sev' ]
        for comp in components:
            components[comp] = components[comp].drop(delcols, axis = 1)
            components[comp] = components[comp].set_index('Model')
            components[comp] = components[comp].T
        self.components = components
        # better control over ax
        fig, ax = plt.subplots(4, 2)
        fig.set_size_inches(12,12)
        plt.subplots_adjust(wspace = 0.25, hspace = 0.3)
        labelsize = 12
        width = 0.7
        colors = [ 'blue']
        # Constant threshold columns so fill_between can compare series to series.
        self.Analysis['MDell_Thresh'] =self.MDell_Thresh
        self.Analysis['LDell_Thresh'] =self.LDell_Thresh
        #PLot Mountain Dell
        self.Analysis.plot(y = self.MDell_Pred , ax=ax[0,0], color = 'blue', label = 'Predicted')
        self.Analysis.plot(y = MDell_Hist , ax=ax[0,0], color = 'black', label = 'Historical Mean Reservoir Level')
        ax[0,0].axhline(y = self.MDell_Thresh, color = 'red', label = 'Unsatifactory Conditions Threshold')
        ax[0,0].fill_between(self.Analysis.index.values, self.Analysis[self.MDell_Pred], self.Analysis['MDell_Thresh'], where=self.Analysis[self.MDell_Pred] >= self.Analysis['MDell_Thresh'],
                         facecolor='green', alpha=0.2, interpolate=True)
        ax[0,0].fill_between(self.Analysis.index.values, self.Analysis[self.MDell_Pred], self.Analysis['MDell_Thresh'], where=self.Analysis[self.MDell_Pred] < self.Analysis['MDell_Thresh'],
                         facecolor='red', alpha=0.2, interpolate=True)
        ax[0,0].set_xlabel(' ', size = labelsize)
        ax[0,0].set_ylabel('Mountain Dell Reservoir \n Level (%)', size = labelsize)
        ax[0,0].set_ylim(0,100)
        ax[0,0].legend(bbox_to_anchor=(1,1.5), loc="upper center", ncol = 2, fontsize = 14)
        ax[0,0].xaxis.set_major_locator(MonthLocator())
        ax[0,0].xaxis.set_major_formatter(DateFormatter('%b'))
        ax[0,0].tick_params(axis='both', which='major', labelsize=8)
        #Mountain Dell
        components[self.MDell].plot.bar(width=width, color=colors, legend=False, ax = ax[0,1])
        ax[0,1].set_ylim(0,1)
        ax[0,1].axes.xaxis.set_ticklabels([])
        #PLot Little Dell
        self.Analysis.plot(y = self.LDell_Pred , ax=ax[1,0], color = 'blue', label = 'Predicted')
        self.Analysis.plot(y = LDell_Hist , ax=ax[1,0], color = 'black', label = 'Historical Mean Reservoir Level')
        ax[1,0].fill_between(self.Analysis.index.values, self.Analysis[self.LDell_Pred], self.Analysis['LDell_Thresh'], where=self.Analysis[self.LDell_Pred] >= self.Analysis['LDell_Thresh'],
                         facecolor='green', alpha=0.2, interpolate=True)
        ax[1,0].fill_between(self.Analysis.index.values, self.Analysis[self.LDell_Pred], self.Analysis['LDell_Thresh'], where=self.Analysis[self.LDell_Pred] < self.Analysis['LDell_Thresh'],
                         facecolor='red', alpha=0.2, interpolate=True)
        ax[1,0].axhline(y = self.LDell_Thresh, color = 'red', label = 'Unsatifactory Conditions Threshold')
        ax[1,0].set_xlabel(' ', size = labelsize)
        ax[1,0].set_ylabel('Little Dell Reservoir \n Level (%)', size = labelsize)
        ax[1,0].set_ylim(0,100)
        ax[1,0].legend().set_visible(False)
        ax[1,0].xaxis.set_major_locator(MonthLocator())
        ax[1,0].xaxis.set_major_formatter(DateFormatter('%b'))
        ax[1,0].tick_params(axis='both', which='major', labelsize=8)
        #Little Dell
        components[self.LDell].plot.bar(width=width, color=colors, legend=False, ax = ax[1,1])
        ax[1,1].set_ylim(0,1)
        ax[1,1].axes.xaxis.set_ticklabels([])
        #PLot GW
        # For demand components the coloring is inverted: above historical mean is red.
        self.Analysis.plot(y = self.GW_Pred , ax=ax[2,0], color = 'blue', label = 'Predicted')
        self.Analysis.plot(y = GW_Hist , ax=ax[2,0], color = 'red', label = 'Historical')
        ax[2,0].fill_between(self.Analysis.index.values, self.Analysis[self.GW_Pred], self.Analysis[GW_Hist], where=self.Analysis[self.GW_Pred] >= self.Analysis[GW_Hist],
                         facecolor='red', alpha=0.2, interpolate=True)
        ax[2,0].fill_between(self.Analysis.index.values, self.Analysis[self.GW_Pred], self.Analysis[GW_Hist], where=self.Analysis[self.GW_Pred] < self.Analysis[GW_Hist],
                         facecolor='green', alpha=0.2, interpolate=True)
        ax[2,0].set_xlabel(' ', size = labelsize)
        ax[2,0].set_ylabel('Groundwater Withdrawal \n ('+ self.units+')', size = labelsize)
        ax[2,0].set_ylim(0,max_GW)
        ax[2,0].legend().set_visible(False)
        ax[2,0].xaxis.set_major_locator(MonthLocator())
        ax[2,0].xaxis.set_major_formatter(DateFormatter('%b'))
        ax[2,0].tick_params(axis='both', which='major', labelsize=8)
        #GW
        components[self.GW].plot.bar(width=width, color=colors, legend=False, ax = ax[2,1])
        ax[2,1].set_ylim(0,1)
        ax[2,1].axes.xaxis.set_ticklabels([])
        #PLot DC
        self.Analysis.plot(y = self.DC_Pred , ax=ax[3,0], color = 'blue', label = 'Predicted')
        self.Analysis.plot(y = DC_Hist , ax=ax[3,0], color = 'red', label = 'Historical')
        ax[3,0].fill_between(self.Analysis.index.values, self.Analysis[self.DC_Pred], self.Analysis[DC_Hist], where=self.Analysis[self.DC_Pred] >= self.Analysis[DC_Hist],
                         facecolor='red', alpha=0.2, interpolate=True)
        ax[3,0].fill_between(self.Analysis.index.values, self.Analysis[self.DC_Pred], self.Analysis[DC_Hist], where=self.Analysis[self.DC_Pred] < self.Analysis[DC_Hist],
                         facecolor='green', alpha=0.2, interpolate=True)
        ax[3,0].set_xlabel('Time ', size = labelsize)
        ax[3,0].set_ylabel('Deer Creek Reservoir \n ('+ self.units+')', size = labelsize)
        ax[3,0].set_ylim(0,max_DC)
        ax[3,0].legend().set_visible(False)
        ax[3,0].xaxis.set_major_locator(MonthLocator())
        ax[3,0].xaxis.set_major_formatter(DateFormatter('%b'))
        ax[3,0].tick_params(axis='both', which='major', labelsize=8)
        #DC
        components[self.DC].plot.bar(width=width, color=colors, legend=False, ax = ax[3,1])
        ax[3,1].set_ylim(0,1)
        ax[3,1].set_xticklabels(["Reliability", "Vulnerability", "Max Severity"], rotation=45)
        #plt.title('Production Simulations', size = labelsize+2)
        fig.savefig('Figures/'+self.scenario+ '_Analysis.pdf')
        plt.show()
| 55,968 | -3 | 789 |
4cdc028ab2ad2c9e2b0be22d389a8f86ff60d74f | 1,197 | py | Python | client/verta/verta/_swagger/_public/modeldb/versioning/model/VersioningPathDatasetComponentBlob.py | CaptEmulation/modeldb | 78b10aca553e386554f9740db63466b1cf013a1a | [
"Apache-2.0"
] | 835 | 2017-02-08T20:14:24.000Z | 2020-03-12T17:37:49.000Z | client/verta/verta/_swagger/_public/modeldb/versioning/model/VersioningPathDatasetComponentBlob.py | CaptEmulation/modeldb | 78b10aca553e386554f9740db63466b1cf013a1a | [
"Apache-2.0"
] | 651 | 2019-04-18T12:55:07.000Z | 2022-03-31T23:45:09.000Z | client/verta/verta/_swagger/_public/modeldb/versioning/model/VersioningPathDatasetComponentBlob.py | CaptEmulation/modeldb | 78b10aca553e386554f9740db63466b1cf013a1a | [
"Apache-2.0"
] | 170 | 2017-02-13T14:49:22.000Z | 2020-02-19T17:59:12.000Z | # THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
| 24.9375 | 96 | 0.614871 | # THIS FILE IS AUTO-GENERATED. DO NOT EDIT
from verta._swagger.base_type import BaseType
class VersioningPathDatasetComponentBlob(BaseType):
  """Auto-generated model for a path-based dataset component blob.

  All five fields (path, size, last_modified_at_source, sha256, md5) are
  optional; the generated required-field check is kept for parity with the
  other swagger models.
  """

  def __init__(self, path=None, size=None, last_modified_at_source=None, sha256=None, md5=None):
    self.path = path
    self.size = size
    self.last_modified_at_source = last_modified_at_source
    self.sha256 = sha256
    self.md5 = md5
    # Generated validation: no field is mandatory here, but the check is kept.
    required = dict.fromkeys(
      ("path", "size", "last_modified_at_source", "sha256", "md5"), False)
    for name, mandatory in required.items():
      if self[name] is None and mandatory:
        raise ValueError('attribute {} is required'.format(name))

  @staticmethod
  def from_json(d):
    """Build an instance from a decoded JSON dict."""
    # Mirror the generated template: re-assign each present key in place
    # before constructing the instance from the dict.
    for name in ("path", "size", "last_modified_at_source", "sha256", "md5"):
      value = d.get(name, None)
      if value is not None:
        d[name] = value
    return VersioningPathDatasetComponentBlob(**d)
| 990 | 95 | 23 |
7a18c524a815806fa4b942a9a18257b636f44285 | 26,557 | py | Python | app/settings.py | vpont/DemonEditor | 8fee5033a49e21f960d89d6ce9101b0f84a8d354 | [
"MIT"
] | null | null | null | app/settings.py | vpont/DemonEditor | 8fee5033a49e21f960d89d6ce9101b0f84a8d354 | [
"MIT"
] | null | null | null | app/settings.py | vpont/DemonEditor | 8fee5033a49e21f960d89d6ce9101b0f84a8d354 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2018-2022 Dmitriy Yefremov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author: Dmitriy Yefremov
#
import copy
import json
import locale
import os
import sys
from enum import Enum, IntEnum
from functools import lru_cache
from pathlib import Path
from pprint import pformat
from textwrap import dedent
# Platform path separator, reused in all path-building below.
SEP = os.sep
HOME_PATH = str(Path.home())
# User configuration location (~/.config/demon-editor/).
CONFIG_PATH = HOME_PATH + "{}.config{}demon-editor{}".format(SEP, SEP, SEP)
CONFIG_FILE = CONFIG_PATH + "config.json"
# Default data directory (~/DemonEditor/).
DATA_PATH = HOME_PATH + "{}DemonEditor{}".format(SEP, SEP)
GTK_PATH = os.environ.get("GTK_PATH", None)
# Platform flags used to pick platform-specific defaults.
IS_DARWIN = sys.platform == "darwin"
IS_WIN = sys.platform == "win32"
IS_LINUX = sys.platform == "linux"
class Defaults(Enum):
    """ Default program settings """
    # Receiver credentials and connection defaults.
    USER = "root"
    PASSWORD = ""
    HOST = "127.0.0.1"
    # Ports are stored as strings.
    FTP_PORT = "21"
    HTTP_PORT = "80"
    TELNET_PORT = "23"
    HTTP_USE_SSL = False
    # Enigma2.
    BOX_SERVICES_PATH = "/etc/enigma2/"
    BOX_SATELLITE_PATH = "/etc/tuxbox/"
    BOX_PICON_PATH = "/usr/share/enigma2/picon/"
    # Candidate picon locations on the receiver.
    BOX_PICON_PATHS = ("/usr/share/enigma2/picon/",
                       "/media/hdd/picon/",
                       "/media/usb/picon/",
                       "/media/mmc/picon/",
                       "/media/cf/picon/")
    # Neutrino.
    NEUTRINO_BOX_SERVICES_PATH = "/var/tuxbox/config/zapit/"
    NEUTRINO_BOX_SATELLITE_PATH = "/var/tuxbox/config/"
    NEUTRINO_BOX_PICON_PATH = "/usr/share/tuxbox/neutrino/icons/logo/"
    NEUTRINO_BOX_PICON_PATHS = ("/usr/share/tuxbox/neutrino/icons/logo/",)
    # Paths.
    BACKUP_PATH = "{}backup{}".format(DATA_PATH, SEP)
    PICON_PATH = "{}picons{}".format(DATA_PATH, SEP)
    DEFAULT_PROFILE = "default"
    # Backup/feature toggles.
    BACKUP_BEFORE_DOWNLOADING = True
    BACKUP_BEFORE_SAVE = True
    V5_SUPPORT = False
    FORCE_BQ_NAMES = False
    HTTP_API_SUPPORT = True
    ENABLE_YT_DL = False
    ENABLE_SEND_TO = False
    # List appearance.
    USE_COLORS = True
    NEW_COLOR = "rgb(255,230,204)"
    EXTRA_COLOR = "rgb(179,230,204)"
    TOOLTIP_LOGO_SIZE = 96
    LIST_PICON_SIZE = 32
    FAV_CLICK_MODE = 0
    # Playback: window mode on macOS, built-in elsewhere (see PlayStreamsMode).
    PLAY_STREAMS_MODE = 1 if IS_DARWIN else 0
    STREAM_LIB = "mpv" if IS_WIN else "vlc"
    MAIN_LIST_PLAYBACK = False
    PROFILE_FOLDER_DEFAULT = False
    # Recording/transcoding defaults.
    RECORDS_PATH = DATA_PATH + "records{}".format(SEP)
    ACTIVATE_TRANSCODING = False
    ACTIVE_TRANSCODING_PRESET = "720p TV{}device".format(SEP)
class SettingsType(IntEnum):
    """ Profiles for settings """
    ENIGMA_2 = 0
    NEUTRINO_MP = 1

    def get_default_settings(self):
        """ Returns default settings for current type. """
        # Receiver-flavor specific defaults: filesystem paths plus the
        # HTTP/Telnet timeouts (Neutrino boxes respond faster, so shorter).
        if self is SettingsType.ENIGMA_2:
            paths = (Defaults.BOX_SERVICES_PATH.value,
                     Defaults.BOX_SATELLITE_PATH.value,
                     Defaults.BOX_PICON_PATH.value)
            http_timeout, telnet_timeout = 5, 5
        else:
            paths = (Defaults.NEUTRINO_BOX_SERVICES_PATH.value,
                     Defaults.NEUTRINO_BOX_SATELLITE_PATH.value,
                     Defaults.NEUTRINO_BOX_PICON_PATH.value)
            http_timeout, telnet_timeout = 2, 1

        srv_path, sat_path, picons_path = paths
        return {"setting_type": self.value,
                "host": Defaults.HOST.value,
                "port": Defaults.FTP_PORT.value,
                "timeout": 5,
                "user": Defaults.USER.value,
                "password": Defaults.PASSWORD.value,
                "http_port": Defaults.HTTP_PORT.value,
                "http_timeout": http_timeout,
                "http_use_ssl": Defaults.HTTP_USE_SSL.value,
                "telnet_port": Defaults.TELNET_PORT.value,
                "telnet_timeout": telnet_timeout,
                "services_path": srv_path,
                "user_bouquet_path": srv_path,
                "satellites_xml_path": sat_path,
                "picons_path": picons_path}
class PlayStreamsMode(IntEnum):
    """ Behavior mode when opening streams. """
    BUILT_IN = 0  # Built-in playback.
    WINDOW = 1    # Playback in a window.
    M3U = 2       # Hand off as an M3U playlist.
if __name__ == "__main__":
    pass  # Module is import-only; no standalone entry point.
| 31.317217 | 119 | 0.661633 | # -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2018-2022 Dmitriy Yefremov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author: Dmitriy Yefremov
#
import copy
import json
import locale
import os
import sys
from enum import Enum, IntEnum
from functools import lru_cache
from pathlib import Path
from pprint import pformat
from textwrap import dedent
SEP = os.sep
HOME_PATH = str(Path.home())
CONFIG_PATH = HOME_PATH + "{}.config{}demon-editor{}".format(SEP, SEP, SEP)
CONFIG_FILE = CONFIG_PATH + "config.json"
DATA_PATH = HOME_PATH + "{}DemonEditor{}".format(SEP, SEP)
GTK_PATH = os.environ.get("GTK_PATH", None)
IS_DARWIN = sys.platform == "darwin"
IS_WIN = sys.platform == "win32"
IS_LINUX = sys.platform == "linux"
class Defaults(Enum):
    """ Default program settings """
    # Connection credentials and ports.
    USER = "root"
    PASSWORD = ""
    HOST = "127.0.0.1"
    FTP_PORT = "21"
    HTTP_PORT = "80"
    TELNET_PORT = "23"
    HTTP_USE_SSL = False
    # Enigma2.
    BOX_SERVICES_PATH = "/etc/enigma2/"
    BOX_SATELLITE_PATH = "/etc/tuxbox/"
    BOX_PICON_PATH = "/usr/share/enigma2/picon/"
    # Candidate picon locations on the receiver (see Settings.picons_paths).
    BOX_PICON_PATHS = ("/usr/share/enigma2/picon/",
                       "/media/hdd/picon/",
                       "/media/usb/picon/",
                       "/media/mmc/picon/",
                       "/media/cf/picon/")
    # Neutrino.
    NEUTRINO_BOX_SERVICES_PATH = "/var/tuxbox/config/zapit/"
    NEUTRINO_BOX_SATELLITE_PATH = "/var/tuxbox/config/"
    NEUTRINO_BOX_PICON_PATH = "/usr/share/tuxbox/neutrino/icons/logo/"
    NEUTRINO_BOX_PICON_PATHS = ("/usr/share/tuxbox/neutrino/icons/logo/",)
    # Paths.
    BACKUP_PATH = "{}backup{}".format(DATA_PATH, SEP)
    PICON_PATH = "{}picons{}".format(DATA_PATH, SEP)
    DEFAULT_PROFILE = "default"
    # Behavior flags and UI defaults.
    BACKUP_BEFORE_DOWNLOADING = True
    BACKUP_BEFORE_SAVE = True
    V5_SUPPORT = False
    FORCE_BQ_NAMES = False
    HTTP_API_SUPPORT = True
    ENABLE_YT_DL = False
    ENABLE_SEND_TO = False
    USE_COLORS = True
    NEW_COLOR = "rgb(255,230,204)"
    EXTRA_COLOR = "rgb(179,230,204)"
    TOOLTIP_LOGO_SIZE = 96
    LIST_PICON_SIZE = 32
    FAV_CLICK_MODE = 0
    # Platform-dependent defaults.
    PLAY_STREAMS_MODE = 1 if IS_DARWIN else 0
    STREAM_LIB = "mpv" if IS_WIN else "vlc"
    MAIN_LIST_PLAYBACK = False
    PROFILE_FOLDER_DEFAULT = False
    RECORDS_PATH = DATA_PATH + "records{}".format(SEP)
    # Transcoding.
    ACTIVATE_TRANSCODING = False
    ACTIVE_TRANSCODING_PRESET = "720p TV{}device".format(SEP)
class SettingsType(IntEnum):
    """ Profiles for settings """
    ENIGMA_2 = 0
    NEUTRINO_MP = 1

    def get_default_settings(self):
        """ Returns default settings for current type. """
        enigma = self is self.ENIGMA_2
        services = Defaults.BOX_SERVICES_PATH if enigma else Defaults.NEUTRINO_BOX_SERVICES_PATH
        satellites = Defaults.BOX_SATELLITE_PATH if enigma else Defaults.NEUTRINO_BOX_SATELLITE_PATH
        picons = Defaults.BOX_PICON_PATH if enigma else Defaults.NEUTRINO_BOX_PICON_PATH

        return {"setting_type": self.value,
                "host": Defaults.HOST.value,
                "port": Defaults.FTP_PORT.value,
                "timeout": 5,
                "user": Defaults.USER.value,
                "password": Defaults.PASSWORD.value,
                "http_port": Defaults.HTTP_PORT.value,
                "http_timeout": 5 if enigma else 2,
                "http_use_ssl": Defaults.HTTP_USE_SSL.value,
                "telnet_port": Defaults.TELNET_PORT.value,
                "telnet_timeout": 5 if enigma else 1,
                "services_path": services.value,
                "user_bouquet_path": services.value,
                "satellites_xml_path": satellites.value,
                "picons_path": picons.value}
class SettingsException(Exception):
    """ Base error for settings handling. """
    pass


class SettingsReadException(SettingsException):
    """ Raised when the settings file cannot be read or parsed. """
    pass
class PlayStreamsMode(IntEnum):
    """ Behavior mode when opening streams. """
    BUILT_IN = 0  # Built-in playback.
    WINDOW = 1    # Playback in a window.
    M3U = 2       # Hand off as an M3U playlist.
class Settings:
__INSTANCE = None
__VERSION = 2
def __init__(self, ext_settings=None):
    """ Loads settings (from *ext_settings* if given, otherwise from the config file)
        and selects the current profile.

        Raises SettingsReadException on permission errors and SettingsException
        when the stored format version is outdated or the profile is missing.
    """
    try:
        settings = ext_settings or self.get_settings()
    except PermissionError as e:
        raise SettingsReadException(e)

    # A config written by an older format version is rejected outright.
    if self.__VERSION > settings.get("version", 0):
        raise SettingsException("Outdated version of the settings format!")

    self._settings = settings
    self._current_profile = self._settings.get("default_profile", "default")
    self._profiles = self._settings.get("profiles", {"default": SettingsType.ENIGMA_2.get_default_settings()})
    self._cp_settings = self._profiles.get(self._current_profile, None)  # Current profile settings

    if not self._cp_settings:
        raise SettingsException("Error reading settings [current profile].")
def __str__(self):
return dedent(""" Current profile: {}
Current profile options:
{}
Full config:
{}
""").format(self._current_profile,
pformat(self._cp_settings),
pformat(self._settings))
@classmethod
def get_instance(cls):
if not cls.__INSTANCE:
cls.__INSTANCE = Settings()
return cls.__INSTANCE
def save(self):
self.write_settings(self._settings)
def reset(self, force_write=False):
for k, v in self.setting_type.get_default_settings().items():
self._cp_settings[k] = v
if force_write:
self.save()
@staticmethod
def reset_to_default():
Settings.write_settings(Settings.get_default_settings())
def get_default(self, p_name):
""" Returns default value for current settings type """
return self.setting_type.get_default_settings().get(p_name)
def add(self, name, value):
""" Adds extra options """
self._settings[name] = value
def get(self, name, default=None):
""" Returns extra options or None """
return self._settings.get(name, default)
@property
def settings(self):
""" Returns copy of the current settings! """
return copy.deepcopy(self._settings)
@settings.setter
def settings(self, value):
""" Sets copy of the settings! """
self._settings = copy.deepcopy(value)
@property
def current_profile(self):
return self._current_profile
@current_profile.setter
def current_profile(self, value):
self._current_profile = value
self._cp_settings = self._profiles.get(self._current_profile)
@property
def default_profile(self):
return self._settings.get("default_profile", "default")
@default_profile.setter
def default_profile(self, value):
self._settings["default_profile"] = value
@property
def current_profile_settings(self):
return self._cp_settings
@property
def profiles(self):
return self._profiles
@profiles.setter
def profiles(self, ps):
self._profiles = ps
self._settings["profiles"] = self._profiles
@property
def setting_type(self):
return SettingsType(self._cp_settings.get("setting_type", SettingsType.ENIGMA_2.value))
@setting_type.setter
def setting_type(self, s_type):
self._cp_settings["setting_type"] = s_type.value
# ******* Network ******** #
@property
def host(self):
return self._cp_settings.get("host", self.get_default("host"))
@host.setter
def host(self, value):
self._cp_settings["host"] = value
@property
def port(self):
return self._cp_settings.get("port", self.get_default("port"))
@port.setter
def port(self, value):
self._cp_settings["port"] = value
@property
def user(self):
return self._cp_settings.get("user", self.get_default("user"))
@user.setter
def user(self, value):
self._cp_settings["user"] = value
@property
def password(self):
return self._cp_settings.get("password", self.get_default("password"))
@password.setter
def password(self, value):
self._cp_settings["password"] = value
@property
def http_port(self):
return self._cp_settings.get("http_port", self.get_default("http_port"))
@http_port.setter
def http_port(self, value):
self._cp_settings["http_port"] = value
@property
def http_timeout(self):
return self._cp_settings.get("http_timeout", self.get_default("http_timeout"))
@http_timeout.setter
def http_timeout(self, value):
self._cp_settings["http_timeout"] = value
@property
def http_use_ssl(self):
return self._cp_settings.get("http_use_ssl", self.get_default("http_use_ssl"))
@http_use_ssl.setter
def http_use_ssl(self, value):
self._cp_settings["http_use_ssl"] = value
@property
def telnet_port(self):
return self._cp_settings.get("telnet_port", self.get_default("telnet_port"))
@telnet_port.setter
def telnet_port(self, value):
self._cp_settings["telnet_port"] = value
@property
def telnet_timeout(self):
return self._cp_settings.get("telnet_timeout", self.get_default("telnet_timeout"))
@telnet_timeout.setter
def telnet_timeout(self, value):
self._cp_settings["telnet_timeout"] = value
@property
def services_path(self):
return self._cp_settings.get("services_path", self.get_default("services_path"))
@services_path.setter
def services_path(self, value):
self._cp_settings["services_path"] = value
@property
def user_bouquet_path(self):
return self._cp_settings.get("user_bouquet_path", self.get_default("user_bouquet_path"))
@user_bouquet_path.setter
def user_bouquet_path(self, value):
self._cp_settings["user_bouquet_path"] = value
@property
def satellites_xml_path(self):
return self._cp_settings.get("satellites_xml_path", self.get_default("satellites_xml_path"))
@satellites_xml_path.setter
def satellites_xml_path(self, value):
self._cp_settings["satellites_xml_path"] = value
@property
def picons_path(self):
return self._cp_settings.get("picons_path", self.get_default("picons_path"))
@picons_path.setter
def picons_path(self, value):
self._cp_settings["picons_path"] = value
@property
def picons_paths(self):
if self.setting_type is SettingsType.NEUTRINO_MP:
return self._settings.get("neutrino_picon_paths", Defaults.NEUTRINO_BOX_PICON_PATHS.value)
else:
return self._settings.get("picon_paths", Defaults.BOX_PICON_PATHS.value)
@picons_paths.setter
def picons_paths(self, value):
if self.setting_type is SettingsType.NEUTRINO_MP:
self._settings["neutrino_picon_paths"] = value
else:
self._settings["picon_paths"] = value
# ***** Local paths ***** #
@property
def profile_folder_is_default(self):
return self._settings.get("profile_folder_is_default", Defaults.PROFILE_FOLDER_DEFAULT.value)
@profile_folder_is_default.setter
def profile_folder_is_default(self, value):
self._settings["profile_folder_is_default"] = value
@property
def default_data_path(self):
return self._settings.get("default_data_path", DATA_PATH)
@default_data_path.setter
def default_data_path(self, value):
self._settings["default_data_path"] = value
@property
def default_backup_path(self):
return self._settings.get("default_backup_path", Defaults.BACKUP_PATH.value)
@default_backup_path.setter
def default_backup_path(self, value):
self._settings["default_backup_path"] = value
@property
def default_picon_path(self):
return self._settings.get("default_picon_path", Defaults.PICON_PATH.value)
@default_picon_path.setter
def default_picon_path(self, value):
self._settings["default_picon_path"] = value
@property
def profile_data_path(self):
return f"{self.default_data_path}data{SEP}{self._current_profile}{SEP}"
@profile_data_path.setter
def profile_data_path(self, value):
self._cp_settings["profile_data_path"] = value
@property
def profile_picons_path(self):
if self.profile_folder_is_default:
return f"{self.profile_data_path}picons{SEP}"
return f"{self.default_picon_path}{self._current_profile}{SEP}"
@profile_picons_path.setter
def profile_picons_path(self, value):
self._cp_settings["profile_picons_path"] = value
@property
def profile_backup_path(self):
if self.profile_folder_is_default:
return f"{self.profile_data_path}backup{SEP}"
return f"{self.default_backup_path}{self._current_profile}{SEP}"
@profile_backup_path.setter
def profile_backup_path(self, value):
self._cp_settings["profile_backup_path"] = value
@property
def records_path(self):
return self._settings.get("records_path", Defaults.RECORDS_PATH.value)
@records_path.setter
def records_path(self, value):
self._settings["records_path"] = value
# ******** Streaming ********* #
@property
def activate_transcoding(self):
return self._settings.get("activate_transcoding", Defaults.ACTIVATE_TRANSCODING.value)
@activate_transcoding.setter
def activate_transcoding(self, value):
self._settings["activate_transcoding"] = value
@property
def active_preset(self):
return self._settings.get("active_preset", Defaults.ACTIVE_TRANSCODING_PRESET.value)
@active_preset.setter
def active_preset(self, value):
self._settings["active_preset"] = value
@property
def transcoding_presets(self):
return self._settings.get("transcoding_presets", self.get_default_transcoding_presets())
@transcoding_presets.setter
def transcoding_presets(self, value):
self._settings["transcoding_presets"] = value
@property
def play_streams_mode(self):
return PlayStreamsMode(self._settings.get("play_streams_mode", Defaults.PLAY_STREAMS_MODE.value))
@play_streams_mode.setter
def play_streams_mode(self, value):
self._settings["play_streams_mode"] = value
@property
def stream_lib(self):
return self._settings.get("stream_lib", Defaults.STREAM_LIB.value)
@stream_lib.setter
def stream_lib(self, value):
self._settings["stream_lib"] = value
@property
def fav_click_mode(self):
return self._settings.get("fav_click_mode", Defaults.FAV_CLICK_MODE.value)
@fav_click_mode.setter
def fav_click_mode(self, value):
self._settings["fav_click_mode"] = value
@property
def main_list_playback(self):
return self._settings.get("main_list_playback", Defaults.MAIN_LIST_PLAYBACK.value)
@main_list_playback.setter
def main_list_playback(self, value):
self._settings["main_list_playback"] = value
# *********** EPG ************ #
@property
def epg_options(self):
""" Options used by the EPG dialog. """
return self._cp_settings.get("epg_options", None)
@epg_options.setter
def epg_options(self, value):
self._cp_settings["epg_options"] = value
# *********** FTP ************ #
@property
def ftp_bookmarks(self):
return self._cp_settings.get("ftp_bookmarks", [])
@ftp_bookmarks.setter
def ftp_bookmarks(self, value):
self._cp_settings["ftp_bookmarks"] = value
# ***** Program settings ***** #
@property
def backup_before_save(self):
return self._settings.get("backup_before_save", Defaults.BACKUP_BEFORE_SAVE.value)
@backup_before_save.setter
def backup_before_save(self, value):
self._settings["backup_before_save"] = value
@property
def backup_before_downloading(self):
return self._settings.get("backup_before_downloading", Defaults.BACKUP_BEFORE_DOWNLOADING.value)
@backup_before_downloading.setter
def backup_before_downloading(self, value):
self._settings["backup_before_downloading"] = value
@property
def v5_support(self):
return self._settings.get("v5_support", Defaults.V5_SUPPORT.value)
@v5_support.setter
def v5_support(self, value):
self._settings["v5_support"] = value
@property
def force_bq_names(self):
return self._settings.get("force_bq_names", Defaults.FORCE_BQ_NAMES.value)
@force_bq_names.setter
def force_bq_names(self, value):
self._settings["force_bq_names"] = value
@property
def http_api_support(self):
return self._settings.get("http_api_support", Defaults.HTTP_API_SUPPORT.value)
@http_api_support.setter
def http_api_support(self, value):
self._settings["http_api_support"] = value
@property
def enable_yt_dl(self):
return self._settings.get("enable_yt_dl", Defaults.ENABLE_YT_DL.value)
@enable_yt_dl.setter
def enable_yt_dl(self, value):
self._settings["enable_yt_dl"] = value
@property
def enable_yt_dl_update(self):
return self._settings.get("enable_yt_dl_update", Defaults.ENABLE_YT_DL.value)
@enable_yt_dl_update.setter
def enable_yt_dl_update(self, value):
self._settings["enable_yt_dl_update"] = value
@property
def enable_send_to(self):
return self._settings.get("enable_send_to", Defaults.ENABLE_SEND_TO.value)
@enable_send_to.setter
def enable_send_to(self, value):
self._settings["enable_send_to"] = value
@property
def language(self):
return self._settings.get("language", locale.getlocale()[0] or "en_US")
@language.setter
def language(self, value):
self._settings["language"] = value
@property
def load_last_config(self):
return self._settings.get("load_last_config", False)
@load_last_config.setter
def load_last_config(self, value):
self._settings["load_last_config"] = value
@property
def show_srv_hints(self):
""" Show short info as hints in the main services list. """
return self._settings.get("show_srv_hints", True)
@show_srv_hints.setter
def show_srv_hints(self, value):
self._settings["show_srv_hints"] = value
@property
def show_bq_hints(self):
""" Show detailed info as hints in the bouquet list. """
return self._settings.get("show_bq_hints", True)
@show_bq_hints.setter
def show_bq_hints(self, value):
self._settings["show_bq_hints"] = value
# *********** Appearance *********** #
@property
def list_font(self):
return self._settings.get("list_font", "")
@list_font.setter
def list_font(self, value):
self._settings["list_font"] = value
@property
def list_picon_size(self):
return self._settings.get("list_picon_size", Defaults.LIST_PICON_SIZE.value)
@list_picon_size.setter
def list_picon_size(self, value):
self._settings["list_picon_size"] = value
@property
def tooltip_logo_size(self):
return self._settings.get("tooltip_logo_size", Defaults.TOOLTIP_LOGO_SIZE.value)
@tooltip_logo_size.setter
def tooltip_logo_size(self, value):
self._settings["tooltip_logo_size"] = value
@property
def use_colors(self):
return self._settings.get("use_colors", Defaults.USE_COLORS.value)
@use_colors.setter
def use_colors(self, value):
self._settings["use_colors"] = value
@property
def new_color(self):
return self._settings.get("new_color", Defaults.NEW_COLOR.value)
@new_color.setter
def new_color(self, value):
self._settings["new_color"] = value
@property
def extra_color(self):
return self._settings.get("extra_color", Defaults.EXTRA_COLOR.value)
@extra_color.setter
def extra_color(self, value):
self._settings["extra_color"] = value
@property
def dark_mode(self):
if IS_DARWIN:
import subprocess
cmd = ["defaults", "read", "-g", "AppleInterfaceStyle"]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
return "Dark" in str(p[0])
return self._settings.get("dark_mode", False)
@dark_mode.setter
def dark_mode(self, value):
self._settings["dark_mode"] = value
@property
def display_picons(self):
return self._settings.get("display_picons", True)
@display_picons.setter
def display_picons(self, value):
self._settings["display_picons"] = value
@property
def alternate_layout(self):
return self._settings.get("alternate_layout", IS_DARWIN)
@alternate_layout.setter
def alternate_layout(self, value):
self._settings["alternate_layout"] = value
@property
def bq_details_first(self):
return self._settings.get("bq_details_first", False)
@bq_details_first.setter
def bq_details_first(self, value):
self._settings["bq_details_first"] = value
@property
def is_themes_support(self):
return self._settings.get("is_themes_support", False)
@is_themes_support.setter
def is_themes_support(self, value):
self._settings["is_themes_support"] = value
@property
def theme(self):
return self._settings.get("theme", "Default")
@theme.setter
def theme(self, value):
self._settings["theme"] = value
@property
@lru_cache(1)
def themes_path(self):
return f"{HOME_PATH}{SEP}.themes{SEP}"
@property
def icon_theme(self):
return self._settings.get("icon_theme", "Adwaita")
@icon_theme.setter
def icon_theme(self, value):
self._settings["icon_theme"] = value
@property
@lru_cache(1)
def icon_themes_path(self):
return f"{HOME_PATH}{SEP}.icons{SEP}"
@property
def is_darwin(self):
return IS_DARWIN
# *********** Download dialog *********** #
@property
def use_http(self):
return self._settings.get("use_http", True)
@use_http.setter
def use_http(self, value):
self._settings["use_http"] = value
@property
def remove_unused_bouquets(self):
return self._settings.get("remove_unused_bouquets", True)
@remove_unused_bouquets.setter
def remove_unused_bouquets(self, value):
self._settings["remove_unused_bouquets"] = value
# **************** Debug **************** #
@property
def debug_mode(self):
return self._settings.get("debug_mode", False)
@debug_mode.setter
def debug_mode(self, value):
self._settings["debug_mode"] = value
# **************** Experimental **************** #
@property
def is_enable_experimental(self):
""" Allows experimental functionality. """
return self._settings.get("enable_experimental", False)
@is_enable_experimental.setter
def is_enable_experimental(self, value):
self._settings["enable_experimental"] = value
# **************** Get-Set settings **************** #
@staticmethod
def get_settings():
    """ Reads settings from the config file.

        Creates the file with defaults first when it is missing or empty.
        Raises SettingsReadException if the file contains invalid JSON.
    """
    if not os.path.isfile(CONFIG_FILE) or os.stat(CONFIG_FILE).st_size == 0:
        Settings.write_settings(Settings.get_default_settings())

    with open(CONFIG_FILE, "r", encoding="utf-8") as config_file:
        try:
            return json.load(config_file)
        except ValueError as e:
            raise SettingsReadException(e)
@staticmethod
def get_default_settings(profile_name="default"):
    """ Returns a complete default config dict containing a single Enigma2 profile. """
    def_settings = SettingsType.ENIGMA_2.get_default_settings()
    return {
        # Name-mangled inside the class body, so this resolves to _Settings__VERSION.
        "version": Settings.__VERSION,
        "default_profile": Defaults.DEFAULT_PROFILE.value,
        "profiles": {profile_name: def_settings},
        "v5_support": Defaults.V5_SUPPORT.value,
        "http_api_support": Defaults.HTTP_API_SUPPORT.value,
        "enable_yt_dl": Defaults.ENABLE_YT_DL.value,
        "enable_send_to": Defaults.ENABLE_SEND_TO.value,
        "use_colors": Defaults.USE_COLORS.value,
        "new_color": Defaults.NEW_COLOR.value,
        "extra_color": Defaults.EXTRA_COLOR.value,
        "fav_click_mode": Defaults.FAV_CLICK_MODE.value,
        "profile_folder_is_default": Defaults.PROFILE_FOLDER_DEFAULT.value,
        "records_path": Defaults.RECORDS_PATH.value
    }
@staticmethod
def get_default_transcoding_presets():
return {"720p TV/device": {"vcodec": "h264", "vb": "1500", "width": "1280", "height": "720", "acodec": "mp3",
"ab": "192", "channels": "2", "samplerate": "44100", "scodec": "none"},
"1080p TV/device": {"vcodec": "h264", "vb": "3500", "width": "1920", "height": "1080", "acodec": "mp3",
"ab": "192", "channels": "2", "samplerate": "44100", "scodec": "none"}}
@staticmethod
def write_settings(config):
    """ Serializes *config* as JSON to the config file, creating the directory if needed. """
    os.makedirs(os.path.dirname(CONFIG_PATH), exist_ok=True)
    with open(CONFIG_FILE, "w", encoding="utf-8") as config_file:
        json.dump(config, config_file, indent=" ")
if __name__ == "__main__":
    pass  # Module is import-only; no standalone entry point.
| 13,041 | 8,387 | 69 |
f0500e91bd245db4ab04b7cb09749c51d33607a3 | 549 | py | Python | resources/migrations/0002_auto_20201222_0951.py | alimustafashah/core | 7280c4ca2e88d700ad35af05fbe0766e9ad8e5b4 | [
"MIT"
] | null | null | null | resources/migrations/0002_auto_20201222_0951.py | alimustafashah/core | 7280c4ca2e88d700ad35af05fbe0766e9ad8e5b4 | [
"MIT"
] | 24 | 2021-04-29T18:58:51.000Z | 2021-08-06T23:07:03.000Z | resources/migrations/0002_auto_20201222_0951.py | alimustafashah/core | 7280c4ca2e88d700ad35af05fbe0766e9ad8e5b4 | [
"MIT"
] | 2 | 2021-04-29T23:03:55.000Z | 2021-04-29T23:43:52.000Z | # Generated by Django 3.1.4 on 2020-12-22 09:51
from django.db import migrations, models
| 22.875 | 58 | 0.575592 | # Generated by Django 3.1.4 on 2020-12-22 09:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("resources", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="jobposting",
name="expiry_date",
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name="jobposting",
name="url",
field=models.CharField(max_length=300),
),
]
| 0 | 435 | 23 |
0566eeac1f8eb2659142327625a844985098839c | 1,786 | py | Python | opencon/application/resources.py | sparcopen/opencon-2017-app-code | c70cb929ebd931b1ad991eaf63df10b47e080989 | [
"MIT"
] | null | null | null | opencon/application/resources.py | sparcopen/opencon-2017-app-code | c70cb929ebd931b1ad991eaf63df10b47e080989 | [
"MIT"
] | null | null | null | opencon/application/resources.py | sparcopen/opencon-2017-app-code | c70cb929ebd931b1ad991eaf63df10b47e080989 | [
"MIT"
] | null | null | null | from import_export import resources
from .models import Application2017, Draft
from opencon.rating.models import User, Round0Rating, Round1Rating, Round2Rating
| 33.074074 | 188 | 0.695409 | from import_export import resources
from .models import Application2017, Draft
from opencon.rating.models import User, Round0Rating, Round1Rating, Round2Rating
class Application2017Resource(resources.ModelResource):
class Meta:
model = Application2017
fields = 'id email created_at citizenship residence profession experience field gender engagement referred_by need_rating0 need_rating1 need_rating2 rating1 rating2 status'.split()
# fields = ('id', 'name', 'author', 'price',)
# exclude = ('tags',)
# export_order = ('id', 'price', 'author', 'name',)
# #todo -- check "orcid", resolve "status_by"
def get_queryset(self):
return self._meta.model.objects.order_by('id') # sort
class DraftResource(resources.ModelResource):
    """ Import/export resource for Draft; rows ordered by id. """

    class Meta:
        model = Draft

    def get_queryset(self):
        return self._meta.model.objects.order_by('id')  # sort


class UserResource(resources.ModelResource):
    """ Import/export resource for rating users; rows ordered by id. """

    class Meta:
        model = User

    def get_queryset(self):
        return self._meta.model.objects.order_by('id')  # sort


class Round0RatingResource(resources.ModelResource):
    """ Import/export resource for round-0 decisions (limited field set). """

    class Meta:
        model = Round0Rating
        fields = 'id decision application created_by'.split()

    def get_queryset(self):
        return self._meta.model.objects.order_by('id')  # sort


class Round1RatingResource(resources.ModelResource):
    """ Import/export resource for round-1 ratings (limited field set). """

    class Meta:
        model = Round1Rating
        fields = 'id rating application created_by'.split()

    def get_queryset(self):
        return self._meta.model.objects.order_by('id')  # sort


class Round2RatingResource(resources.ModelResource):
    """ Import/export resource for round-2 ratings; all fields. """

    class Meta:
        model = Round2Rating

    def get_queryset(self):
        return self._meta.model.objects.order_by('id')  # sort
| 384 | 1,104 | 138 |
132a2bbebdec9db189b8912b27d98ad701f033a6 | 1,286 | py | Python | zhihu/scrapy_redis/BloomfilterOnRedis.py | jfzhang95/zhihu_spider | bf01e1c584302750a749401118291c909dea0d28 | [
"Apache-2.0"
] | null | null | null | zhihu/scrapy_redis/BloomfilterOnRedis.py | jfzhang95/zhihu_spider | bf01e1c584302750a749401118291c909dea0d28 | [
"Apache-2.0"
] | null | null | null | zhihu/scrapy_redis/BloomfilterOnRedis.py | jfzhang95/zhihu_spider | bf01e1c584302750a749401118291c909dea0d28 | [
"Apache-2.0"
] | 1 | 2020-05-16T06:56:59.000Z | 2020-05-16T06:56:59.000Z | # -*- coding: utf-8 -*-
| 29.227273 | 70 | 0.550544 | # -*- coding: utf-8 -*-
class SimpleHash(object):
    """ Seeded polynomial string hash mapped into a fixed-size bit space. """

    def __init__(self, cap, seed):
        # cap: size of the bit space; seed: multiplier for the rolling hash.
        self.cap = cap
        self.seed = seed

    def hash(self, value):
        """ Returns the bit offset of *value* in [0, cap). """
        acc = 0
        for ch in value:
            acc += self.seed * acc + ord(ch)
        return (self.cap - 1) & acc
class BloomFilter(object):
    """ Bloom filter backed by Redis bitmaps, sharded across *blockNum* keys. """

    def __init__(self, server, key, blockNum=1):
        self.bit_size = 1 << 31  # A Redis string holds at most 512 MB; 2**31 bits (256 MB) are used per block.
        self.seeds = [5, 7, 11, 13, 31]
        # self.seeds = [5, 7, 11, 13, 31, 37, 61]
        self.server = server      # Redis client; must provide getbit/setbit.
        self.key = key            # Key prefix; the shard index is appended.
        self.blockNum = blockNum  # Number of shards (Redis keys).
        self.hashfunc = []
        for seed in self.seeds:
            self.hashfunc.append(SimpleHash(self.bit_size, seed))

    def isContains(self, str_input):
        """ Returns truthy if *str_input* may have been inserted (false positives possible). """
        if not str_input:
            return False
        ret = True
        # NOTE(review): shard choice assumes str_input starts with two hex digits
        # (e.g. a hash digest) -- confirm with the callers.
        name = self.key + str(int(str_input[0:2], 16) % self.blockNum)
        for f in self.hashfunc:
            loc = f.hash(str_input)
            ret = ret & self.server.getbit(name, loc)
        return ret

    def insert(self, str_input):
        """ Marks *str_input* as present by setting all of its hash bits in its shard. """
        name = self.key + str(int(str_input[0:2], 16) % self.blockNum)
        for f in self.hashfunc:
            loc = f.hash(str_input)
            self.server.setbit(name, loc, 1)
993fb1bdcfe85063571e39e15677dc3700dd5abe | 363 | py | Python | algs-py/AssemblyLineScheduling.py | kliner/funCode | e4ba2e6484478e4d33746393e3163fa36fffbb9e | [
"MIT"
] | 1 | 2017-02-13T14:46:52.000Z | 2017-02-13T14:46:52.000Z | algs-py/AssemblyLineScheduling.py | kliner/funCode | e4ba2e6484478e4d33746393e3163fa36fffbb9e | [
"MIT"
] | null | null | null | algs-py/AssemblyLineScheduling.py | kliner/funCode | e4ba2e6484478e4d33746393e3163fa36fffbb9e | [
"MIT"
] | null | null | null |
a = [[4, 5, 3, 2], [2, 10, 1, 4]]
t = [[0, 7, 4, 5], [0, 9, 2, 8]]
e = [10, 12]
x = [18, 7]
print carAssembly(a,t,e,x)
| 24.2 | 89 | 0.421488 | def carAssembly(a, t, e, x):
n = len(a[0])
T1, T2 = e[0]+a[0][0], e[1]+a[1][0]
for i in xrange(1, n):
T1, T2 = min(T1+a[0][i], T2+a[0][i]+t[1][i]), min(T1+a[1][i]+t[0][i], T2+a[1][i])
return min(T1+x[0], T2+x[1])
a = [[4, 5, 3, 2], [2, 10, 1, 4]]
t = [[0, 7, 4, 5], [0, 9, 2, 8]]
e = [10, 12]
x = [18, 7]
print carAssembly(a,t,e,x)
| 215 | 0 | 22 |
4a7263faf5239c213d9653a2d19c403cc06c3d0a | 1,487 | py | Python | cms/test_utils/project/pluginapp/plugins/manytomany_rel/models.py | Mario-Kart-Felix/django-cms | 6d68439fe7fd59d000f99e27c1f2135a3f9c816a | [
"BSD-3-Clause"
] | 1 | 2021-02-11T16:20:01.000Z | 2021-02-11T16:20:01.000Z | cms/test_utils/project/pluginapp/plugins/manytomany_rel/models.py | rpep/django-cms | 53dddb106f45963f9f8393d434b4313fa3bbdf54 | [
"BSD-3-Clause"
] | 2 | 2020-10-28T13:48:53.000Z | 2020-10-28T13:52:48.000Z | cms/test_utils/project/pluginapp/plugins/manytomany_rel/models.py | rpep/django-cms | 53dddb106f45963f9f8393d434b4313fa3bbdf54 | [
"BSD-3-Clause"
] | 1 | 2021-07-26T14:43:54.000Z | 2021-07-26T14:43:54.000Z | from django.db import models
from cms.models import CMSPlugin
###
| 24.783333 | 88 | 0.708137 | from django.db import models
from cms.models import CMSPlugin
class Article(models.Model):
title = models.CharField(max_length=50)
section = models.ForeignKey('Section', on_delete=models.CASCADE)
def __str__(self):
return u"%s -- %s" % (self.title, self.section)
class Section(models.Model):
name = models.CharField(max_length=50)
def __str__(self):
return self.name
class ArticlePluginModel(CMSPlugin):
title = models.CharField(max_length=50)
sections = models.ManyToManyField('Section')
def __str__(self):
return self.title
def copy_relations(self, oldinstance):
self.sections.set(oldinstance.sections.all())
###
class FKModel(models.Model):
fk_field = models.ForeignKey('PluginModelWithFKFromModel', on_delete=models.CASCADE)
class M2MTargetModel(models.Model):
title = models.CharField(max_length=50)
class PluginModelWithFKFromModel(CMSPlugin):
title = models.CharField(max_length=50)
def copy_relations(self, oldinstance):
# Like suggested in the docs
for associated_item in oldinstance.fkmodel_set.all():
associated_item.pk = None
associated_item.fk_field = self
associated_item.save()
class PluginModelWithM2MToModel(CMSPlugin):
m2m_field = models.ManyToManyField(M2MTargetModel)
def copy_relations(self, oldinstance):
# Like suggested in the docs
self.m2m_field.set(oldinstance.m2m_field.all())
| 512 | 738 | 161 |
ebd81cf09d811b53d13f71a0ed438a98ed125be8 | 670 | py | Python | packages/postgres-database/src/simcore_postgres_database/migration/versions/bb305829cf83_add_groups_thumbnail.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | 25 | 2018-04-13T12:44:12.000Z | 2022-03-12T15:01:17.000Z | packages/postgres-database/src/simcore_postgres_database/migration/versions/bb305829cf83_add_groups_thumbnail.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | 2,553 | 2018-01-18T17:11:55.000Z | 2022-03-31T16:26:40.000Z | packages/postgres-database/src/simcore_postgres_database/migration/versions/bb305829cf83_add_groups_thumbnail.py | mrnicegyu11/osparc-simcore | b6fa6c245dbfbc18cc74a387111a52de9b05d1f4 | [
"MIT"
] | 20 | 2018-01-18T19:45:33.000Z | 2022-03-29T07:08:47.000Z | """add groups thumbnail
Revision ID: bb305829cf83
Revises: 1ca14c33e65c
Create Date: 2020-06-02 12:06:21.302890+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bb305829cf83'
down_revision = '1ca14c33e65c'
branch_labels = None
depends_on = None
| 23.103448 | 79 | 0.69403 | """add groups thumbnail
Revision ID: bb305829cf83
Revises: 1ca14c33e65c
Create Date: 2020-06-02 12:06:21.302890+00:00
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bb305829cf83'
down_revision = '1ca14c33e65c'
branch_labels = None
depends_on = None
def upgrade():
    """Apply the migration: add the nullable ``thumbnail`` string column to ``groups``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('groups', sa.Column('thumbnail', sa.String(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Revert the migration: drop the ``thumbnail`` column from ``groups``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('groups', 'thumbnail')
    # ### end Alembic commands ###
| 312 | 0 | 46 |
d5628a895bbe4b50ce5d0be89000b9d12afb5413 | 1,054 | py | Python | movie/model.py | k0machi/movie-mvc-example | 77963db336ff2bccc8fffbea0478363efe97757b | [
"Apache-2.0"
] | null | null | null | movie/model.py | k0machi/movie-mvc-example | 77963db336ff2bccc8fffbea0478363efe97757b | [
"Apache-2.0"
] | null | null | null | movie/model.py | k0machi/movie-mvc-example | 77963db336ff2bccc8fffbea0478363efe97757b | [
"Apache-2.0"
] | null | null | null | import json
from movie import Actor, Movie
| 34 | 89 | 0.61575 | import json
from movie import Actor, Movie
class Model:
    """Persistence layer: keeps Movie objects in memory, mirrored to a JSON file."""

    def __init__(self):
        self._dump_filename = "./movies.json"
        self._all_movies = []
        self.load_all()

    def get_all_movies(self):
        """Return the in-memory list of Movie objects."""
        return self._all_movies

    def dump_all(self):
        """Serialize every movie (and its actors) to the JSON dump file."""
        with open(self._dump_filename, "wt") as f:
            # Shallow-copy each movie's __dict__ so the live objects are untouched.
            movie_dicts = [dict(movie.__dict__) for movie in self._all_movies]
            for movie_dict in movie_dicts:
                # Actors are objects; flatten them to plain dicts for JSON.
                movie_dict["actors"] = [actor.__dict__ for actor in movie_dict["actors"]]
            json.dump(movie_dicts, f)

    def load_all(self):
        """Load movies from the dump file; start empty if the file is missing.

        Bug fix: the original raised FileNotFoundError on first run, since
        __init__ calls load_all() before any dump file exists.
        """
        try:
            with open(self._dump_filename, "rt") as f:
                movies_serialized = json.load(f)
        except FileNotFoundError:
            # First run: no dump exists yet, begin with an empty library.
            return
        for movie_dict in movies_serialized:
            actors = [Actor(**actor) for actor in movie_dict["actors"]]
            movie = Movie(movie_dict["title"], movie_dict["release_date"], actors)
            self._all_movies.append(movie)

    def add_movie(self, movie_obj):
        """Append a movie and immediately persist the whole collection."""
        self._all_movies.append(movie_obj)
        self.dump_all()
| 863 | -9 | 157 |
da3cff45edfc52dfecfe08550cb342f7afd2e33f | 616 | py | Python | tests/unit/raptiformica/settings/meshnet/test_update_cjdns_config.py | vdloo/raptiformica | e2807e5e913312034161efcbd74525a4b15b37e7 | [
"MIT"
] | 21 | 2016-09-04T11:27:31.000Z | 2019-10-30T08:23:14.000Z | tests/unit/raptiformica/settings/meshnet/test_update_cjdns_config.py | vdloo/raptiformica | e2807e5e913312034161efcbd74525a4b15b37e7 | [
"MIT"
] | 5 | 2017-09-17T15:59:37.000Z | 2018-02-03T14:53:32.000Z | tests/unit/raptiformica/settings/meshnet/test_update_cjdns_config.py | vdloo/raptiformica | e2807e5e913312034161efcbd74525a4b15b37e7 | [
"MIT"
] | 2 | 2017-11-21T18:14:51.000Z | 2017-11-22T01:20:45.000Z | from raptiformica.settings.meshnet import update_cjdns_config
from tests.testcase import TestCase
| 34.222222 | 107 | 0.795455 | from raptiformica.settings.meshnet import update_cjdns_config
from tests.testcase import TestCase
class TestUpdateCjdnsConfig(TestCase):
    """Unit tests for raptiformica.settings.meshnet.update_cjdns_config."""

    def setUp(self):
        # Patch the collaborator so no real shared secret is touched.
        self.ensure_shared_secret = self.set_up_patch('raptiformica.settings.meshnet.ensure_shared_secret')

    def test_update_cjdns_config_ensures_cjdns_shared_secret_in_config(self):
        update_cjdns_config()
        self.ensure_shared_secret.assert_called_once_with('cjdns')

    def test_update_cjdns_config_returns_updated_config(self):
        # Fixed typo in the method name ("udpate" -> "update").
        ret = update_cjdns_config()
        self.assertEqual(ret, self.ensure_shared_secret.return_value)
| 397 | 17 | 103 |
e5703b27088761083d4b5f7db98e594367b431f1 | 1,805 | py | Python | fooltrader/proxy/__init__.py | renwenduan/fooltrader | c9ede56d6ce4f952618d14e0ec28479584ad9377 | [
"MIT"
] | null | null | null | fooltrader/proxy/__init__.py | renwenduan/fooltrader | c9ede56d6ce4f952618d14e0ec28479584ad9377 | [
"MIT"
] | null | null | null | fooltrader/proxy/__init__.py | renwenduan/fooltrader | c9ede56d6ce4f952618d14e0ec28479584ad9377 | [
"MIT"
] | null | null | null | import os
import pandas as pd
from fooltrader import settings
# 获取存档的代理列表
if not os.path.exists(get_proxy_dir()):
os.makedirs(get_proxy_dir())
http_proxy_df = get_http_proxy()
https_proxy_df = get_https_proxy()
socks_proxy_df = get_socks_proxy()
| 24.391892 | 64 | 0.73518 | import os
import pandas as pd
from fooltrader import settings
# 获取存档的代理列表
def get_proxy_dir():
    """Return the directory where the proxy CSV archives are stored."""
    return os.path.join(settings.FOOLTRADER_STORE_PATH, "proxy")
def get_http_proxy_path():
    """Return the path of the archived HTTP proxy list CSV."""
    return os.path.join(get_proxy_dir(), "http_proxy.csv")
def get_https_proxy_path():
    """Return the path of the archived HTTPS proxy list CSV."""
    return os.path.join(get_proxy_dir(), "https_proxy.csv")
def get_socks_proxy_path():
    """Return the path of the archived SOCKS proxy list CSV."""
    return os.path.join(get_proxy_dir(), "socks_proxy.csv")
def get_http_proxy():
    """Load the archived HTTP proxy list, or an empty DataFrame when absent."""
    path = get_http_proxy_path()
    if not os.path.exists(path):
        return pd.DataFrame()
    return pd.read_csv(path)
def get_https_proxy():
    """Load the archived HTTPS proxy list, or an empty DataFrame when absent."""
    path = get_https_proxy_path()
    if not os.path.exists(path):
        return pd.DataFrame()
    return pd.read_csv(path)
def get_socks_proxy():
    """Load the archived SOCKS proxy list, or an empty DataFrame when absent."""
    path = get_socks_proxy_path()
    if not os.path.exists(path):
        return pd.DataFrame()
    return pd.read_csv(path)
def save_http_proxy(proxies):
    """Append new HTTP proxies to the archive, dedupe by url, and persist."""
    global http_proxy_df
    http_proxy_df = http_proxy_df.append(proxies)
    # Bug fix: drop_duplicates returns a NEW frame (it is not in-place), so
    # the original discarded the deduped result; keep the assignment.
    http_proxy_df = http_proxy_df.drop_duplicates(subset='url', keep='last')
    http_proxy_df.to_csv(get_http_proxy_path(), index=False)
def save_https_proxy(proxies):
    """Append new HTTPS proxies to the archive, dedupe by url, and persist."""
    global https_proxy_df
    https_proxy_df = https_proxy_df.append(proxies)
    # Bug fix: keep drop_duplicates' return value (it is not in-place).
    https_proxy_df = https_proxy_df.drop_duplicates(subset='url', keep='last')
    https_proxy_df.to_csv(get_https_proxy_path(), index=False)
def save_socks_proxy(proxies):
    """Append new SOCKS proxies to the archive, dedupe by url, and persist."""
    global socks_proxy_df
    socks_proxy_df = socks_proxy_df.append(proxies)
    # Bug fix: keep drop_duplicates' return value (it is not in-place).
    socks_proxy_df = socks_proxy_df.drop_duplicates(subset='url', keep='last')
    socks_proxy_df.to_csv(get_socks_proxy_path(), index=False)
# Module init: make sure the archive directory exists, then load the cached
# proxy lists into module-level DataFrames used by the save_* functions.
if not os.path.exists(get_proxy_dir()):
    os.makedirs(get_proxy_dir())
http_proxy_df = get_http_proxy()
https_proxy_df = get_https_proxy()
socks_proxy_df = get_socks_proxy()
| 1,309 | 0 | 230 |
5017718e0c1ca9ca1a3baa72f5f8d88907c9163a | 285 | py | Python | examples/idioms/programs/126.2137-multiple-return-values.py | laowantong/paroxython | 4626798a60eeaa765dbfab9e63e04030c9fcb1d0 | [
"MIT"
] | 31 | 2020-05-02T13:34:26.000Z | 2021-06-06T17:25:52.000Z | examples/idioms/programs/126.2137-multiple-return-values.py | laowantong/paroxython | 4626798a60eeaa765dbfab9e63e04030c9fcb1d0 | [
"MIT"
] | 108 | 2019-11-18T19:41:52.000Z | 2022-03-18T13:58:17.000Z | examples/idioms/programs/126.2137-multiple-return-values.py | laowantong/paroxython | 4626798a60eeaa765dbfab9e63e04030c9fcb1d0 | [
"MIT"
] | 4 | 2020-05-19T08:57:44.000Z | 2020-09-21T08:53:46.000Z | """Multiple return values.
Write a function _foo that returns a _string and a _boolean value.
Source: MLKo
"""
# Implementation author: Oldboy
# Created on 2017-10-28T09:19:40.922778Z
# Last modified on 2017-10-28T09:19:40.922778Z
# Version 1
| 17.8125 | 66 | 0.726316 | """Multiple return values.
Write a function _foo that returns a _string and a _boolean value.
Source: MLKo
"""
# Implementation author: Oldboy
# Created on 2017-10-28T09:19:40.922778Z
# Last modified on 2017-10-28T09:19:40.922778Z
# Version 1
def foo():
    """Return a string together with a boolean flag (as a tuple)."""
    result = ("string", True)
    return result
| 15 | 0 | 23 |
389425fb7c65aca7aa58902c7aab5ce0b22535af | 964 | py | Python | curate/migrations/0040_auto_20190314_0314.py | JoeAmmar/curate_science | b1ae49721b06c4d9377e59b5c3f9e636786f7090 | [
"MIT"
] | 14 | 2018-10-21T11:52:01.000Z | 2022-01-24T21:38:05.000Z | curate/migrations/0040_auto_20190314_0314.py | JoeAmmar/curate_science | b1ae49721b06c4d9377e59b5c3f9e636786f7090 | [
"MIT"
] | 110 | 2018-10-31T07:56:17.000Z | 2022-01-26T15:44:25.000Z | curate/migrations/0040_auto_20190314_0314.py | JoeAmmar/curate_science | b1ae49721b06c4d9377e59b5c3f9e636786f7090 | [
"MIT"
] | 7 | 2019-07-01T08:48:47.000Z | 2020-04-04T20:54:40.000Z | # Generated by Django 2.1.7 on 2019-03-14 03:14
import autoslug.fields
from django.db import migrations, models
import django.db.models.deletion
| 30.125 | 127 | 0.624481 | # Generated by Django 2.1.7 on 2019-03-14 03:14
import autoslug.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('invitations', '0003_auto_20151126_1523'),
('curate', '0039_auto_20190307_0449'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='invite',
field=models.OneToOneField(null=True, on_delete=django.db.models.deletion.DO_NOTHING, to='invitations.Invitation'),
),
migrations.AddField(
model_name='userprofile',
name='name',
field=models.CharField(blank=True, max_length=255, null=True),
),
migrations.AddField(
model_name='userprofile',
name='slug',
field=autoslug.fields.AutoSlugField(editable=True, null=True, populate_from='name', unique=True),
),
]
| 0 | 794 | 23 |
f168c0ae935a62fb4869b37289ccf95b0cd09327 | 3,144 | py | Python | gp.py | JLustig/python-gp | 2628e31dbed594ef8402a25f61a950af9fa7b544 | [
"MIT"
] | null | null | null | gp.py | JLustig/python-gp | 2628e31dbed594ef8402a25f61a950af9fa7b544 | [
"MIT"
] | null | null | null | gp.py | JLustig/python-gp | 2628e31dbed594ef8402a25f61a950af9fa7b544 | [
"MIT"
] | null | null | null | import pylab as pb
import numpy as np
from math import pi
from scipy . spatial . distance import cdist
from scipy.stats import multivariate_normal
import matplotlib.pyplot as plt
import math
#Prior
#Create a GP-prior with a squared exponential co-variance function.
xdata=[]
x=np.arange(-math.pi,math.pi+0.1,0.05)
x=np.array(x)
priorMu=np.zeros(len(x))
#Sample from this prior and visualise the samples
#Show samples using different length-scale for the squared exponential
#plotSample(0.1,1)
plotSample(0.5,1)
#plotSample(1,1)
#plotSample(1.5,1)
#Generate data
evec=[]
for i in range(0,len(x)):
evec.append(np.random.normal(0, 0.5))
evec=np.array(evec)
y=np.sin(x)+evec
#Show distribution mean and std for points
sigma=1
l=1
xnewList,postSampleList,postCovList=plotforinterval(-5,5,0.2,1,2)
plt.show()
#Show samples of functions fitting the data
xnew=np.arange(-5,5,0.05)
postSample=getPostSample(xnew,1,2)
for sample in postSample:
plt.plot(xnew,sample)
plt.plot(x,y,'or')
plt.show() | 29.942857 | 100 | 0.681298 | import pylab as pb
import numpy as np
from math import pi
from scipy . spatial . distance import cdist
from scipy.stats import multivariate_normal
import matplotlib.pyplot as plt
import math
def kernel(xi, xj, sigma, lengthscale):
    """Squared-exponential covariance: sigma^2 * exp(-(xi-xj)^2 / lengthscale^2)."""
    variance = np.power(sigma, 2)
    sq_dist = np.power(xi - xj, 2)
    return variance * np.exp(-sq_dist / np.power(lengthscale, 2))
def plotSample(lengthscale, sigma):
    """Draw 3 functions from the GP prior (SE kernel) over the module grid ``x`` and plot them.

    Uses the module-level zero mean ``priorMu``.
    """
    priorCov = np.mat(np.zeros((len(x), len(x))))
    for i in range(0, len(x)):
        for j in range(0, len(x)):
            priorCov[i, j] = kernel(x[i], x[j], sigma, lengthscale)
    priorsample = np.random.multivariate_normal(priorMu, priorCov, 3)
    # Bug fix: the original re-plotted each identical sample curve len(x)+2
    # times (inner loop over xi plus two extra plot calls); once is enough.
    for prior in priorsample:
        plt.plot(x, prior)
    plt.show()
def createkernel(lengthscale, sigma, xi, xj):
    """Build the covariance (Gram) matrix K with K[r][c] = kernel(xi[r], xj[c])."""
    rows = len(xi)
    cols = len(xj)
    gram = np.zeros((rows, cols))
    for r in range(rows):
        for c in range(cols):
            gram[r][c] = kernel(xi[r], xj[c], sigma, lengthscale)
    return gram
def plotforinterval(mini,maxi,step,sigma,l,doplot=True):
    """GP posterior prediction at each test point in [mini, maxi) with spacing ``step``.

    Uses the module-level training data ``x``/``y``. For every single test
    point the posterior mean/covariance is computed and one sample is drawn.
    Returns (test points, posterior samples, posterior variances).
    """
    xnewList=[]
    postSampleList=[]
    postCovList=[]
    xwide=np.arange(mini,maxi,step)
    for xnew in xwide:
        xnew=[xnew]
        # Cross/self covariances between the test point and the training set.
        knewold=createkernel(l,sigma,xnew,x)
        koldnew=createkernel(l,sigma,x,xnew)
        knewnew=createkernel(l,sigma,xnew,xnew)
        # Training covariance plus sigma^2 observation noise on the diagonal.
        koldold=createkernel(l,sigma,x,x)+np.power(sigma,2)*np.identity(len(x))
        # Standard GP posterior equations for mean and covariance.
        postMu=np.dot(knewold,np.dot(np.linalg.inv(koldold),y))
        postCov=knewnew-np.dot(knewold,np.dot(np.linalg.inv((koldold)),koldnew))
        postSample = np.random.normal(postMu,postCov)
        xnewList.append(xnew)
        postSampleList.append(postSample)
        # postCov is 1x1 here, so [0][0] is the predictive variance.
        postCovList.append(postCov[0][0])
        if(doplot):
            # Mark the sampled value and a +/- covariance band at this point.
            plt.plot(xnew,postSample,'xg',xnew,postSample+postCov,"_g",xnew,postSample-postCov,"_g")
    plt.plot(x,y,'or')
    return xnewList,postSampleList,postCovList
def getPostSample(xnew,sigma,l):
    """Draw 7 joint samples from the GP posterior evaluated at all points of ``xnew``.

    Uses the module-level training data ``x``/``y``.
    """
    knewold=createkernel(l,sigma,xnew,x)
    koldnew=createkernel(l,sigma,x,xnew)
    knewnew=createkernel(l,sigma,xnew,xnew)
    # Training covariance plus sigma^2 observation noise on the diagonal.
    koldold=createkernel(l,sigma,x,x)+np.power(sigma,2)*np.identity(len(x))
    postMu=np.dot(knewold,np.dot(np.linalg.inv(koldold),y))
    postCov=knewnew-np.dot(knewold,np.dot(np.linalg.inv((koldold)),koldnew))
    return np.random.multivariate_normal(postMu,postCov,7)
# --- Prior ---
# Build a GP prior with a squared-exponential covariance over a grid on [-pi, pi].
xdata=[]
x=np.arange(-math.pi,math.pi+0.1,0.05)
x=np.array(x)
priorMu=np.zeros(len(x))
# Sample from this prior and visualise the samples.
# Try different length-scales for the squared exponential:
#plotSample(0.1,1)
plotSample(0.5,1)
#plotSample(1,1)
#plotSample(1.5,1)
# --- Generate noisy observations: y = sin(x) + noise (std 0.5) ---
evec=[]
for i in range(0,len(x)):
    evec.append(np.random.normal(0, 0.5))
evec=np.array(evec)
y=np.sin(x)+evec
# --- Posterior: plot per-point mean and spread over [-5, 5) ---
sigma=1
l=1
xnewList,postSampleList,postCovList=plotforinterval(-5,5,0.2,1,2)
plt.show()
# --- Draw sample functions from the posterior and overlay the data ---
xnew=np.arange(-5,5,0.05)
postSample=getPostSample(xnew,1,2)
for sample in postSample:
    plt.plot(xnew,sample)
plt.plot(x,y,'or')
plt.show() | 2,025 | 0 | 115 |
a1be313cfc22f84c348fd7c846cdebf3da02e117 | 402 | py | Python | archives/modules/socket_echo_server.py | mcxiaoke/python-labs | 61c0a1f91008ba82fc2f5a5deb19e60aec9df960 | [
"Apache-2.0"
] | 7 | 2016-07-08T10:53:13.000Z | 2021-07-20T00:20:10.000Z | archives/modules/socket_echo_server.py | mcxiaoke/python-labs | 61c0a1f91008ba82fc2f5a5deb19e60aec9df960 | [
"Apache-2.0"
] | 1 | 2021-05-11T05:20:18.000Z | 2021-05-11T05:20:18.000Z | archives/modules/socket_echo_server.py | mcxiaoke/python-labs | 61c0a1f91008ba82fc2f5a5deb19e60aec9df960 | [
"Apache-2.0"
] | 7 | 2016-10-31T06:31:54.000Z | 2020-08-31T20:55:00.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2015-07-13 22:43:21
import socket
HOST = ''
PORT = 12345
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(1)
conn, addr = s.accept()
print 'Connected from', addr
while True:
data = conn.recv(1024)
if not data:
break
conn.sendall(data)
conn.close()
| 19.142857 | 54 | 0.61194 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2015-07-13 22:43:21
import socket
# Minimal TCP echo server (Python 2: note the print-statement syntax below).
HOST = ''  # bind to all interfaces
PORT = 12345
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((HOST, PORT))
s.listen(1)  # allow one pending connection
conn, addr = s.accept()
print 'Connected from', addr
# Echo loop: send every received chunk back until the peer closes.
while True:
    data = conn.recv(1024)
    if not data:
        break
    conn.sendall(data)
conn.close()
| 0 | 0 | 0 |
af0d553ebdadd9238d2da4d94bac6dc43da400b3 | 1,820 | py | Python | Generate_Data/data_gen_example.py | Sudip-Pandit/spark_book | 199e803664696944b007db5c630c050e7b789698 | [
"CC0-1.0"
] | 1 | 2021-07-27T13:52:56.000Z | 2021-07-27T13:52:56.000Z | Get_test_data/Generate_Data/data_gen_example.py | ghoshm21/spark_book | 199e803664696944b007db5c630c050e7b789698 | [
"CC0-1.0"
] | null | null | null | Get_test_data/Generate_Data/data_gen_example.py | ghoshm21/spark_book | 199e803664696944b007db5c630c050e7b789698 | [
"CC0-1.0"
] | 1 | 2021-12-13T16:29:35.000Z | 2021-12-13T16:29:35.000Z | # This is just an example how to use faker
# faker is extremely slow.
# check out data_gen_spark for fast code
import csv
from faker import Faker
import datetime
if __name__ == '__main__':
records = 100000000
headers = ["Email Id", "Prefix", "Name", "Birth Date", "Phone Number", "Additional Email Id",
"Address", "Zip Code", "City","State", "Country", "Year", "Time", "Link", "Text"]
datagenerate(records, headers)
print("CSV generation complete!") | 40.444444 | 104 | 0.502747 | # This is just an example how to use faker
# faker is extremely slow.
# check out data_gen_spark for fast code
import csv
from faker import Faker
import datetime
def datagenerate(records, headers):
    """Write ``records`` rows of fake people data to People_data.csv.

    ``headers`` is the list of CSV column names used by the DictWriter.
    """
    fake = Faker('en_US')
    fake1 = Faker('en_GB')  # en_GB locale used only for phone numbers
    # newline='' is required by the csv module to avoid blank rows on Windows.
    with open("People_data.csv", 'wt', newline='') as csvFile:
        writer = csv.DictWriter(csvFile, fieldnames=headers)
        writer.writeheader()
        for i in range(records):
            full_name = fake.name()
            name_parts = full_name.split(" ")
            first = name_parts[0]
            # Use the LAST token so multi-part names don't break the email id
            # (the original hard-coded index 1).
            last = name_parts[-1]
            userId = first + "." + last + "@testDomain.com"
            writer.writerow({
                "Email Id": userId,
                "Prefix": fake.prefix(),
                # Bug fix: reuse full_name so the Name column matches the
                # email id (the original generated a second, unrelated name).
                "Name": full_name,
                "Birth Date": fake.date(pattern="%d-%m-%Y", end_datetime=datetime.date(2000, 1, 1)),
                "Phone Number": fake1.phone_number(),
                "Additional Email Id": fake.email(),
                "Address": fake.address(),
                "Zip Code": fake.zipcode(),
                "City": fake.city(),
                "State": fake.state(),
                "Country": fake.country(),
                "Year": fake.year(),
                "Time": fake.time(),
                "Link": fake.url(),
                "Text": fake.word(),
            })
if __name__ == '__main__':
    # NOTE: 100M rows is deliberate bulk generation; faker makes this very slow.
    records = 100000000
    headers = ["Email Id", "Prefix", "Name", "Birth Date", "Phone Number", "Additional Email Id",
               "Address", "Zip Code", "City","State", "Country", "Year", "Time", "Link", "Text"]
    datagenerate(records, headers)
print("CSV generation complete!") | 1,314 | 0 | 23 |
c378645bdc10fb05a172cee3d0f7845c73b21e2f | 283 | py | Python | {{cookiecutter.project_slug}}/backend/main.py | devalv/cookiecutter-fastapi | c7cfd3caa14b40dcc5d8ff6bdb6e25cfed3c9d00 | [
"MIT"
] | 2 | 2021-12-26T00:10:19.000Z | 2022-01-30T21:24:31.000Z | backend/main.py | devalv/yawm | 9f91b96cf6b9a9a1f2026d514ea24edda117e1ba | [
"MIT"
] | 7 | 2020-11-07T16:42:47.000Z | 2022-01-21T23:51:38.000Z | backend/main.py | devalv/yawm | 9f91b96cf6b9a9a1f2026d514ea24edda117e1ba | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Simple debug application runner."""
import uvicorn
from core import config
if __name__ == "__main__":
uvicorn.run(
"api:app",
reload=True,
host=f"{config.API_HOST}",
port=config.API_PORT,
loop="uvloop",
)
| 17.6875 | 38 | 0.572438 | # -*- coding: utf-8 -*-
"""Simple debug application runner."""
import uvicorn
from core import config
if __name__ == "__main__":
uvicorn.run(
"api:app",
reload=True,
host=f"{config.API_HOST}",
port=config.API_PORT,
loop="uvloop",
)
| 0 | 0 | 0 |
50f82f3e58810dabad7aec2cc563b6880c9f27c3 | 318 | py | Python | exercicios-Python/aula22c.py | pedrosimoes-programmer/exercicios-python | 150de037496d63d76086678d87425a8ccfc74573 | [
"MIT"
] | null | null | null | exercicios-Python/aula22c.py | pedrosimoes-programmer/exercicios-python | 150de037496d63d76086678d87425a8ccfc74573 | [
"MIT"
] | null | null | null | exercicios-Python/aula22c.py | pedrosimoes-programmer/exercicios-python | 150de037496d63d76086678d87425a8ccfc74573 | [
"MIT"
] | null | null | null | #Retorno de Variáveis
r1 = somar(2, 4)
r2 = somar(3, 5, 4)
r3 = somar(8)
print(f'Os cálculos foram {r1}, {r2} e {r3}.') | 31.8 | 152 | 0.650943 | #Retorno de Variáveis
def somar(a=0, b=0, c=0):
    """Return the sum of up to three numbers; omitted arguments default to 0."""
    # `return` hands the computed value back to the caller, who can store it
    # in a variable and use it however needed.
    return a + b + c
# Capture the returned sums into variables (demonstrates using return values).
r1 = somar(2, 4)
r2 = somar(3, 5, 4)
r3 = somar(8)
print(f'Os cálculos foram {r1}, {r2} e {r3}.') | 181 | 0 | 23 |
df4e170ee9233a39c371120b69469a6f9e3bbe28 | 7,038 | py | Python | qpython/_pandas.py | komsit37/sublime-q-2 | a0371c820ad8c8040fbca12bdbf7d2cf90f3c346 | [
"MIT"
] | 2 | 2016-01-04T08:40:15.000Z | 2016-09-16T21:16:26.000Z | qpython/_pandas.py | komsit37/sublime-q-2 | a0371c820ad8c8040fbca12bdbf7d2cf90f3c346 | [
"MIT"
] | null | null | null | qpython/_pandas.py | komsit37/sublime-q-2 | a0371c820ad8c8040fbca12bdbf7d2cf90f3c346 | [
"MIT"
] | null | null | null | #
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas
import struct
from collections import OrderedDict
from qpython import MetaData
from qpython.qreader import QReader, READER_CONFIGURATION, QReaderException
from qpython.qcollection import QDictionary, qlist
from qpython.qwriter import QWriter, QWriterException
from qpython.qtype import *
| 38.043243 | 145 | 0.613242 | #
# Copyright (c) 2011-2014 Exxeleron GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pandas
import struct
from collections import OrderedDict
from qpython import MetaData
from qpython.qreader import QReader, READER_CONFIGURATION, QReaderException
from qpython.qcollection import QDictionary, qlist
from qpython.qwriter import QWriter, QWriterException
from qpython.qtype import *
class PandasQReader(QReader):
    """QReader variant that maps q tables, dictionaries and lists onto pandas objects.

    The pandas mapping is enabled per-read via ``options.pandas``; otherwise
    each method falls back to the plain ``QReader`` behaviour.
    """

    parse = Mapper(QReader._reader_map)

    @parse(QDICTIONARY)
    def _read_dictionary(self, qtype = QDICTIONARY, options = READER_CONFIGURATION):
        """Read a q dictionary; a keyed table becomes a DataFrame indexed by its key columns."""
        if options.pandas:
            keys = self._read_object(options = options)
            values = self._read_object(options = options)
            if isinstance(keys, pandas.DataFrame):
                # keys being a DataFrame means this is a keyed table: merge the
                # value columns into the key frame and index by the key columns.
                if not isinstance(values, pandas.DataFrame):
                    raise QReaderException('Keyed table creation: values are expected to be of type pandas.DataFrame. Actual: %s' % type(values))
                indices = keys.columns
                table = keys
                table.meta = keys.meta
                table.meta.qtype = QKEYED_TABLE
                for column in values.columns:
                    table[column] = values[column]
                    table.meta[column] = values.meta[column]
                table.set_index([column for column in indices], inplace = True)
                return table
            else:
                # Plain dictionary: unwrap Series back to raw arrays.
                keys = keys if not isinstance(keys, pandas.Series) else keys.as_matrix()
                values = values if not isinstance(values, pandas.Series) else values.as_matrix()
                return QDictionary(keys, values)
        else:
            return QReader._read_dictionary(self, qtype = qtype, options = options)

    @parse(QTABLE)
    def _read_table(self, qtype = QTABLE, options = READER_CONFIGURATION):
        """Read a q table into a DataFrame, recording per-column q types in ``df.meta``."""
        if options.pandas:
            self._buffer.skip() # ignore attributes
            self._buffer.skip() # ignore dict type stamp
            columns = self._read_object(options = options)
            data = self._read_object(options = options)
            odict = OrderedDict()
            meta = MetaData(qtype = QTABLE)
            for i in xrange(len(columns)):  # NOTE: xrange -> this is Python 2 code
                if isinstance(data[i], str):
                    # convert character list (represented as string) to numpy representation
                    meta[columns[i]] = QSTRING
                    odict[columns[i]] = numpy.array(list(data[i]), dtype = numpy.str)
                elif isinstance(data[i], (list, tuple)):
                    # General (mixed) list column: keep elements in a numpy array.
                    meta[columns[i]] = QGENERAL_LIST
                    odict[columns[i]] = numpy.array(list(data[i]))
                else:
                    meta[columns[i]] = data[i].meta.qtype
                    odict[columns[i]] = data[i]
            df = pandas.DataFrame(odict)
            df.meta = meta
            return df
        else:
            return QReader._read_table(self, qtype = qtype, options = options)

    def _read_list(self, qtype, options):
        """Read a q list as a pandas Series; non-temporal q nulls are replaced with NaN."""
        if options.pandas:
            options.numpy_temporals = True
        list = QReader._read_list(self, qtype = qtype, options = options)  # NOTE: shadows the builtin `list`
        if options.pandas:
            if -abs(qtype) not in [QMONTH, QDATE, QDATETIME, QMINUTE, QSECOND, QTIME, QTIMESTAMP, QTIMESPAN, QSYMBOL]:
                null = QNULLMAP[-abs(qtype)][1]
                ps = pandas.Series(data = list).replace(null, numpy.NaN)
            else:
                ps = pandas.Series(data = list)
            ps.meta = MetaData(qtype = qtype)
            return ps
        else:
            return list
class PandasQWriter(QWriter):
    """QWriter variant that serializes pandas Series/DataFrame objects to q types."""

    serialize = Mapper(QWriter._writer_map)

    @serialize(pandas.Series)
    def _write_pandas_series(self, data, qtype = None):
        """Write a Series as a q list, inferring the target q type when not given."""
        if qtype is not None:
            qtype = -abs(qtype)
        if qtype is None and hasattr(data, 'meta'):
            # Prefer the q type recorded on the series while it was read.
            qtype = -abs(data.meta.qtype)
        if data.dtype == '|S1':
            qtype = QCHAR
        if qtype is None:
            qtype = Q_TYPE.get(data.dtype.type, None)
        if qtype is None and data.dtype.type in (numpy.datetime64, numpy.timedelta64):
            qtype = TEMPORAL_PY_TYPE.get(str(data.dtype), None)
        if qtype is None:
            # determinate type based on first element of the numpy array
            qtype = Q_TYPE.get(type(data[0]), QGENERAL_LIST)
        if qtype is None:
            raise QWriterException('Unable to serialize pandas series %s' % data)
        if qtype == QGENERAL_LIST:
            self._write_generic_list(data.as_matrix())
        elif qtype == QCHAR:
            self._write_string(data.as_matrix().astype(numpy.string_).tostring())
        elif data.dtype.type not in (numpy.datetime64, numpy.timedelta64):
            # Replace NaN with the matching q null value before writing.
            data = data.fillna(QNULLMAP[-abs(qtype)][1])
            data = data.as_matrix()
            if PY_TYPE[qtype] != data.dtype:
                data = data.astype(PY_TYPE[qtype])
            self._write_list(data, qtype = qtype)
        else:
            data = data.as_matrix()
            data = data.astype(TEMPORAL_Q_TYPE[qtype])
            self._write_list(data, qtype = qtype)

    @serialize(pandas.DataFrame)
    def _write_pandas_data_frame(self, data, qtype = None):
        """Write a DataFrame as a q table; a keyed table when meta marks QKEYED_TABLE."""
        data_columns = data.columns.values
        if hasattr(data, 'meta') and data.meta.qtype == QKEYED_TABLE:
            # data frame represents keyed table
            self._buffer.write(struct.pack('=b', QDICTIONARY))
            self._buffer.write(struct.pack('=bxb', QTABLE, QDICTIONARY))
            index_columns = data.index.names
            self._write(qlist(numpy.array(index_columns), qtype = QSYMBOL_LIST))
            # Temporarily flatten the index so key columns can be written as data.
            data.reset_index(inplace = True)
            self._buffer.write(struct.pack('=bxi', QGENERAL_LIST, len(index_columns)))
            for column in index_columns:
                self._write_pandas_series(data[column], qtype = data.meta[column] if hasattr(data, 'meta') else None)
            data.set_index(index_columns, inplace = True)
        # Write the (value) table part: column names, then one list per column.
        self._buffer.write(struct.pack('=bxb', QTABLE, QDICTIONARY))
        self._write(qlist(numpy.array(data_columns), qtype = QSYMBOL_LIST))
        self._buffer.write(struct.pack('=bxi', QGENERAL_LIST, len(data_columns)))
        for column in data_columns:
            self._write_pandas_series(data[column], qtype = data.meta[column] if hasattr(data, 'meta') else None)
| 5,736 | 347 | 46 |
4f317c7da3553e370baabd2f644193d2ce306d16 | 154,271 | py | Python | SigProfilerTopography/Topography.py | AlexandrovLab/SigProfilerTopography | 34c7cf24392bc77953370038a520ffc8d0bdee50 | [
"BSD-2-Clause"
] | 5 | 2021-04-02T14:03:45.000Z | 2022-02-21T12:54:52.000Z | SigProfilerTopography/Topography.py | AlexandrovLab/SigProfilerTopography | 34c7cf24392bc77953370038a520ffc8d0bdee50 | [
"BSD-2-Clause"
] | null | null | null | SigProfilerTopography/Topography.py | AlexandrovLab/SigProfilerTopography | 34c7cf24392bc77953370038a520ffc8d0bdee50 | [
"BSD-2-Clause"
] | 1 | 2022-01-22T06:27:49.000Z | 2022-01-22T06:27:49.000Z | # This source code file is a part of SigProfilerTopography
# SigProfilerTopography is a tool included as part of the SigProfiler
# computational framework for comprehensive analysis of mutational
# signatures from next-generation sequencing of cancer genomes.
# SigProfilerTopography provides the downstream data analysis of
# mutations and extracted mutational signatures w.r.t.
# nucleosome occupancy, replication time, strand bias and processivity.
# Copyright (C) 2018-2020 Burcak Otlu
# #############################################################
# import sys
# import os
# current_abs_path = os.path.dirname(os.path.realpath(__file__))
# commonsPath = os.path.join(current_abs_path,'commons')
# sys.path.append(commonsPath)
# #############################################################
import math
import time
import numpy as np
import pandas as pd
import scipy
import statsmodels
import matplotlib as plt
import shutil
import platform
import multiprocessing
import SigProfilerMatrixGenerator as matrix_generator
MATRIX_GENERATOR_PATH = matrix_generator.__path__[0]
from SigProfilerMatrixGenerator import version as matrix_generator_version
from SigProfilerSimulator import version as simulator_version
from SigProfilerMatrixGenerator.scripts import SigProfilerMatrixGeneratorFunc as matGen
from SigProfilerSimulator import SigProfilerSimulator as simulator
from SigProfilerTopography import version as topography_version
from SigProfilerTopography.source.commons.TopographyCommons import readProbabilities
from SigProfilerTopography.source.commons.TopographyCommons import readChrBasedMutationsMergeWithProbabilitiesAndWrite
from SigProfilerTopography.source.commons.TopographyCommons import DATA
from SigProfilerTopography.source.commons.TopographyCommons import FIGURE
from SigProfilerTopography.source.commons.TopographyCommons import SAMPLE
from SigProfilerTopography.source.commons.TopographyCommons import K562
from SigProfilerTopography.source.commons.TopographyCommons import MCF7
from SigProfilerTopography.source.commons.TopographyCommons import MEF
from SigProfilerTopography.source.commons.TopographyCommons import MM10
from SigProfilerTopography.source.commons.TopographyCommons import GRCh37
from SigProfilerTopography.source.commons.TopographyCommons import SIGPROFILERTOPOGRAPHY_DEFAULT_FILES
from SigProfilerTopography.source.commons.TopographyCommons import getNucleosomeFile
from SigProfilerTopography.source.commons.TopographyCommons import getReplicationTimeFiles
from SigProfilerTopography.source.commons.TopographyCommons import available_nucleosome_biosamples
from SigProfilerTopography.source.commons.TopographyCommons import available_replication_time_biosamples
from SigProfilerTopography.source.commons.TopographyCommons import EPIGENOMICSOCCUPANCY
from SigProfilerTopography.source.commons.TopographyCommons import NUCLEOSOMEOCCUPANCY
from SigProfilerTopography.source.commons.TopographyCommons import REPLICATIONTIME
from SigProfilerTopography.source.commons.TopographyCommons import REPLICATIONSTRANDBIAS
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIPTIONSTRANDBIAS
from SigProfilerTopography.source.commons.TopographyCommons import PROCESSIVITY
from SigProfilerTopography.source.commons.TopographyCommons import EPIGENOMICS
from SigProfilerTopography.source.commons.TopographyCommons import STRANDBIAS
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_H3K27ME3_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_H3K36ME3_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_H3K9ME3_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_H3K27AC_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_H3K4ME1_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_H3K4ME3_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_CTCF_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_ATAC_SEQ_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import MM10_MEF_NUCLEOSOME_FILE
from SigProfilerTopography.source.commons.TopographyCommons import GM12878_NUCLEOSOME_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import K562_NUCLEOSOME_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import ENCFF575PMI_mm10_embryonic_facial_prominence_ATAC_seq
from SigProfilerTopography.source.commons.TopographyCommons import ENCFF993SRY_mm10_embryonic_fibroblast_H3K4me1
from SigProfilerTopography.source.commons.TopographyCommons import ENCFF912DNP_mm10_embryonic_fibroblast_H3K4me3
from SigProfilerTopography.source.commons.TopographyCommons import ENCFF611HDQ_mm10_embryonic_fibroblast_CTCF
from SigProfilerTopography.source.commons.TopographyCommons import ENCFF152DUV_mm10_embryonic_fibroblast_POLR2A
from SigProfilerTopography.source.commons.TopographyCommons import ENCFF114VLZ_mm10_embryonic_fibroblast_H3K27ac
from SigProfilerTopography.source.commons.TopographyCommons import SBS
from SigProfilerTopography.source.commons.TopographyCommons import DBS
from SigProfilerTopography.source.commons.TopographyCommons import ID
from SigProfilerTopography.source.commons.TopographyCommons import UNDECLARED
from SigProfilerTopography.source.commons.TopographyCommons import USING_APPLY_ASYNC
from SigProfilerTopography.source.commons.TopographyCommons import USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM
from SigProfilerTopography.source.commons.TopographyCommons import USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM_SPLIT
from SigProfilerTopography.source.commons.TopographyCommons import STRINGENT
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_AVERAGE_PROBABILITY
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_NUM_OF_SBS_REQUIRED
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_NUM_OF_DBS_REQUIRED
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_NUM_OF_ID_REQUIRED
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_NUM_OF_REAL_DATA_OVERLAP_REQUIRED
from SigProfilerTopography.source.commons.TopographyCommons import CONSIDER_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import CONSIDER_DISTANCE
from SigProfilerTopography.source.commons.TopographyCommons import CONSIDER_DISTANCE_ALL_SAMPLES_TOGETHER
from SigProfilerTopography.source.commons.TopographyCommons import MISSING_SIGNAL
from SigProfilerTopography.source.commons.TopographyCommons import NO_SIGNAL
from SigProfilerTopography.source.commons.TopographyCommons import SBS96
from SigProfilerTopography.source.commons.TopographyCommons import ID
from SigProfilerTopography.source.commons.TopographyCommons import DBS
from SigProfilerTopography.source.commons.TopographyCommons import SUBS
from SigProfilerTopography.source.commons.TopographyCommons import INDELS
from SigProfilerTopography.source.commons.TopographyCommons import DINUCS
from SigProfilerTopography.source.commons.TopographyCommons import SBS_CONTEXTS
from SigProfilerTopography.source.commons.TopographyCommons import SNV
from SigProfilerTopography.source.commons.TopographyCommons import CHRBASED
from SigProfilerTopography.source.commons.TopographyCommons import LIB
from SigProfilerTopography.source.commons.TopographyCommons import getChromSizesDict
from SigProfilerTopography.source.commons.TopographyCommons import getShortNames
from SigProfilerTopography.source.commons.TopographyCommons import copyMafFiles
from SigProfilerTopography.source.commons.TopographyCommons import fillCutoff2Signature2PropertiesListDictionary
from SigProfilerTopography.source.commons.TopographyCommons import fill_signature_number_of_mutations_df
from SigProfilerTopography.source.commons.TopographyCommons import fill_mutations_dictionaries_write
from SigProfilerTopography.source.commons.TopographyCommons import get_mutation_type_context_for_probabilities_file
from SigProfilerTopography.source.commons.TopographyCommons import Table_MutationType_NumberofMutations_NumberofSamples_SamplesList_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_ChrLong_NumberofMutations_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_SBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_DBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_ID_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_SBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_DBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_ID_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import NUMBER_OF_MUTATIONS_IN_EACH_SPLIT
from SigProfilerTopography.source.occupancy.OccupancyAnalysis import occupancyAnalysis
from SigProfilerTopography.source.replicationtime.ReplicationTimeAnalysis import replicationTimeAnalysis
from SigProfilerTopography.source.replicationstrandbias.ReplicationStrandBiasAnalysis import replicationStrandBiasAnalysis
from SigProfilerTopography.source.transcriptionstrandbias.TranscriptionStrandBiasAnalysis import transcriptionStrandBiasAnalysis
from SigProfilerTopography.source.processivity.ProcessivityAnalysis import processivityAnalysis
from SigProfilerTopography.source.plotting.OccupancyAverageSignalFigures import occupancyAverageSignalFigures
from SigProfilerTopography.source.plotting.OccupancyAverageSignalFigures import compute_fold_change_with_p_values_plot_heatmaps
from SigProfilerTopography.source.plotting.ReplicationTimeNormalizedMutationDensityFigures import replicationTimeNormalizedMutationDensityFigures
from SigProfilerTopography.source.plotting.TranscriptionReplicationStrandBiasFigures import transcriptionReplicationStrandBiasFiguresUsingDataframes
from SigProfilerTopography.source.plotting.ProcessivityFigures import processivityFigures
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED
from SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING
from SigProfilerTopography.source.commons.TopographyCommons import PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_TOOL
from SigProfilerTopography.source.commons.TopographyCommons import COMBINE_P_VALUES_METHOD_FISHER
from SigProfilerTopography.source.commons.TopographyCommons import WEIGHTED_AVERAGE_METHOD
from SigProfilerTopography.source.commons.TopographyCommons import COLORBAR_SEISMIC
from SigProfilerTopography.source.commons.TopographyCommons import natural_key
############################################################
#Can be moved to DataPreparationCommons under /source/commons
#read chr based dinucs (provided by SigProfilerMatrixGenerator) and merge with probabilities (provided by SigProfilerTopography)
############################################################
#######################################################
#JAN 9, 2020
#######################################################
#######################################################
#Nov25, 2019
# Download nucleosome occupancy chr based npy files from ftp alexandrovlab if they do not exist
# We are using this function if user is using our available nucleosome data for GM12878 and K562 cell lines
#######################################################
#######################################################
#For Skin-Melanoma USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM_SPLIT is better
#For others USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM is better
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
#######################################################
# Deprecated.
# We assume that simulated data will have the same number_of_splits as the real data
#######################################################
# inputDir ='/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/input_for_matgen/BreastCancer560_subs_indels_dinucs'
# outputDir = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/output_test/'
# jobname = 'BreastCancer560'
#Run SigProfilerTopography Analyses
#Former full path now only the filename with extension
# nucleosomeOccupancy = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/lib/nucleosome/wgEncodeSydhNsomeGm12878Sig.wig'
# replicationSignal = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/lib/replication/GSM923442_hg19_wgEncodeUwRepliSeqMcf7WaveSignalRep1.wig'
# replicationValley = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/lib/replication/GSM923442_hg19_wgEncodeUwRepliSeqMcf7ValleysRep1.bed'
# replicationPeak = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/lib/replication/GSM923442_hg19_wgEncodeUwRepliSeqMcf7PkRep1.bed'
# subs_probabilities_file_path = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/output/560_BRCA_WGS_DINUCS/SBS96/Suggested_Solution/Decomposed_Solution/Mutation_Probabilities.txt'
# indels_probabilities_file_path = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/output/560_BRCA_WGS_DINUCS/ID83/Suggested_Solution/Decomposed_Solution/Mutation_Probabilities.txt'
# dinucs_probabilities_file_path = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/output/560_BRCA_WGS_DINUCS/DBS78/Suggested_Solution/Decomposed_Solution/Mutation_Probabilities.txt'
#######################################################
# Plot figures for the attained data after SigProfilerTopography Analyses
##############################################################
#To run on laptop
import os
if __name__ == "__main__":
    # Developer test harness: run the full topography pipeline on the
    # Skin-Melanoma PCAWG test set with two simulations, epigenomics only.
    genome = 'GRCh37'
    jobname = 'Test-Skin-Melanoma'
    numberofSimulations = 2

    # Input mutations live on the TSCC cluster; everything else is local (Windows).
    inputDir = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/input/PCAWG_Matlab_Clean/Skin-Melanoma/filtered/'

    # Common local base directory for outputs, probability files, and library data.
    topography_base = os.path.join('C:\\', 'Users', 'burcak', 'Developer', 'Python',
                                   'SigProfilerTopography', 'SigProfilerTopography')
    outputDir = os.path.join(topography_base, 'output_test')

    # Mutation-probability matrices produced by SigProfilerExtractor (PCAWG Matlab runs).
    extractor_dir = os.path.join(topography_base, 'output_for_extractor', 'PCAWG_Matlab')
    sbs_probabilities_file_path = os.path.join(extractor_dir, 'Skin-Melanoma_sbs96_mutation_probabilities.txt')
    id_probabilities_file_path = os.path.join(extractor_dir, 'Skin-Melanoma_id83_mutation_probabilities.txt')
    dbs_probabilities_file_path = os.path.join(extractor_dir, 'Skin-Melanoma_dbs_mutation_probabilities.txt')

    # User-provided GM12878 nucleosome occupancy signal track (wig format).
    # NOTE(review): built but not passed below — nucleosome analysis is disabled.
    user_provided_nucleosome_file_path = os.path.join(topography_base, 'lib', 'nucleosome',
                                                      'wgEncodeSydhNsomeGm12878Sig.wig')

    # Only the epigenomics analysis is enabled for this run; dictionaries from a
    # previous run are reused (necessary_dictionaries_already_exists=True).
    runAnalyses(genome, inputDir, outputDir, jobname, numberofSimulations,
                sbs_probabilities=sbs_probabilities_file_path,
                id_probabilities=id_probabilities_file_path,
                dbs_probabilities=dbs_probabilities_file_path,
                epigenomics=True, nucleosome=False, replication_time=False,
                strand_bias=False, processivity=False,
                sample_based=False, new_simulations_enforced=False,
                full_mode=False, verbose=False,
                necessary_dictionaries_already_exists=True)
##############################################################
| 61.981117 | 350 | 0.555639 | # This source code file is a part of SigProfilerTopography
# SigProfilerTopography is a tool included as part of the SigProfiler
# computational framework for comprehensive analysis of mutational
# signatures from next-generation sequencing of cancer genomes.
# SigProfilerTopography provides the downstream data analysis of
# mutations and extracted mutational signatures w.r.t.
# nucleosome occupancy, replication time, strand bias and processivity.
# Copyright (C) 2018-2020 Burcak Otlu
# #############################################################
# import sys
# import os
# current_abs_path = os.path.dirname(os.path.realpath(__file__))
# commonsPath = os.path.join(current_abs_path,'commons')
# sys.path.append(commonsPath)
# #############################################################
import math
import time
import numpy as np
import pandas as pd
import scipy
import statsmodels
import matplotlib as plt
import shutil
import platform
import multiprocessing
import SigProfilerMatrixGenerator as matrix_generator
MATRIX_GENERATOR_PATH = matrix_generator.__path__[0]
from SigProfilerMatrixGenerator import version as matrix_generator_version
from SigProfilerSimulator import version as simulator_version
from SigProfilerMatrixGenerator.scripts import SigProfilerMatrixGeneratorFunc as matGen
from SigProfilerSimulator import SigProfilerSimulator as simulator
from SigProfilerTopography import version as topography_version
from SigProfilerTopography.source.commons.TopographyCommons import readProbabilities
from SigProfilerTopography.source.commons.TopographyCommons import readChrBasedMutationsMergeWithProbabilitiesAndWrite
from SigProfilerTopography.source.commons.TopographyCommons import DATA
from SigProfilerTopography.source.commons.TopographyCommons import FIGURE
from SigProfilerTopography.source.commons.TopographyCommons import SAMPLE
from SigProfilerTopography.source.commons.TopographyCommons import K562
from SigProfilerTopography.source.commons.TopographyCommons import MCF7
from SigProfilerTopography.source.commons.TopographyCommons import MEF
from SigProfilerTopography.source.commons.TopographyCommons import MM10
from SigProfilerTopography.source.commons.TopographyCommons import GRCh37
from SigProfilerTopography.source.commons.TopographyCommons import SIGPROFILERTOPOGRAPHY_DEFAULT_FILES
from SigProfilerTopography.source.commons.TopographyCommons import getNucleosomeFile
from SigProfilerTopography.source.commons.TopographyCommons import getReplicationTimeFiles
from SigProfilerTopography.source.commons.TopographyCommons import available_nucleosome_biosamples
from SigProfilerTopography.source.commons.TopographyCommons import available_replication_time_biosamples
from SigProfilerTopography.source.commons.TopographyCommons import EPIGENOMICSOCCUPANCY
from SigProfilerTopography.source.commons.TopographyCommons import NUCLEOSOMEOCCUPANCY
from SigProfilerTopography.source.commons.TopographyCommons import REPLICATIONTIME
from SigProfilerTopography.source.commons.TopographyCommons import REPLICATIONSTRANDBIAS
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIPTIONSTRANDBIAS
from SigProfilerTopography.source.commons.TopographyCommons import PROCESSIVITY
from SigProfilerTopography.source.commons.TopographyCommons import EPIGENOMICS
from SigProfilerTopography.source.commons.TopographyCommons import STRANDBIAS
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_H3K27ME3_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_H3K36ME3_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_H3K9ME3_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_H3K27AC_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_H3K4ME1_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_H3K4ME3_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_CTCF_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_ATAC_SEQ_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import MM10_MEF_NUCLEOSOME_FILE
from SigProfilerTopography.source.commons.TopographyCommons import GM12878_NUCLEOSOME_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import K562_NUCLEOSOME_OCCUPANCY_FILE
from SigProfilerTopography.source.commons.TopographyCommons import ENCFF575PMI_mm10_embryonic_facial_prominence_ATAC_seq
from SigProfilerTopography.source.commons.TopographyCommons import ENCFF993SRY_mm10_embryonic_fibroblast_H3K4me1
from SigProfilerTopography.source.commons.TopographyCommons import ENCFF912DNP_mm10_embryonic_fibroblast_H3K4me3
from SigProfilerTopography.source.commons.TopographyCommons import ENCFF611HDQ_mm10_embryonic_fibroblast_CTCF
from SigProfilerTopography.source.commons.TopographyCommons import ENCFF152DUV_mm10_embryonic_fibroblast_POLR2A
from SigProfilerTopography.source.commons.TopographyCommons import ENCFF114VLZ_mm10_embryonic_fibroblast_H3K27ac
from SigProfilerTopography.source.commons.TopographyCommons import SBS
from SigProfilerTopography.source.commons.TopographyCommons import DBS
from SigProfilerTopography.source.commons.TopographyCommons import ID
from SigProfilerTopography.source.commons.TopographyCommons import UNDECLARED
from SigProfilerTopography.source.commons.TopographyCommons import USING_APPLY_ASYNC
from SigProfilerTopography.source.commons.TopographyCommons import USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM
from SigProfilerTopography.source.commons.TopographyCommons import USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM_SPLIT
from SigProfilerTopography.source.commons.TopographyCommons import STRINGENT
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_AVERAGE_PROBABILITY
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_NUM_OF_SBS_REQUIRED
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_NUM_OF_DBS_REQUIRED
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_NUM_OF_ID_REQUIRED
from SigProfilerTopography.source.commons.TopographyCommons import DEFAULT_NUM_OF_REAL_DATA_OVERLAP_REQUIRED
from SigProfilerTopography.source.commons.TopographyCommons import CONSIDER_COUNT
from SigProfilerTopography.source.commons.TopographyCommons import CONSIDER_DISTANCE
from SigProfilerTopography.source.commons.TopographyCommons import CONSIDER_DISTANCE_ALL_SAMPLES_TOGETHER
from SigProfilerTopography.source.commons.TopographyCommons import MISSING_SIGNAL
from SigProfilerTopography.source.commons.TopographyCommons import NO_SIGNAL
from SigProfilerTopography.source.commons.TopographyCommons import SBS96
from SigProfilerTopography.source.commons.TopographyCommons import ID
from SigProfilerTopography.source.commons.TopographyCommons import DBS
from SigProfilerTopography.source.commons.TopographyCommons import SUBS
from SigProfilerTopography.source.commons.TopographyCommons import INDELS
from SigProfilerTopography.source.commons.TopographyCommons import DINUCS
from SigProfilerTopography.source.commons.TopographyCommons import SBS_CONTEXTS
from SigProfilerTopography.source.commons.TopographyCommons import SNV
from SigProfilerTopography.source.commons.TopographyCommons import CHRBASED
from SigProfilerTopography.source.commons.TopographyCommons import LIB
from SigProfilerTopography.source.commons.TopographyCommons import getChromSizesDict
from SigProfilerTopography.source.commons.TopographyCommons import getShortNames
from SigProfilerTopography.source.commons.TopographyCommons import copyMafFiles
from SigProfilerTopography.source.commons.TopographyCommons import fillCutoff2Signature2PropertiesListDictionary
from SigProfilerTopography.source.commons.TopographyCommons import fill_signature_number_of_mutations_df
from SigProfilerTopography.source.commons.TopographyCommons import fill_mutations_dictionaries_write
from SigProfilerTopography.source.commons.TopographyCommons import get_mutation_type_context_for_probabilities_file
from SigProfilerTopography.source.commons.TopographyCommons import Table_MutationType_NumberofMutations_NumberofSamples_SamplesList_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_ChrLong_NumberofMutations_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_SBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_DBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_ID_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_SBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_DBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import Table_ID_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename
from SigProfilerTopography.source.commons.TopographyCommons import NUMBER_OF_MUTATIONS_IN_EACH_SPLIT
from SigProfilerTopography.source.occupancy.OccupancyAnalysis import occupancyAnalysis
from SigProfilerTopography.source.replicationtime.ReplicationTimeAnalysis import replicationTimeAnalysis
from SigProfilerTopography.source.replicationstrandbias.ReplicationStrandBiasAnalysis import replicationStrandBiasAnalysis
from SigProfilerTopography.source.transcriptionstrandbias.TranscriptionStrandBiasAnalysis import transcriptionStrandBiasAnalysis
from SigProfilerTopography.source.processivity.ProcessivityAnalysis import processivityAnalysis
from SigProfilerTopography.source.plotting.OccupancyAverageSignalFigures import occupancyAverageSignalFigures
from SigProfilerTopography.source.plotting.OccupancyAverageSignalFigures import compute_fold_change_with_p_values_plot_heatmaps
from SigProfilerTopography.source.plotting.ReplicationTimeNormalizedMutationDensityFigures import replicationTimeNormalizedMutationDensityFigures
from SigProfilerTopography.source.plotting.TranscriptionReplicationStrandBiasFigures import transcriptionReplicationStrandBiasFiguresUsingDataframes
from SigProfilerTopography.source.plotting.ProcessivityFigures import processivityFigures
from SigProfilerTopography.source.commons.TopographyCommons import TRANSCRIBED_VERSUS_UNTRANSCRIBED
from SigProfilerTopography.source.commons.TopographyCommons import GENIC_VERSUS_INTERGENIC
from SigProfilerTopography.source.commons.TopographyCommons import LAGGING_VERSUS_LEADING
from SigProfilerTopography.source.commons.TopographyCommons import PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_TOOL
from SigProfilerTopography.source.commons.TopographyCommons import COMBINE_P_VALUES_METHOD_FISHER
from SigProfilerTopography.source.commons.TopographyCommons import WEIGHTED_AVERAGE_METHOD
from SigProfilerTopography.source.commons.TopographyCommons import COLORBAR_SEISMIC
from SigProfilerTopography.source.commons.TopographyCommons import natural_key
############################################################
#Can be move to DataPreparationCommons under /source/commons
#read chr based dinucs (provided by SigProfilerMatrixGenerator) and merge with probabilities (provided by SigProfilerTopography)
def prepareMutationsDataAfterMatrixGenerationAndExtractorForTopography(chromShortNamesList,
inputDir,
outputDir,
jobname,
mutation_type_context,
mutations_probabilities_file_path,
mutation_type_context_for_probabilities,
startSimNum,
endSimNum,
partialDirname,
PCAWG,
verbose):
###########################################################################################
#original matrix generator chrbased data will be under inputDir/output/vcf_files/SNV
#original matrix generator chrbased data will be under inputDir/output/vcf_files/DBS
#original matrix generator chrbased data will be under inputDir/output/vcf_files/ID
#sim1 matrix generator chrbased data will be under inputDir/output/simulations/sim1/96/output/vcf_files/SNV
#sim1 matrix generator chrbased data will be under inputDir/output/simulations/sim1/ID/output/vcf_files/ID
#sim1 matrix generator chrbased data will be under inputDir/output/simulations/sim1/DBS/output/vcf_files/DBS
df_columns_contain_ordered_signatures = None
os.makedirs(os.path.join(outputDir,jobname,DATA,CHRBASED),exist_ok=True)
for simNum in range(1,endSimNum+1):
simName = 'sim%d' % (simNum)
os.makedirs(os.path.join(outputDir,jobname,DATA,CHRBASED,simName), exist_ok=True)
###########################################################################################
###########################################################################################
if ((mutations_probabilities_file_path is not None) and (os.path.exists(mutations_probabilities_file_path))):
##########################################################################################
mutations_probabilities_df = readProbabilities(mutations_probabilities_file_path, verbose)
df_columns_contain_ordered_signatures = mutations_probabilities_df.columns.values
##########################################################################################
if verbose:
print('\tVerbose mutations_probabilities_df.head()')
print('\tVerbose %s' %(mutations_probabilities_df.head()))
print('\tVerbose mutations_probabilities_df.columns.values')
print('\tVerbose %s' %(mutations_probabilities_df.columns.values))
##########################################################################################
#Step1 SigProfilerTopography Python Package
#For Release we will use SAMPLE as it is, no change in SAMPLE column is needed.
# For PCAWG_Matlab
# This statement below is customized for PCAWG_Matlab
# To get rid of inconsistent cancer type names in sample column of chrbased mutation files and probabilities files
# Breast-LobularCA_SP124191
if PCAWG:
mutations_probabilities_df[SAMPLE] = mutations_probabilities_df[SAMPLE].str.split('_',expand=True)[1]
##########################################################################################
############################################################################################
############################## pool.apply_async starts ####################################
############################################################################################
################################
numofProcesses = multiprocessing.cpu_count()
pool = multiprocessing.Pool(processes=numofProcesses)
################################
################################
jobs = []
################################
sim_nums = range(startSimNum,endSimNum+1)
sim_num_chr_tuples = ((sim_num, chrShort) for sim_num in sim_nums for chrShort in chromShortNamesList)
for simNum, chrShort in sim_num_chr_tuples:
simName = 'sim%d' % (simNum)
chr_based_mutation_filename = '%s_seqinfo.txt' % (chrShort)
if (simNum == 0):
matrix_generator_output_dir_path = os.path.join(inputDir, 'output', 'vcf_files', partialDirname)
else:
matrix_generator_output_dir_path = os.path.join(inputDir, 'output', 'simulations', simName,mutation_type_context, 'output', 'vcf_files',partialDirname)
if (os.path.exists(matrix_generator_output_dir_path)):
chr_based_mutation_filepath = os.path.join(matrix_generator_output_dir_path,chr_based_mutation_filename)
inputList = []
inputList.append(chrShort)
inputList.append(outputDir)
inputList.append(jobname)
inputList.append(chr_based_mutation_filepath)
inputList.append(mutations_probabilities_df)
inputList.append(mutation_type_context_for_probabilities)
inputList.append(mutation_type_context)
inputList.append(simNum)
inputList.append(PCAWG)
jobs.append(pool.apply_async(readChrBasedMutationsMergeWithProbabilitiesAndWrite,args=(inputList,)))
################################################################################
##############################################################################
# wait for all jobs to finish
for job in jobs:
if verbose: print('\tVerbose Transcription Strand Bias Worker pid %s job.get():%s ' % (str(os.getpid()), job.get()))
##############################################################################
################################
pool.close()
pool.join()
################################
############################################################################################
############################## pool.apply_async ends ######################################
############################################################################################
###########################################################################################
###########################################################################################
elif ((mutations_probabilities_file_path is None) or (not (os.path.exists(mutations_probabilities_file_path)))):
#For Information
print('--- There is a situation/problem: mutations_probabilities_file_path:%s is None or does not exist.' %(mutations_probabilities_file_path))
############################################################################################
############################## pool.apply_async starts ####################################
############################################################################################
################################
numofProcesses = multiprocessing.cpu_count()
pool = multiprocessing.Pool(processes=numofProcesses)
################################
################################
jobs = []
################################
sim_nums = range(startSimNum,endSimNum+1)
sim_num_chr_tuples = ((sim_num, chrShort) for sim_num in sim_nums for chrShort in chromShortNamesList)
for simNum, chrShort in sim_num_chr_tuples:
simName = 'sim%d' % (simNum)
chr_based_mutation_filename = '%s_seqinfo.txt' % (chrShort)
if (simNum == 0):
matrix_generator_output_dir_path = os.path.join(inputDir, 'output', 'vcf_files', partialDirname)
else:
matrix_generator_output_dir_path = os.path.join(inputDir, 'output', 'simulations', simName,mutation_type_context, 'output', 'vcf_files',partialDirname)
if (os.path.exists(matrix_generator_output_dir_path)):
chr_based_mutation_filepath = os.path.join(matrix_generator_output_dir_path,chr_based_mutation_filename)
inputList = []
inputList.append(chrShort)
inputList.append(outputDir)
inputList.append(jobname)
inputList.append(chr_based_mutation_filepath)
inputList.append(None)
inputList.append(mutation_type_context_for_probabilities)
inputList.append(mutation_type_context)
inputList.append(simNum)
inputList.append(PCAWG)
jobs.append(pool.apply_async(readChrBasedMutationsMergeWithProbabilitiesAndWrite,args=(inputList,)))
################################################################################
##############################################################################
# wait for all jobs to finish
for job in jobs:
if verbose: print('\tVerbose Transcription Strand Bias Worker pid %s job.get():%s ' % (str(os.getpid()), job.get()))
##############################################################################
################################
pool.close()
pool.join()
################################
############################################################################################
############################## pool.apply_async ends ######################################
############################################################################################
return df_columns_contain_ordered_signatures
###########################################################################################
############################################################
#######################################################
#JAN 9, 2020
def check_download_replication_time_files(replication_time_signal_file,replication_time_valley_file,replication_time_peak_file):
    """Download the replication-time signal, valley and peak files if missing.

    Each argument may be a full path; only the basename is used, because the
    files are stored flat under ``<package>/lib/replication`` and fetched from
    the alexandrovlab FTP server. Already-existing files are left untouched.
    The process working directory is changed during the download and restored
    before returning. Download failures are reported but not raised
    (best-effort behaviour).
    """
    current_abs_path = os.path.dirname(os.path.abspath(__file__))

    # Convert possible full paths to bare filenames; all three files live in
    # the same flat lib/replication directory.
    filenames = [os.path.basename(replication_time_signal_file),
                 os.path.basename(replication_time_valley_file),
                 os.path.basename(replication_time_peak_file)]

    os.makedirs(os.path.join(current_abs_path, 'lib', 'replication'), exist_ok=True)
    lib_replication_path = os.path.join(current_abs_path, 'lib', 'replication')

    if os.path.isabs(lib_replication_path):
        # wget writes into the current directory, so move there first.
        os.chdir(lib_replication_path)

        for filename in filenames:
            file_path = os.path.join(lib_replication_path, filename)
            if not os.path.exists(file_path):
                print('Does not exists: %s' % (file_path))
                try:
                    print('Downloading %s under %s' % (filename, lib_replication_path))
                    # wget flags: -r recursive, -l1 depth 1, -c resume partial
                    # downloads, -nc never clobber an existing file, --no-parent
                    # stay below the source directory, -nd no local directories
                    cmd = "bash -c '" + 'wget -r -l1 -c -nc --no-parent -nd ftp://alexandrovlab-ftp.ucsd.edu/pub/tools/SigProfilerTopography/lib/replication/' + filename + "'"
                    os.system(cmd)
                except Exception:  # narrowed from bare except: let KeyboardInterrupt/SystemExit propagate
                    print("The ftp://alexandrovlab-ftp.ucsd.edu site is not responding...")
    else:
        # os.chdir above requires an absolute path; this should not happen.
        print('%s is not an absolute path.' % (lib_replication_path))

    # restore the caller's working directory
    os.chdir(current_abs_path)
#######################################################
def check_download_sample_probability_files():
    """Download the sample COSMIC mutation-probability files if missing.

    Files are placed under ``./sample_probabilities`` relative to the current
    working directory and fetched from the alexandrovlab FTP server.
    Already-existing files are left untouched. The working directory is
    changed during the download and restored before returning.
    """
    current_path = os.getcwd()
    os.makedirs(os.path.join(current_path, 'sample_probabilities'), exist_ok=True)
    sample_probability_files_path = os.path.join(current_path, 'sample_probabilities')

    probability_files = ['COSMIC_DBS78_Decomposed_Mutation_Probabilities.txt',
                         'COSMIC_SBS96_Decomposed_Mutation_Probabilities.txt']

    if os.path.isabs(sample_probability_files_path):
        # wget writes into the current directory, so move there first.
        os.chdir(sample_probability_files_path)

        for probability_filename in probability_files:
            probability_file_path = os.path.join(sample_probability_files_path, probability_filename)
            if not os.path.exists(probability_file_path):
                print('Does not exists: %s' % (probability_file_path))
                try:
                    print('Downloading %s under %s' % (probability_filename, sample_probability_files_path))
                    # wget flags: -r recursive, -l1 depth 1, -c resume partial
                    # downloads, -nc never clobber an existing file, --no-parent
                    # stay below the source directory, -nd no local directories
                    cmd = "bash -c '" + 'wget -r -l1 -c -nc --no-parent -nd ftp://alexandrovlab-ftp.ucsd.edu/pub/tools/SigProfilerTopography/sample_probability_files/' + probability_filename + "'"
                    print("cmd: %s" % cmd)
                    os.system(cmd)
                except Exception:  # narrowed from bare except: let KeyboardInterrupt/SystemExit propagate
                    print("The UCSD ftp site is not responding...")
    else:
        # os.chdir above requires an absolute path; this should not happen.
        print('%s is not an absolute path.' % (sample_probability_files_path))

    # restore the caller's working directory
    os.chdir(current_path)
def check_download_sample_vcf_files():
    """Download the 21 sample PD*.vcf example files if missing.

    Files are placed under ``./sample_vcfs`` relative to the current working
    directory and fetched from the alexandrovlab FTP server.
    Already-existing files are left untouched. The working directory is
    changed during the download and restored before returning.
    """
    current_path = os.getcwd()
    os.makedirs(os.path.join(current_path, 'sample_vcfs'), exist_ok=True)
    sample_vcf_files_path = os.path.join(current_path, 'sample_vcfs')

    vcf_files = ['PD4248a.vcf', 'PD4199a.vcf', 'PD4198a.vcf', 'PD4194a.vcf', 'PD4192a.vcf', 'PD4120a.vcf',
                 'PD4116a.vcf', 'PD4115a.vcf', 'PD4109a.vcf', 'PD4107a.vcf', 'PD4103a.vcf', 'PD4088a.vcf',
                 'PD4086a.vcf', 'PD4085a.vcf', 'PD4006a.vcf', 'PD4005a.vcf', 'PD3945a.vcf', 'PD3905a.vcf',
                 'PD3904a.vcf', 'PD3890a.vcf', 'PD3851a.vcf']

    if os.path.isabs(sample_vcf_files_path):
        # wget writes into the current directory, so move there first.
        os.chdir(sample_vcf_files_path)

        for vcf_filename in vcf_files:
            vcf_file_path = os.path.join(sample_vcf_files_path, vcf_filename)
            if not os.path.exists(vcf_file_path):
                print('Does not exists: %s' % (vcf_file_path))
                try:
                    print('Downloading %s under %s' % (vcf_filename, sample_vcf_files_path))
                    # wget flags: -r recursive, -l1 depth 1, -c resume partial
                    # downloads, -nc never clobber an existing file, --no-parent
                    # stay below the source directory, -nd no local directories
                    cmd = "bash -c '" + 'wget -r -l1 -c -nc --no-parent -nd ftp://alexandrovlab-ftp.ucsd.edu/pub/tools/SigProfilerTopography/sample_vcf_files/' + vcf_filename + "'"
                    print("cmd: %s" % cmd)
                    os.system(cmd)
                except Exception:  # narrowed from bare except: let KeyboardInterrupt/SystemExit propagate
                    print("The UCSD ftp site is not responding...")
    else:
        # os.chdir above requires an absolute path; this should not happen.
        print('%s is not an absolute path.' % (sample_vcf_files_path))

    # restore the caller's working directory
    os.chdir(current_path)
def check_download_chrbased_npy_atac_seq_files(atac_seq_file,chromNamesList):
    """Download missing chromosome-based ATAC-seq signal ``.npy`` arrays.

    For each chromosome in *chromNamesList*, looks for
    ``<chr>_signal_<basename(atac_seq_file)>.npy`` under
    ``<package>/lib/epigenomics/chrbased`` and fetches it from the
    alexandrovlab FTP server when absent. The working directory is changed
    during the download and restored before returning. Failures are reported
    but not raised (best-effort behaviour).
    """
    current_abs_path = os.path.dirname(os.path.abspath(__file__))

    os.makedirs(os.path.join(current_abs_path, 'lib', 'epigenomics', 'chrbased'), exist_ok=True)
    chrombased_npy_path = os.path.join(current_abs_path, 'lib', 'epigenomics', 'chrbased')

    if os.path.isabs(chrombased_npy_path):
        # wget writes into the current directory, so move there first.
        os.chdir(chrombased_npy_path)

        # remote filenames embed the library file's basename without extension
        atac_seq_filename_wo_extension = os.path.splitext(os.path.basename(atac_seq_file))[0]

        for chrLong in chromNamesList:
            filename = '%s_signal_%s.npy' % (chrLong, atac_seq_filename_wo_extension)
            chrbased_npy_array_path = os.path.join(chrombased_npy_path, filename)
            if not os.path.exists(chrbased_npy_array_path):
                print('Does not exists: %s' % (chrbased_npy_array_path))
                try:
                    print('Downloading %s under %s' % (filename, chrbased_npy_array_path))
                    # wget flags: -r recursive, -l1 depth 1, -c resume partial
                    # downloads, -nc never clobber an existing file, --no-parent
                    # stay below the source directory, -nd no local directories
                    cmd = "bash -c '" + 'wget -r -l1 -c -nc --no-parent -nd ftp://alexandrovlab-ftp.ucsd.edu/pub/tools/SigProfilerTopography/lib/epigenomics/chrbased/' + filename + "'"
                    print("cmd: %s" % cmd)
                    os.system(cmd)
                except Exception:  # narrowed from bare except: let KeyboardInterrupt/SystemExit propagate
                    print("The UCSD ftp site is not responding...")
    else:
        # os.chdir above requires an absolute path; this should not happen.
        print('%s is not an absolute path.' % (chrombased_npy_path))

    # restore the caller's working directory
    os.chdir(current_abs_path)
#######################################################
#Nov25, 2019
# Download nucleosome occupancy chr based npy files from ftp alexandrovlab if they do not exist
# We are using this function if user is using our available nucleosome data for GM12878 and K562 cell lines
def check_download_chrbased_npy_nuclesome_files(nucleosome_file,chromNamesList):
    """Download missing chromosome-based nucleosome occupancy ``.npy`` arrays.

    Used when the run relies on the nucleosome data shipped for the GM12878
    and K562 cell lines. For each chromosome in *chromNamesList* (except
    chrY), looks for ``<chr>_signal_<basename(nucleosome_file)>.npy`` under
    ``<package>/lib/nucleosome/chrbased`` and fetches it from the
    alexandrovlab FTP server when absent. The working directory is changed
    during the download and restored before returning.
    (Note: the "nuclesome" typo in the name is kept for caller compatibility.)
    """
    current_abs_path = os.path.dirname(os.path.abspath(__file__))

    os.makedirs(os.path.join(current_abs_path, 'lib', 'nucleosome', 'chrbased'), exist_ok=True)
    chrombased_npy_path = os.path.join(current_abs_path, 'lib', 'nucleosome', 'chrbased')

    if os.path.isabs(chrombased_npy_path):
        # wget writes into the current directory, so move there first.
        os.chdir(chrombased_npy_path)

        # remote filenames embed the library file's basename without extension
        nucleosome_filename_wo_extension = os.path.splitext(os.path.basename(nucleosome_file))[0]

        for chrLong in chromNamesList:
            # GM12878 and K562 come from female samples, therefore there is no chrY data
            if chrLong != 'chrY':
                filename = '%s_signal_%s.npy' % (chrLong, nucleosome_filename_wo_extension)
                chrbased_npy_array_path = os.path.join(chrombased_npy_path, filename)
                if not os.path.exists(chrbased_npy_array_path):
                    print('Does not exists: %s' % (chrbased_npy_array_path))
                    try:
                        print('Downloading %s_signal_%s.npy under %s' % (
                            chrLong, nucleosome_filename_wo_extension, chrbased_npy_array_path))
                        # wget flags: -r recursive, -l1 depth 1, -c resume partial
                        # downloads, -nc never clobber an existing file, --no-parent
                        # stay below the source directory, -nd no local directories
                        cmd = "bash -c '" + 'wget -r -l1 -c -nc --no-parent -nd ftp://alexandrovlab-ftp.ucsd.edu/pub/tools/SigProfilerTopography/lib/nucleosome/chrbased/' + filename + "'"
                        os.system(cmd)
                    except Exception:  # narrowed from bare except: let KeyboardInterrupt/SystemExit propagate
                        print("The UCSD ftp site is not responding...")
    else:
        # os.chdir above requires an absolute path; this should not happen.
        print('%s is not an absolute path.' % (chrombased_npy_path))

    # restore the caller's working directory
    os.chdir(current_abs_path)
#######################################################
def install_default_nucleosome(genome):
    """Download the default nucleosome occupancy data for *genome*.

    Uses the MEF-based file for mm10 and the K562-based file for GRCh37.
    Genomes other than mm10/GRCh37 are silently ignored. The unused
    ``nucleosome_biosample`` locals of the original were removed; the
    biosample choice is documented in the comments instead.
    """
    chromSizesDict = getChromSizesDict(genome)
    chromNamesList = list(chromSizesDict.keys())

    if genome == MM10:
        # default mouse biosample: MEF
        nucleosome_file = MM10_MEF_NUCLEOSOME_FILE
        check_download_chrbased_npy_nuclesome_files(nucleosome_file, chromNamesList)
    elif genome == GRCh37:
        # default human biosample: K562 (GM12878 is the alternative default)
        nucleosome_file = K562_NUCLEOSOME_OCCUPANCY_FILE
        check_download_chrbased_npy_nuclesome_files(nucleosome_file, chromNamesList)
def install_default_atac_seq(genome):
    """Download the default ATAC-seq chromosome-based arrays for *genome*.

    Only GRCh37 has a bundled default ATAC-seq file; other genomes are
    silently ignored.
    """
    chrom_sizes = getChromSizesDict(genome)
    chrom_names = list(chrom_sizes.keys())
    if genome == GRCh37:
        check_download_chrbased_npy_atac_seq_files(DEFAULT_ATAC_SEQ_OCCUPANCY_FILE, chrom_names)
def install_sample_vcf_files():
    """Download the bundled sample VCF files into the current working directory tree."""
    # Download to where the SigProfilerTopography is run
    check_download_sample_vcf_files()
def install_sample_probability_files():
    """Download the bundled sample probability files into the current working directory tree."""
    # Download to where the SigProfilerTopography is run
    check_download_sample_probability_files()
#######################################################
#For Skin-Melanoma USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM_SPLIT is better
#For others USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM is better
def runOccupancyAnalyses(genome,
                         outputDir,
                         jobname,
                         numofSimulations,
                         job_tuples,
                         sample_based,
                         library_file_with_path,
                         library_file_memo,
                         chromSizesDict,
                         chromNamesList,
                         ordered_all_sbs_signatures_array,
                         ordered_all_dbs_signatures_array,
                         ordered_all_id_signatures_array,
                         ordered_sbs_signatures_with_cutoffs_array,
                         ordered_dbs_signatures_with_cutoffs_array,
                         ordered_id_signatures_with_cutoffs_array,
                         ordered_sbs_signatures_cutoffs,
                         ordered_dbs_signatures_cutoffs,
                         ordered_id_signatures_cutoffs,
                         computation_type,
                         occupancy_type,
                         occupancy_calculation_type,
                         plusorMinus,
                         remove_outliers,
                         quantileValue,
                         is_discreet,
                         verbose):
    """Thin wrapper that launches an occupancy analysis (nucleosome/epigenomics).

    Warns (without raising) when `library_file_with_path` is neither one of the
    SigProfilerTopography default files nor an existing path, then delegates every
    argument to occupancyAnalysis(). Performance note from the original authors:
    for Skin-Melanoma the *_SPLIT computation type is faster, for other cancer
    types the plain per-chrom-and-sim type is faster.
    """
    # Best-effort sanity check on the library file; the analysis is attempted anyway.
    if (os.path.basename(library_file_with_path) not in SIGPROFILERTOPOGRAPHY_DEFAULT_FILES) and (not os.path.exists(library_file_with_path)):
        print('There is no such file under %s' %(library_file_with_path))

    # Supported computation types:
    # computation_type = USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM
    # computation_type = USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM_SPLIT
    occupancyAnalysis(genome,
                      computation_type,
                      occupancy_type,
                      occupancy_calculation_type,
                      sample_based,
                      plusorMinus,
                      chromSizesDict,
                      chromNamesList,
                      outputDir,
                      jobname,
                      numofSimulations,
                      job_tuples,
                      library_file_with_path,
                      library_file_memo,
                      ordered_all_sbs_signatures_array,
                      ordered_all_dbs_signatures_array,
                      ordered_all_id_signatures_array,
                      ordered_sbs_signatures_with_cutoffs_array,
                      ordered_dbs_signatures_with_cutoffs_array,
                      ordered_id_signatures_with_cutoffs_array,
                      ordered_sbs_signatures_cutoffs,
                      ordered_dbs_signatures_cutoffs,
                      ordered_id_signatures_cutoffs,
                      remove_outliers,
                      quantileValue,
                      is_discreet,
                      verbose)
#######################################################
#######################################################
def runReplicationTimeAnalysis(genome,
                               outputDir,
                               jobname,
                               numofSimulations,
                               job_tuples,
                               sample_based,
                               replicationTimeFilename,
                               chromSizesDict,
                               chromNamesList,
                               computation_type,
                               ordered_all_sbs_signatures_array,
                               ordered_all_dbs_signatures_array,
                               ordered_all_id_signatures_array,
                               ordered_sbs_signatures_with_cutoffs,
                               ordered_dbs_signatures_with_cutoffs,
                               ordered_id_signatures_with_cutoffs,
                               ordered_sbs_signatures_cutoffs,
                               ordered_dbs_signatures_cutoffs,
                               ordered_id_signatures_cutoffs,
                               is_discreet,
                               verbose,
                               matrix_generator_path):
    """Thin wrapper that launches the replication timing analysis.

    Delegates every argument (reordered to the callee's signature) to
    replicationTimeAnalysis().
    """
    # Fill np array during runtime managed by replication_time_np_arrays_fill_runtime=True
    # Supported computation types:
    # computation_type = USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM
    # computation_type = USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM_SPLIT
    replicationTimeAnalysis(computation_type,
                            sample_based,
                            genome,
                            chromSizesDict,
                            chromNamesList,
                            outputDir,
                            jobname,
                            numofSimulations,
                            job_tuples,
                            replicationTimeFilename,
                            ordered_all_sbs_signatures_array,
                            ordered_all_dbs_signatures_array,
                            ordered_all_id_signatures_array,
                            ordered_sbs_signatures_with_cutoffs,
                            ordered_dbs_signatures_with_cutoffs,
                            ordered_id_signatures_with_cutoffs,
                            ordered_sbs_signatures_cutoffs,
                            ordered_dbs_signatures_cutoffs,
                            ordered_id_signatures_cutoffs,
                            is_discreet,
                            verbose,
                            matrix_generator_path)
###############################################
#######################################################
#######################################################
def runReplicationStrandBiasAnalysis(outputDir,
                                     jobname,
                                     numofSimulations,
                                     job_tuples,
                                     sample_based,
                                     all_samples_np_array,
                                     replicationTimeFilename,
                                     replicationTimeValleyFilename,
                                     replicationTimePeakFilename,
                                     chromSizesDict,
                                     chromNamesList,
                                     computation_type,
                                     ordered_all_sbs_signatures_array,
                                     ordered_all_dbs_signatures_array,
                                     ordered_all_id_signatures_array,
                                     ordered_sbs_signatures,
                                     ordered_dbs_signatures,
                                     ordered_id_signatures,
                                     ordered_sbs_signatures_cutoffs,
                                     ordered_dbs_signatures_cutoffs,
                                     ordered_id_signatures_cutoffs,
                                     is_discreet,
                                     verbose):
    """Thin wrapper that launches the replication strand bias analysis.

    Creates output/<jobname>/DATA/<REPLICATIONSTRANDBIAS> and delegates to
    replicationStrandBiasAnalysis(), passing the replication-time signal,
    valley and peak filenames through under the callee's parameter names.
    """
    os.makedirs(os.path.join(outputDir,jobname,DATA,REPLICATIONSTRANDBIAS),exist_ok=True)

    # Local aliases matching the callee's terminology.
    smoothedWaveletRepliseqDataFilename = replicationTimeFilename
    valleysBEDFilename = replicationTimeValleyFilename
    peaksBEDFilename = replicationTimePeakFilename

    # Supported computation types:
    # computation_type = USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM
    # computation_type = USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM_SPLIT
    replicationStrandBiasAnalysis(outputDir,
                                  jobname,
                                  numofSimulations,
                                  job_tuples,
                                  sample_based,
                                  all_samples_np_array,
                                  chromSizesDict,
                                  chromNamesList,
                                  computation_type,
                                  smoothedWaveletRepliseqDataFilename,
                                  valleysBEDFilename,
                                  peaksBEDFilename,
                                  ordered_all_sbs_signatures_array,
                                  ordered_all_dbs_signatures_array,
                                  ordered_all_id_signatures_array,
                                  ordered_sbs_signatures,
                                  ordered_dbs_signatures,
                                  ordered_id_signatures,
                                  ordered_sbs_signatures_cutoffs,
                                  ordered_dbs_signatures_cutoffs,
                                  ordered_id_signatures_cutoffs,
                                  is_discreet,
                                  verbose)
###############################################
#######################################################
#######################################################
def runTranscriptionStradBiasAnalysis(outputDir,
                                      jobname,
                                      numofSimulations,
                                      job_tuples,
                                      sample_based,
                                      all_samples_np_array,
                                      chromNamesList,
                                      computation_type,
                                      ordered_all_sbs_signatures_array,
                                      ordered_all_dbs_signatures_array,
                                      ordered_all_id_signatures_array,
                                      ordered_sbs_signatures,
                                      ordered_dbs_signatures,
                                      ordered_id_signatures,
                                      ordered_sbs_signatures_cutoffs,
                                      ordered_dbs_signatures_cutoffs,
                                      ordered_id_signatures_cutoffs,
                                      is_discreet,
                                      verbose):
    """Thin wrapper that launches the transcription strand bias analysis.

    Creates output/<jobname>/DATA/<TRANSCRIPTIONSTRANDBIAS> and delegates all
    arguments to transcriptionStrandBiasAnalysis().
    (Name typo "Strad" kept for caller compatibility.)
    """
    os.makedirs(os.path.join(outputDir,jobname,DATA,TRANSCRIPTIONSTRANDBIAS),exist_ok=True)

    # Supported computation types:
    # computation_type = USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM
    # computation_type = USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM_SPLIT
    transcriptionStrandBiasAnalysis(outputDir,
                                    jobname,
                                    numofSimulations,
                                    job_tuples,
                                    sample_based,
                                    all_samples_np_array,
                                    computation_type,
                                    chromNamesList,
                                    ordered_all_sbs_signatures_array,
                                    ordered_all_dbs_signatures_array,
                                    ordered_all_id_signatures_array,
                                    ordered_sbs_signatures,
                                    ordered_dbs_signatures,
                                    ordered_id_signatures,
                                    ordered_sbs_signatures_cutoffs,
                                    ordered_dbs_signatures_cutoffs,
                                    ordered_id_signatures_cutoffs,
                                    is_discreet,
                                    verbose)
###############################################
#######################################################
#######################################################
def runProcessivityAnalysis(mutation_types_contexts,
                            outputDir,
                            jobname,
                            numofSimulations,
                            chromNamesList,
                            processivity_calculation_type,
                            inter_mutational_distance_for_processivity,
                            subsSignature_cutoff_numberofmutations_averageprobability_df,
                            verbose):
    """Thin wrapper that launches the processivity analysis.

    Creates output/<jobname>/DATA/<PROCESSIVITY> and delegates to
    processivityAnalysis().
    """
    os.makedirs(os.path.join(outputDir,jobname,DATA,PROCESSIVITY),exist_ok=True)

    # Internally set: signature probabilities are always considered in the
    # processivity analysis (not exposed as a caller option).
    considerProbabilityInProcessivityAnalysis = True

    processivityAnalysis(mutation_types_contexts,
                         chromNamesList,
                         processivity_calculation_type,
                         inter_mutational_distance_for_processivity,
                         outputDir,
                         jobname,
                         numofSimulations,
                         considerProbabilityInProcessivityAnalysis,
                         subsSignature_cutoff_numberofmutations_averageprobability_df,
                         verbose)
###############################################
#######################################################
#######################################################
def deleteOldData(outputDir,jobname,occupancy_type):
    """Remove the output/<jobname>/DATA/<occupancy_type> directory, if present.

    Removal errors are reported to stdout rather than raised.
    """
    stale_data_path = os.path.join(outputDir, jobname, DATA, occupancy_type)
    if not os.path.exists(stale_data_path):
        return
    try:
        shutil.rmtree(stale_data_path)
    except OSError as e:
        print('Error: %s - %s.' % (e.filename, e.strerror))
#######################################################
#######################################################
def deleteOldFigures(outputDir, jobname, occupancy_type):
    """Remove the output/<jobname>/FIGURE/<occupancy_type> directory, if present.

    Removal errors are reported to stdout rather than raised.
    """
    stale_figures_path = os.path.join(outputDir, jobname, FIGURE, occupancy_type)
    print('Topography.py jobnamePath:%s ' % stale_figures_path)
    if not os.path.exists(stale_figures_path):
        return
    try:
        shutil.rmtree(stale_figures_path)
    except OSError as e:
        print('Error: %s - %s.' % (e.filename, e.strerror))
#######################################################
# Deprecated.
# We assume that simulated data will have the same number_of_splits as the real data
def get_job_tuples(chrlong_numberofmutations_df,numofSimulations):
    """Deprecated. Build (chrLong, sim_num, split_index) job tuples.

    For every chromosome in the dataframe, the mutation count determines how
    many splits of NUMBER_OF_MUTATIONS_IN_EACH_SPLIT mutations are needed;
    one tuple is emitted per (chromosome, simulation, split) combination,
    covering simulations 0..numofSimulations inclusive (0 = real data).
    Assumes simulated data has the same number_of_splits as the real data.
    """
    job_tuples = []
    for chrLong in chrlong_numberofmutations_df['chrLong'].unique():
        chrom_rows = chrlong_numberofmutations_df[chrlong_numberofmutations_df['chrLong'] == chrLong]
        mutation_count = int(chrom_rows['number_of_mutations'].values[0])
        split_count = math.ceil(mutation_count / NUMBER_OF_MUTATIONS_IN_EACH_SPLIT)
        job_tuples.extend((chrLong, sim_num, split_index)
                          for sim_num in range(numofSimulations + 1)
                          for split_index in range(split_count))
    return job_tuples
def get_all_signatures_array(ordered_all_sbs_signatures_wrt_probabilities_file_array, signature_starts_with):
    """Return a numpy array of the signature names starting with the given prefix.

    A None input yields an empty array; order of matching names is preserved.
    """
    if ordered_all_sbs_signatures_wrt_probabilities_file_array is None:
        return np.array([])
    matching_signatures = [signature
                           for signature in ordered_all_sbs_signatures_wrt_probabilities_file_array
                           if signature.startswith(signature_starts_with)]
    return np.array(matching_signatures)
#######################################################
# inputDir ='/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/input_for_matgen/BreastCancer560_subs_indels_dinucs'
# outputDir = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/output_test/'
# jobname = 'BreastCancer560'
#Run SigProfilerTopography Analyses
#Former full path now only the filename with extension
# nucleosomeOccupancy = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/lib/nucleosome/wgEncodeSydhNsomeGm12878Sig.wig'
# replicationSignal = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/lib/replication/GSM923442_hg19_wgEncodeUwRepliSeqMcf7WaveSignalRep1.wig'
# replicationValley = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/lib/replication/GSM923442_hg19_wgEncodeUwRepliSeqMcf7ValleysRep1.bed'
# replicationPeak = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/lib/replication/GSM923442_hg19_wgEncodeUwRepliSeqMcf7PkRep1.bed'
# subs_probabilities_file_path = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/output/560_BRCA_WGS_DINUCS/SBS96/Suggested_Solution/Decomposed_Solution/Mutation_Probabilities.txt'
# indels_probabilities_file_path = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/output/560_BRCA_WGS_DINUCS/ID83/Suggested_Solution/Decomposed_Solution/Mutation_Probabilities.txt'
# dinucs_probabilities_file_path = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/output/560_BRCA_WGS_DINUCS/DBS78/Suggested_Solution/Decomposed_Solution/Mutation_Probabilities.txt'
def runAnalyses(genome,
inputDir,
outputDir,
jobname,
numofSimulations,
sbs_probabilities = None,
dbs_probabilities = None,
id_probabilities = None,
mutation_types_contexts = None,
mutation_types_contexts_for_signature_probabilities = None,
epigenomics_files = None,
epigenomics_files_memos = None,
epigenomics_biosamples = None,
epigenomics_dna_elements = None,
epigenomics_dir_name = None,
nucleosome_biosample = None,
nucleosome_file = None,
replication_time_biosample = None,
replication_time_signal_file = None,
replication_time_valley_file = None,
replication_time_peak_file = None,
computation_type = USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM,
epigenomics = False,
nucleosome = False,
replication_time = False,
strand_bias = False,
replication_strand_bias = False,
transcription_strand_bias = False,
processivity = False,
sample_based = False,
plot_figures = True,
step1_sim_data = True,
step2_matgen_data = True,
step3_prob_merged_data = True,
step4_tables = True,
is_discreet = True,
average_probability = DEFAULT_AVERAGE_PROBABILITY,
num_of_sbs_required = DEFAULT_NUM_OF_SBS_REQUIRED,
num_of_dbs_required = DEFAULT_NUM_OF_DBS_REQUIRED,
num_of_id_required = DEFAULT_NUM_OF_ID_REQUIRED,
plusorMinus_epigenomics = 1000,
plusorMinus_nucleosome = 1000,
epigenomics_heatmap_significance_level = 0.01,
verbose = False,
matrix_generator_path = MATRIX_GENERATOR_PATH,
PCAWG = False,
plot_epigenomics = False,
plot_nucleosome = False,
plot_replication_time = False,
plot_strand_bias = False,
plot_replication_strand_bias = False,
plot_transcription_strand_bias = False,
plot_processivity = False,
remove_outliers = False,
quantileValue = 0.97,
delete_old = False,
plot_mode = PLOTTING_FOR_SIGPROFILERTOPOGRAPHY_TOOL,
occupancy_calculation_type = MISSING_SIGNAL,
processivity_calculation_type = CONSIDER_DISTANCE,
inter_mutational_distance_for_processivity = 10000,
combine_p_values_method = COMBINE_P_VALUES_METHOD_FISHER,
fold_change_window_size = 100,
num_of_real_data_avg_overlap = DEFAULT_NUM_OF_REAL_DATA_OVERLAP_REQUIRED):
current_abs_path = os.path.dirname(os.path.realpath(__file__))
chromSizesDict = getChromSizesDict(genome)
chromNamesList = list(chromSizesDict.keys())
chromShortNamesList=getShortNames(chromNamesList)
# Filled in Step3
# contains all the columns in order w.r.t. probabilities file
ordered_all_sbs_signatures_wrt_probabilities_file_array = None
ordered_all_dbs_signatures_wrt_probabilities_file_array = None
ordered_all_id_signatures_wrt_probabilities_file_array = None
###################################################
if mutation_types_contexts is None:
mutation_types_contexts=[]
if (sbs_probabilities is not None):
mutation_types_contexts.append(SBS96)
if (id_probabilities is not None):
mutation_types_contexts.append(ID)
if (dbs_probabilities is not None):
mutation_types_contexts.append(DBS)
# If still None
if mutation_types_contexts is None:
print('--- There is a situation/problem: mutation_types_contexts is None.')
print('--- mutation_types_contexts has to be set before SigProfilerTopography run.')
if mutation_types_contexts_for_signature_probabilities is None:
mutation_types_contexts_for_signature_probabilities=mutation_types_contexts
###################################################
###################################################
if step1_sim_data:
step2_matgen_data = True
step3_prob_merged_data = True
step4_tables = True
elif step2_matgen_data:
step3_prob_merged_data = True
step4_tables = True
elif step3_prob_merged_data:
step4_tables = True
###################################################
###################################################
if (average_probability!=DEFAULT_AVERAGE_PROBABILITY) or \
(num_of_sbs_required!=DEFAULT_NUM_OF_SBS_REQUIRED) or \
(num_of_dbs_required!=DEFAULT_NUM_OF_DBS_REQUIRED) or \
(num_of_id_required!=DEFAULT_NUM_OF_ID_REQUIRED):
step4_tables = True
###################################################
#################################################################################
################################## Setting starts ###############################
################## Set full path library files starts ###########################
#################################################################################
if genome is None:
print('Parameter genome:%s must be set for SigProfilerTopography Analysis.' %(genome))
###############################################
if strand_bias:
replication_strand_bias=True
transcription_strand_bias=True
if plot_strand_bias:
plot_replication_strand_bias=True
plot_transcription_strand_bias=True
###############################################
###############################################
# We need full path of the library files
if (genome==GRCh37) and (epigenomics_files==None):
epigenomics_files = [DEFAULT_ATAC_SEQ_OCCUPANCY_FILE,
DEFAULT_H3K27ME3_OCCUPANCY_FILE,
DEFAULT_H3K36ME3_OCCUPANCY_FILE,
DEFAULT_H3K9ME3_OCCUPANCY_FILE,
DEFAULT_H3K27AC_OCCUPANCY_FILE,
DEFAULT_H3K4ME1_OCCUPANCY_FILE,
DEFAULT_H3K4ME3_OCCUPANCY_FILE,
DEFAULT_CTCF_OCCUPANCY_FILE]
epigenomics_files_memos=[]
for epigenomics_file in epigenomics_files:
epigenomics_files_memos.append(os.path.splitext(os.path.basename(epigenomics_file))[0])
# Defines columns in the heatmap
# These strings must be within filenames (without file extension)
# Order is not important
epigenomics_dna_elements = ['H3K27me3', 'H3K36me3', 'H3K9me3', 'H3K27ac', 'H3K4me1', 'H3K4me3', 'CTCF', 'ATAC']
# Defines rows in the detailed heatmap
# These strings must be within filenames (without file extension)
# Order is not important
epigenomics_biosamples = ['breast_epithelium']
for file_index, filename in enumerate(epigenomics_files):
epigenomics_files[file_index] = os.path.join(current_abs_path, LIB, EPIGENOMICS, filename)
# These must be under epigenomics under installed SigPofilerTopography
elif (genome == MM10) and (epigenomics_files == None):
epigenomics_files = [ENCFF575PMI_mm10_embryonic_facial_prominence_ATAC_seq,
ENCFF993SRY_mm10_embryonic_fibroblast_H3K4me1,
ENCFF912DNP_mm10_embryonic_fibroblast_H3K4me3,
ENCFF611HDQ_mm10_embryonic_fibroblast_CTCF,
ENCFF152DUV_mm10_embryonic_fibroblast_POLR2A,
ENCFF114VLZ_mm10_embryonic_fibroblast_H3K27ac]
epigenomics_files_memos = []
for epigenomics_file in epigenomics_files:
epigenomics_files_memos.append(os.path.splitext(os.path.basename(epigenomics_file))[0])
# Defines columns in the heatmap
# These strings must be within filenames (without file extension)
# Order is not important
epigenomics_dna_elements = ['ATAC', 'H3K4me1', 'H3K4me3', 'CTCF', 'POLR2A', 'H3K27ac']
# Defines rows in the detailed heatmap
# These strings must be within filenames (without file extension)
# Order is not important
epigenomics_biosamples = ['embryonic_fibroblast']
for file_index, filename in enumerate(epigenomics_files):
epigenomics_files[file_index] = os.path.join(current_abs_path, LIB, EPIGENOMICS, filename)
###############################################
###############################################
if genome==MM10:
#Case1: File is not set, Biosample is not set
if (nucleosome_file is None) and (nucleosome_biosample is None):
nucleosome_biosample = MEF
nucleosome_file = getNucleosomeFile(nucleosome_biosample)
#Case2: File is not set, Biosample is set
elif (nucleosome_file is None) and (nucleosome_biosample is not None):
if (nucleosome_biosample in available_nucleosome_biosamples):
#Sets the filename without the full path
nucleosome_file = getNucleosomeFile(nucleosome_biosample)
#Case3: nucleosome_file is a filename with fullpath (User provided) , biosample is not set
elif ((nucleosome_file is not None) and (nucleosome_biosample is None)):
# We expect that user has provided nucleosome file with full path
nucleosome_biosample = UNDECLARED
#Case4: nucleosome_file is a filename with fullpath (User provided), biosample is set
#Do nothing; use them as they are
elif genome==GRCh37:
#Case1: File is not set, Biosample is not set
if (nucleosome_file is None) and (nucleosome_biosample is None):
nucleosome_biosample = K562
nucleosome_file = getNucleosomeFile(nucleosome_biosample)
#Case2: File is not set, Biosample is set
elif (nucleosome_file is None) and (nucleosome_biosample is not None):
if (nucleosome_biosample in available_nucleosome_biosamples):
#Sets the filename without the full path
nucleosome_file = getNucleosomeFile(nucleosome_biosample)
#Case3: nucleosome_file is a filename with fullpath (User provided) , biosample is not set
elif ((nucleosome_file is not None) and (nucleosome_biosample is None)):
# We expect that user has provided nucleosome file with full path
nucleosome_biosample = UNDECLARED
#Case4: nucleosome_file is a filename with fullpath (User provided), biosample is set
#Do nothing; use them as they are
###############################################
###############################################
if genome==MM10:
# Case1: Files are not set, Biosample is not set
if (replication_time_signal_file is None) and (replication_time_valley_file is None) and (replication_time_peak_file is None) and (replication_time_biosample is None):
replication_time_biosample=MEF
#We only set replication_time_signal_file
# replication_time_valley_file is None
# replication_time_peak_file is None
replication_time_signal_file, replication_time_valley_file,replication_time_peak_file=getReplicationTimeFiles(replication_time_biosample)
elif genome==GRCh37:
# We need full path of the library files
# By default replication_time_biosample=MCF7 and signal, valley, peak files are None
# Case1: Files are not set, Biosample is not set
if (replication_time_signal_file is None) and (replication_time_valley_file is None) and (replication_time_peak_file is None) and (replication_time_biosample is None):
replication_time_biosample=MCF7
replication_time_signal_file, replication_time_valley_file,replication_time_peak_file=getReplicationTimeFiles(replication_time_biosample)
if (replication_time or replication_strand_bias):
# For using SigProfilerTopography Provided Replication Time Files
check_download_replication_time_files(replication_time_signal_file, replication_time_valley_file,replication_time_peak_file)
#Case2: Files are not set, Biosample is set
elif (replication_time_signal_file is None) and (replication_time_valley_file is None) and (replication_time_peak_file is None) and (replication_time_biosample is not None):
if (replication_time_biosample in available_replication_time_biosamples):
replication_time_signal_file, replication_time_valley_file, replication_time_peak_file = getReplicationTimeFiles(replication_time_biosample)
if (replication_time or replication_strand_bias):
# For using SigProfilerTopography Provided Replication Time Files
check_download_replication_time_files(replication_time_signal_file, replication_time_valley_file,replication_time_peak_file)
#Case3: replication time file(s) are filenames with full path (user provided), biosample is not set
elif ((replication_time_signal_file is not None) or (replication_time_valley_file is not None) or (replication_time_peak_file is not None)) and (replication_time_biosample is None):
replication_time_biosample = UNDECLARED
#Case4: Files are set. Biosample is set. Use as it is. Do nothing.
###############################################
###############################################
# data files are named using user provided epigenomics_files_memos or using epigenomics_file_memos_created
epigenomics_file_memos_created = []
# Run for each epigenomics file
if (epigenomics_files_memos is None) or (len(epigenomics_files_memos) != len(epigenomics_files)):
for idx, epigenomics_file in enumerate(epigenomics_files):
epigenomics_file_memo = os.path.splitext(os.path.basename(epigenomics_file))[0]
epigenomics_file_memos_created.append(epigenomics_file_memo)
# Used for plotting
if (epigenomics_files_memos is None) or (len(epigenomics_files_memos) != len(epigenomics_files)):
epigenomics_files_memos = epigenomics_file_memos_created
if (epigenomics_biosamples is None) or (len(epigenomics_biosamples) == 0):
epigenomics_biosamples = [UNDECLARED]
###############################################
#################################################################################
################## Set full path library files ends #############################
################################## Setting ends #################################
#################################################################################
print('#################################################################################')
# print('--- %s' %platform.platform())
# print('--- %s' %platform.system())
#print("--- Operating System: %s" %(platform.uname()[0]))
print("--- SigProfilerTopography starts")
print('#################################################################################')
print('#################################################################################')
print("--- Operating System: %s" %(platform.platform()))
print("--- Release: %s" %platform.uname()[2])
print("--- Version: %s" %platform.uname()[3])
print("--- Nodename: %s" %platform.uname()[1])
print('#################################################################################')
print('#################################################################################')
print("--- Python and Package Versions")
print("--- Python Version: %s" %(str(platform.sys.version_info.major) + "." + str(platform.sys.version_info.minor) + "." + str(platform.sys.version_info.micro)))
print('--- SigProfilerTopography Version:%s' % topography_version.version)
print("--- SigProfilerMatrixGenerator Version: %s" %matrix_generator_version.version)
print("--- SigProfilerSimulator version: %s" %simulator_version.version)
print("--- pandas version: %s" %pd.__version__)
print("--- numpy version: %s" %np.__version__)
print("--- statsmodels version: %s" %statsmodels.__version__)
print("--- scipy version: %s" %scipy.__version__)
print("--- matplotlib version: %s" %plt.__version__)
print('#################################################################################\n')
print('#################################################################################')
print('--- SigProfilerTopography parameters')
print('--- Genome: %s' %(genome))
print('--- inputDir:%s' %inputDir)
print('--- outputDir:%s' %outputDir)
print('--- jobname:%s' %jobname)
if (sbs_probabilities is not None):
print('--- sbs_probabilities:%s' %sbs_probabilities)
if (dbs_probabilities is not None):
print('--- dbs_probabilities:%s' %dbs_probabilities)
if (id_probabilities is not None):
print('--- id_probabilities:%s' %id_probabilities)
print('--- numofSimulations:%d' %numofSimulations)
print('\n--- epigenomics_files:%s' %epigenomics_files)
print('--- epigenomics_files_memos:%s' %epigenomics_files_memos)
print('--- epigenomics_biosamples:%s' %epigenomics_biosamples)
print('--- epigenomics_dna_elements:%s' %epigenomics_dna_elements)
print('--- number of epigenomics_files:%d' %len(epigenomics_files))
print('\n--- nucleosome_biosample:%s' %nucleosome_biosample)
print('--- nucleosome_file:%s' % nucleosome_file)
print('\n--- replication_time_biosample:%s' % replication_time_biosample)
print('--- replication_time_signal_file:%s' % replication_time_signal_file)
print('--- replication_time_valley_file:%s' % replication_time_valley_file)
print('--- replication_time_peak_file:%s' % replication_time_peak_file)
print('\n--- mutation_types_contexts:%s' %mutation_types_contexts)
print('--- mutation_types_contexts_for_signature_probabilities:%s' %mutation_types_contexts_for_signature_probabilities)
print('--- computation_type:%s' %computation_type)
print('--- mutation contribution is_discreet:%s\n' %is_discreet)
if sample_based:
print('--- Sample Based Analysis.')
if epigenomics:
print('--- Epigenomics Analysis.')
if nucleosome:
print('--- Nucleosome Analysis.')
if replication_time:
print('--- Replication Time Analysis.')
if (strand_bias or replication_strand_bias):
print('--- Replication Strand Bias Analysis.')
if (strand_bias or transcription_strand_bias):
print('--- Transcription Strand Bias Analysis.')
if processivity:
print('--- Processivity Analysis.')
print('--- step1_sim_data:%s' %step1_sim_data)
print('--- step2_matgen_data:%s' %step2_matgen_data)
print('--- step3_prob_merged_data:%s' %step3_prob_merged_data)
print('--- step4_tables:%s' %step4_tables)
print('--- plot_figures:%s' %plot_figures)
print('--- average mutation probability required %0.2f' %average_probability)
print('--- minimum number of sbs mutations required: %d' %num_of_sbs_required)
print('--- minimum number of id mutations required: %d' %num_of_id_required)
print('--- minimum number of dbs mutations required: %d' %num_of_dbs_required)
if epigenomics:
print('--- number of bases considered before and after mutation start for epigenomics analysis: %d' %plusorMinus_epigenomics)
if nucleosome:
print('--- number of bases considered before and after mutation start for nucleosome occupancy analysis: %d' %plusorMinus_nucleosome)
print('#################################################################################\n')
print('#################################################################################')
numofProcesses = multiprocessing.cpu_count()
print('--- numofProcesses for multiprocessing: %d' %numofProcesses)
print('#################################################################################\n')
#################################################################################
print('#################################################################################')
print('--- For Genome: %s' %(genome))
print('--- Chromosome names: %s' %(chromNamesList))
print('--- Chromosome short names: %s' % (chromShortNamesList))
print('--- current_abs_path: %s ' % current_abs_path)
print('#################################################################################\n')
#################################################################################
###################################################################################################################
################################################# All Steps starts ################################################
###################################################################################################################
###################################################################################################
######################### SigProfilerMatrixGenerator for original data starts #####################
###################################################################################################
if (step2_matgen_data):
# Run MatrixGenerator for original data: this call prepares chrBased input files for original data with mutation contexts
print('#################################################################################')
print('--- SigProfilerMatrixGenerator for original data')
start_time = time.time()
print('For original data inputDir:%s' % (inputDir))
matrices = matGen.SigProfilerMatrixGeneratorFunc(jobname, genome, inputDir, plot=False, seqInfo=True)
# print('matrices')
# print(matrices)
# original matrix generator chrbased data will be under inputDir/output/vcf_files/SNV
# original matrix generator chrbased data will be under inputDir/output/vcf_files/DBS
# original matrix generator chrbased data will be under inputDir/output/vcf_files/ID
print("--- SigProfilerMatrixGenerator for original data: %s seconds ---" % (time.time() - start_time))
print("--- SigProfilerMatrixGenerator for original data: %f minutess ---" % float((time.time() - start_time) / 60))
print('#################################################################################\n')
###################################################################################################
######################### SigProfilerMatrixGenerator for original data ends #######################
###################################################################################################
###################################################################################################################
################################## Step1 Simulations if any starts ################################################
###################################################################################################################
if ((numofSimulations > 0) and (step1_sim_data)):
###################################################################################################
############################ SigProfilerSimulator for n simulations starts #######################
###################################################################################################
print('#################################################################################')
print('--- SigProfilerSimulator for %d simulations starts' %(numofSimulations))
start_time = time.time()
#Call SigProfilerSimulator separately for each mutation type context otherwise it counts DBS mutations also in SBS mutations
# Topography uses same mutation types with Simulator
# Acceptable contexts for Simulator include {'96', '384', '1536', '6144', 'DBS', 'ID', 'ID415'}.
# '96' or '384' for single base substitutions (Simulator 1536, or 3072)
# 'DBS' for double base substitutions
# 'ID' for indels
for mutation_type_context in mutation_types_contexts:
mutation_type_context_for_simulator = []
mutation_type_context_for_simulator.append(mutation_type_context)
# Please notice that Simulator reverse the given input mutationTypes_for_simulator
print('--- SigProfilerSimulator is running for %s' %(mutation_type_context))
simulator.SigProfilerSimulator(jobname, inputDir, genome, mutation_type_context_for_simulator,simulations=numofSimulations,chrom_based=True, gender='male')
print("--- SigProfilerSimulator for %d simulations: %s seconds" %(numofSimulations,(time.time() - start_time)))
print("--- SigProfilerSimulator for %d simulations: %f minutes" %(numofSimulations,float((time.time()-start_time)/60)))
print('--- SigProfilerSimulator for %d simulations ends' %(numofSimulations))
print('#################################################################################\n')
###################################################################################################
############################ SigProfilerSimulator for n simulations ends #########################
###################################################################################################
###################################################################################################################
################################## Step1 Simulations if any ends ##################################################
###################################################################################################################
###################################################################################################################
################################## Step2 Matrix Generator for n simulations starts ################################
###################################################################################################################
if (step2_matgen_data):
if (numofSimulations > 0):
###################################################################################################
########################### Create simN directories for MatrixGenerator starts ####################
###################################################################################################
print('#################################################################################')
print('--- Create directories for %d simulations under %s/output/simulations/' %(numofSimulations,inputDir))
start_time = time.time()
#Create directories sim1 to SimN under inputDir/output/simulations/
access_rights = 0o755
for simNum in range(1,numofSimulations+1):
try:
simName = 'sim%d' %(simNum)
simDir = os.path.join(inputDir,'output','simulations',simName)
if (not os.path.exists(simDir)):
os.mkdir(simDir, access_rights)
for mutation_type_context in mutation_types_contexts:
simDir = os.path.join(inputDir,'output','simulations',simName,mutation_type_context)
if (not os.path.exists(simDir)):
os.mkdir(simDir, access_rights)
except OSError:
print("Creation of the directory %s failed" %simDir)
# else:
# print("Successfully created the directory %s" %simDir)
for mutation_type_context in mutation_types_contexts:
# Simulator creates one maf file for each simulation for each mutation context
# Simulator creates maf files under inputDir/output/simulations/jobname_simulations_GRCh37_96
# Simulator creates maf files under inputDir/output/simulations/jobname_simulations_GRCh37_ID
# Simulator creates maf files under inputDir/output/simulations/jobname_simulations_GRCh37_DBS
dirName = '%s_simulations_%s_%s' %(jobname, genome,mutation_type_context)
copyFromDir = os.path.join(inputDir,'output','simulations',dirName)
copyToMainDir= os.path.join(inputDir,'output','simulations')
# Topography copies these maf files to inputDir/output/simulations/simX/mutation_type_context/X.maf
# So that, in the next step MatrixGenerator can create chrom based seqinfo text files for each X.maf file
copyMafFiles(copyFromDir,copyToMainDir,mutation_type_context,numofSimulations)
print("--- Create directories and copy files: %s seconds ---" %(time.time()-start_time))
print("--- Create directories and copy files: %f minutes ---" %(float((time.time()-start_time)/60)))
print('#################################################################################\n')
###################################################################################################
########################### Create simN directories for MatrixGenerator ends ######################
###################################################################################################
###################################################################################################
#Important note: Separate directory creation is necessary for Matrix Generator
#inputDir/output/simulations/simX/96/X.maf
#inputDir/output/simulations/simX/ID/X.maf
#inputDir/output/simulations/simX/DBS/X.maf
#enables MatrixGenerator to create chr based simulated data files under
#sim1 matrix generator chrbased data will be under inputDir/output/simulations/simX/96/output/vcf_files/SNV
#sim1 matrix generator chrbased data will be under inputDir/output/simulations/simX/ID/output/vcf_files/ID
#sim1 matrix generator chrbased data will be under inputDir/output/simulations/simX/DBS/output/vcf_files/DBS
#otherwise all simulations maf files will be under
#inputDir/output/simulations/Skin-Melanoma_simulations_GRCh37_96
#inputDir/output/simulations/Skin-Melanoma_simulations_GRCh37_DBS
#inputDir/output/simulations/Skin-Melanoma_simulations_GRCh37_ID
#Then running MatrixGenerator for each simulation will not be possible.
###################################################################################################
###################################################################################################
####################### Run MatrixGenerator for each simulation starts ############################
###################################################################################################
# Step2: run SigProfilerMatrixGenerator once per simulation, and within each simulation once per
# requested mutation type context (SBS/DBS/ID). seqInfo=True makes the generator emit the
# chromosome-based vcf output files that the later merge step (Step3) reads.
print('#################################################################################')
print('--- Run SigProfilerMatrixGenerator for each simulation starts')
start_time = time.time()
for simNum in range(1,numofSimulations+1):
simName = 'sim%d' %(simNum)
#For each simulation we are calling matrix generator separately for each mutation type context
print('--- SigProfilerMatrixGenerator is run for %s starts' %(simName))
for mutation_type_context in mutation_types_contexts:
simInputDir= os.path.join(inputDir,'output','simulations',simName,mutation_type_context)
print('For %s: %s simInputDir:%s' %(mutation_type_context,simName,simInputDir))
matrices = matGen.SigProfilerMatrixGeneratorFunc(jobname,genome,simInputDir,plot=False, seqInfo=True)
# print('matrices')
# print(matrices)
print('#####################################')
print('--- SigProfilerMatrixGenerator is run for %s ends\n' % (simName))
#sim1 matrix generator chrbased data will be under inputDir/output/simulations/sim1/96/output/vcf_files/SNV
#sim1 matrix generator chrbased data will be under inputDir/output/simulations/sim1/ID/output/vcf_files/ID
#sim1 matrix generator chrbased data will be under inputDir/output/simulations/sim1/DBS/output/vcf_files/DBS
#simN matrix generator chrbased data will be under inputDir/output/simulations/simN/96/output/vcf_files/SNV
#simN matrix generator chrbased data will be under inputDir/output/simulations/simN/ID/output/vcf_files/ID
#simN matrix generator chrbased data will be under inputDir/output/simulations/simN/DBS/output/vcf_files/DBS
print("--- Run MatrixGenerator for each simulation: %s seconds" %(time.time()-start_time))
print("--- Run MatrixGenerator for each simulation: %f minutes" %(float((time.time()-start_time)/60)))
print('--- Run SigProfilerMatrixGenerator for each simulation ends')
print('#################################################################################\n')
###################################################################################################
####################### Run MatrixGenerator for each simulation ends ##############################
###################################################################################################
###################################################################################################################
################################## Step2 Matrix Generator for n simulations ends ##################################
###################################################################################################################
###################################################################################################################
########### Step3 Merge chrom based matrix generator generated files with probabilities starts ####################
###################################################################################################################
# Step3: for each mutation type context, merge the chromosome-based matrix-generator output with the
# per-mutation signature probabilities files, first for the original data (startSimNum=endSimNum=0)
# and then for simulations 1..numofSimulations. The returned arrays record the signature column order
# of the probabilities file, which downstream code relies on.
if (step3_prob_merged_data):
####################################################################################################################
################## Merge original chr based files with Mutation Probabilities starts ##############################
####################################################################################################################
print('#################################################################################')
print('--- Merge original chr based files with Mutation Probabilities starts')
print('#################################################################################')
# sim number 0 denotes the original (non-simulated) data
startSimNum = 0
endSimNum = 0
start_time = time.time()
# SBS
for mutation_type_context in mutation_types_contexts:
# if (mutation_type_context in SBS_CONTEXTS) and (sbs_probabilities is not None):
if (mutation_type_context in SBS_CONTEXTS):
mutation_type_context_for_probabilities = get_mutation_type_context_for_probabilities_file(mutation_types_contexts_for_signature_probabilities,SUBS)
print('--- Merge %s context mutations with probabilities for %s' % (mutation_type_context, sbs_probabilities))
ordered_all_sbs_signatures_wrt_probabilities_file_array = prepareMutationsDataAfterMatrixGenerationAndExtractorForTopography(chromShortNamesList,
inputDir,
outputDir,
jobname,
mutation_type_context,
sbs_probabilities,
mutation_type_context_for_probabilities,
startSimNum,
endSimNum,
SNV,
PCAWG,
verbose)
# ID
# if ((ID in mutation_types_contexts) and (id_probabilities is not None)):
if (ID in mutation_types_contexts):
mutation_type_context_for_probabilities = get_mutation_type_context_for_probabilities_file(mutation_types_contexts_for_signature_probabilities, INDELS)
print('--- Merge %s mutations with probabilities for %s' % (ID, id_probabilities))
ordered_all_id_signatures_wrt_probabilities_file_array = prepareMutationsDataAfterMatrixGenerationAndExtractorForTopography(chromShortNamesList,
inputDir,
outputDir,
jobname,
ID,
id_probabilities,
mutation_type_context_for_probabilities,
startSimNum,
endSimNum,
ID,
PCAWG,
verbose)
# DBS
# if ((DBS in mutation_types_contexts) and (dbs_probabilities is not None)):
if (DBS in mutation_types_contexts):
mutation_type_context_for_probabilities = get_mutation_type_context_for_probabilities_file(mutation_types_contexts_for_signature_probabilities, DINUCS)
print('--- Merge %s mutations with probabilities for %s' % (DBS, dbs_probabilities))
ordered_all_dbs_signatures_wrt_probabilities_file_array = prepareMutationsDataAfterMatrixGenerationAndExtractorForTopography(chromShortNamesList,
inputDir,
outputDir,
jobname,
DBS,
dbs_probabilities,
mutation_type_context_for_probabilities,
startSimNum,
endSimNum,
DBS,
PCAWG,
verbose)
print("--- Merge original chr based files with Mutation Probabilities: %s seconds" % (time.time() - start_time))
print("--- Merge original chr based files with Mutation Probabilities: %f minutes" % (float((time.time() - start_time) / 60)))
print('--- Merge original chr based files with Mutation Probabilities ends')
print('#################################################################################\n')
####################################################################################################################
################## Merge original chr based files with Mutation Probabilities ends ################################
####################################################################################################################
####################################################################################################################
################## Merge simulations chr based files with Mutation Probabilities starts ###########################
####################################################################################################################
if (numofSimulations > 0):
print('#################################################################################')
print('--- Merge simulations chr based files with Mutation Probabilities starts')
print('#################################################################################')
startSimNum=1
endSimNum=numofSimulations
start_time = time.time()
# SBS
for mutation_type_context in mutation_types_contexts:
# if (mutation_type_context in SBS_CONTEXTS) and (sbs_probabilities is not None):
if (mutation_type_context in SBS_CONTEXTS):
mutation_type_context_for_probabilities = get_mutation_type_context_for_probabilities_file(mutation_types_contexts_for_signature_probabilities, SUBS)
print('--- Merge %s mutations with probabilities for %s' %(mutation_type_context,sbs_probabilities))
# NOTE(review): this call passes the string literal 'SNV' where the original-data branch above
# passes the SNV constant — confirm the constant equals 'SNV' so both branches behave the same.
prepareMutationsDataAfterMatrixGenerationAndExtractorForTopography(chromShortNamesList,inputDir,outputDir,jobname,mutation_type_context,sbs_probabilities,mutation_type_context_for_probabilities,startSimNum,endSimNum,'SNV',PCAWG,verbose)
# ID
# if ((ID in mutation_types_contexts) and (id_probabilities is not None)):
if (ID in mutation_types_contexts):
# NOTE(review): the original-data branch above passes INDELS here, but this passes ID —
# confirm get_mutation_type_context_for_probabilities_file treats them identically.
mutation_type_context_for_probabilities = get_mutation_type_context_for_probabilities_file(mutation_types_contexts_for_signature_probabilities, ID)
print('--- Merge %s mutations with probabilities for %s' % (ID, id_probabilities))
prepareMutationsDataAfterMatrixGenerationAndExtractorForTopography(chromShortNamesList,inputDir,outputDir,jobname,'ID',id_probabilities,mutation_type_context_for_probabilities,startSimNum,endSimNum,'ID',PCAWG,verbose)
# DBS
# if ((DBS in mutation_types_contexts) and (dbs_probabilities is not None)):
if (DBS in mutation_types_contexts):
# NOTE(review): the original-data branch above passes DINUCS here, but this passes DBS —
# confirm get_mutation_type_context_for_probabilities_file treats them identically.
mutation_type_context_for_probabilities = get_mutation_type_context_for_probabilities_file(mutation_types_contexts_for_signature_probabilities, DBS)
print('--- Merge %s mutations with probabilities for %s' % (DBS,dbs_probabilities))
prepareMutationsDataAfterMatrixGenerationAndExtractorForTopography(chromShortNamesList,inputDir,outputDir,jobname,'DBS',dbs_probabilities,mutation_type_context_for_probabilities,startSimNum,endSimNum,'DBS',PCAWG,verbose)
print("--- Merge simulations chr based files with Mutation Probabilities: %s seconds" %(time.time()-start_time))
print("--- Merge simulations chr based files with Mutation Probabilities: %f minutes" %(float((time.time()-start_time)/60)))
print('--- Merge simulations chr based files with Mutation Probabilities ends')
print('#################################################################################\n')
####################################################################################################################
################## Merge simulations chr based files with Mutation Probabilities ends #############################
####################################################################################################################
# Step3 was skipped: recover the signature column order either from the probabilities files
# (nrows=0 reads only the header row) or, failing that, from a previously written
# chr1 topography file under outputDir.
else:
for mutation_type_context in mutation_types_contexts:
if (mutation_type_context in SBS_CONTEXTS):
if ((sbs_probabilities is not None) and (os.path.exists(sbs_probabilities))):
ordered_all_sbs_signatures_wrt_probabilities_file_array = pd.read_csv(sbs_probabilities, sep='\t', nrows=0).columns.values
else:
filename = '%s_%s_for_topography.txt' % ('chr1', SUBS)
chrBasedMutationDFFilePath = os.path.join(outputDir, jobname, DATA, CHRBASED, filename)
if os.path.exists(chrBasedMutationDFFilePath):
ordered_all_sbs_signatures_wrt_probabilities_file_array = pd.read_csv(chrBasedMutationDFFilePath,sep='\t', nrows=0).columns.values
print('ordered_all_sbs_signatures_wrt_probabilities_file_array:%s' %(ordered_all_sbs_signatures_wrt_probabilities_file_array))
else:
print('There is a problem: ordered_all_sbs_signatures_wrt_probabilities_file_array is not filled.')
if (DBS in mutation_types_contexts):
if ((dbs_probabilities is not None) and (os.path.exists(dbs_probabilities))):
ordered_all_dbs_signatures_wrt_probabilities_file_array = pd.read_csv(dbs_probabilities, sep='\t', nrows=0).columns.values
else:
filename = '%s_%s_for_topography.txt' % ('chr1', DINUCS)
chrBasedMutationDFFilePath = os.path.join(outputDir, jobname, DATA, CHRBASED, filename)
if os.path.exists(chrBasedMutationDFFilePath):
ordered_all_dbs_signatures_wrt_probabilities_file_array = pd.read_csv(chrBasedMutationDFFilePath, sep='\t', nrows=0).columns.values
print('ordered_all_dbs_signatures_wrt_probabilities_file_array:%s' %(ordered_all_dbs_signatures_wrt_probabilities_file_array))
else:
print('There is a problem: ordered_all_dbs_signatures_wrt_probabilities_file_array is not filled.')
if (ID in mutation_types_contexts):
if ((id_probabilities is not None) and (os.path.exists(id_probabilities))):
ordered_all_id_signatures_wrt_probabilities_file_array = pd.read_csv(id_probabilities,sep='\t', nrows=0).columns.values
else:
filename = '%s_%s_for_topography.txt' % ('chr1', INDELS)
chrBasedMutationDFFilePath = os.path.join(outputDir, jobname, DATA, CHRBASED, filename)
if os.path.exists(chrBasedMutationDFFilePath):
ordered_all_id_signatures_wrt_probabilities_file_array = pd.read_csv(chrBasedMutationDFFilePath, sep='\t', nrows=0).columns.values
print('ordered_all_id_signatures_wrt_probabilities_file_array:%s' %(ordered_all_id_signatures_wrt_probabilities_file_array))
else:
print('There is a problem: ordered_all_id_signatures_wrt_probabilities_file_array is not filled.')
###################################################################################################################
########### Step# Merge chrom based matrix generator generated files with probabilities ends ######################
###################################################################################################################
#######################################################################################################
################################### Step4 Fill Table Starts ###########################################
#######################################################################################################
# Step4 Initialize these dataframes as empty dataframe
# Step4 We will fill these dataframes if there is the corresponding data
subsSignature_cutoff_numberofmutations_averageprobability_df = pd.DataFrame()
dinucsSignature_cutoff_numberofmutations_averageprobability_df = pd.DataFrame()
indelsSignature_cutoff_numberofmutations_averageprobability_df = pd.DataFrame()
# Fill these pandas dataframes
# cancer_type signature number_of_mutations average_probability samples_list len(samples_list) len(all_samples_list) percentage_of_samples
sbs_signature_number_of_mutations_df = pd.DataFrame()
dbs_signature_number_of_mutations_df = pd.DataFrame()
id_signature_number_of_mutations_df = pd.DataFrame()
mutationtype_numberofmutations_numberofsamples_sampleslist_df = pd.DataFrame()
chrlong_numberofmutations_df = pd.DataFrame()
# Step4: either compute the per-signature cutoff/number-of-mutations/average-probability tables
# from the merged original data (step4_tables True), or read previously written tables from disk.
if (step4_tables):
#################################################################################
print('#################################################################################')
print('--- Fill tables/dictionaries using original data starts')
start_time = time.time()
##################################################################################
# For each signature we will find a cutoff value for mutations with average probability >=0.9
# Our aim is to have at most 10% false positive rate in mutations
# number of mutations >= 5K for subs signatures
# number of mutations >= 1K for indels signatures
# number of mutations >= 200 for dinuc signatures
# If we can not satisfy this condition we will discard the signature
# Candidate cutoffs scanned: 0.50, 0.51, ..., 0.90 formatted as 2-decimal strings.
cutoffs = []
for cufoff in np.arange(0.5, 0.91, 0.01):
cutoffs.append("%.2f" % (cufoff))
# Initialize
# mutationType2PropertiesListDict: PropertiesList consists of [NumberofMutations NumberofSamples SamplesList]
# Both dicts below are filled in-place by fillCutoff2Signature2PropertiesListDictionary.
mutationType2PropertiesDict = {}
chrLong2NumberofMutationsDict = {}
for mutation_type_context in mutation_types_contexts:
if (mutation_type_context in SBS_CONTEXTS):
sbs_signature_number_of_mutations_df = fill_signature_number_of_mutations_df(outputDir,
jobname,
chromNamesList,
SUBS)
sbs_signature_number_of_mutations_df.to_csv(os.path.join(outputDir,
jobname,
DATA,
Table_SBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename),
sep='\t', header=True, index=False)
# We are reading original data to fill the signature2PropertiesListDict
# We are writing all samples_mutations_cutoffs_tables and signature based decided samples_mutations_cutoffs_tables in table format.
subsSignature_cutoff_numberofmutations_averageprobability_df = fillCutoff2Signature2PropertiesListDictionary(
outputDir,
jobname,
chromNamesList,
SUBS,
cutoffs,
average_probability,
num_of_sbs_required,
num_of_id_required,
num_of_dbs_required,
mutationType2PropertiesDict,
chrLong2NumberofMutationsDict)
if (DBS in mutation_types_contexts):
dbs_signature_number_of_mutations_df = fill_signature_number_of_mutations_df(outputDir,
jobname,
chromNamesList,
DINUCS)
dbs_signature_number_of_mutations_df.to_csv(os.path.join(outputDir,
jobname,
DATA,
Table_DBS_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename),
sep='\t', header=True, index=False)
# We are reading original data to fill the signature2PropertiesListDict
# We are writing all samples_mutations_cutoffs_tables and signature based decided samples_mutations_cutoffs_tables in table format.
dinucsSignature_cutoff_numberofmutations_averageprobability_df = fillCutoff2Signature2PropertiesListDictionary(
outputDir,
jobname,
chromNamesList,
DINUCS,
cutoffs,
average_probability,
num_of_sbs_required,
num_of_id_required,
num_of_dbs_required,
mutationType2PropertiesDict,
chrLong2NumberofMutationsDict)
if (ID in mutation_types_contexts):
id_signature_number_of_mutations_df = fill_signature_number_of_mutations_df(outputDir,
jobname,
chromNamesList,
INDELS)
id_signature_number_of_mutations_df.to_csv(os.path.join(outputDir,
jobname,
DATA,
Table_ID_Signature_Probability_Mode_NumberofMutations_AverageProbability_Filename),
sep='\t', header=True, index=False)
# We are reading original data to fill the signature2PropertiesListDict
# We are writing all samples_mutations_cutoffs_tables and signature based decided samples_mutations_cutoffs_tables in table format.
indelsSignature_cutoff_numberofmutations_averageprobability_df = fillCutoff2Signature2PropertiesListDictionary(
outputDir,
jobname,
chromNamesList,
INDELS,
cutoffs,
average_probability,
num_of_sbs_required,
num_of_id_required,
num_of_dbs_required,
mutationType2PropertiesDict,
chrLong2NumberofMutationsDict)
####################################################################
# Add the last row
# Aggregate an 'All' row: total mutations across mutation types and the union of all samples.
numberofMutations = 0
all_samples = set()
for mutation_type in mutationType2PropertiesDict:
numberofMutations += mutationType2PropertiesDict[mutation_type]['number_of_mutations']
samples_list = mutationType2PropertiesDict[mutation_type]['samples_list']
all_samples = all_samples.union(samples_list)
all_samples_list=list(all_samples)
all_samples_list = sorted(all_samples_list, key=natural_key)
print("--- Number of samples: %d" %len(all_samples_list))
print("--- Samples: %s" %(all_samples_list))
all_samples_np_array=np.array(all_samples_list)
mutationType2PropertiesDict['All']={}
mutationType2PropertiesDict['All']['number_of_mutations'] = numberofMutations
mutationType2PropertiesDict['All']['number_of_samples'] = len(all_samples)
mutationType2PropertiesDict['All']['samples_list'] = all_samples_list
# Write mutationType2PropertiesListDict dictionary as a dataframe starts
filePath = os.path.join(outputDir, jobname, DATA, Table_MutationType_NumberofMutations_NumberofSamples_SamplesList_Filename)
L = sorted([(mutation_type, a['number_of_mutations'], a['number_of_samples'], a['samples_list'])
for mutation_type, a in mutationType2PropertiesDict.items()])
if L:
mutationtype_numberofmutations_numberofsamples_sampleslist_df = pd.DataFrame(L, columns=['mutation_type', 'number_of_mutations', 'number_of_samples', 'samples_list'])
# write this dataframe
mutationtype_numberofmutations_numberofsamples_sampleslist_df.to_csv(filePath, sep='\t', header=True, index=False)
# Write dictionary as a dataframe ends
####################################################################
# Write chrLong2NumberofMutationsDict dictionary as a dataframe starts
filePath = os.path.join(outputDir, jobname, DATA, Table_ChrLong_NumberofMutations_Filename)
L = sorted([(chrLong, number_of_mutations)
for chrLong, number_of_mutations in chrLong2NumberofMutationsDict.items()])
if L:
chrlong_numberofmutations_df = pd.DataFrame(L, columns=['chrLong', 'number_of_mutations'])
# write this dataframe
chrlong_numberofmutations_df.to_csv(filePath, sep='\t', header=True, index=False)
# Write dictionary as a dataframe ends
##################################################################################
# We are reading original data again to fill the mutationType based, sample based and signature based dictionaries
# This part is deprecated
if sample_based:
# Using original data
for mutation_type_context in mutation_types_contexts:
if (mutation_type_context in SBS_CONTEXTS):
fill_mutations_dictionaries_write(outputDir, jobname, chromNamesList, SUBS,
subsSignature_cutoff_numberofmutations_averageprobability_df, num_of_sbs_required, num_of_id_required,
num_of_dbs_required)
if (DBS in mutation_types_contexts):
fill_mutations_dictionaries_write(outputDir, jobname, chromNamesList, DINUCS,
dinucsSignature_cutoff_numberofmutations_averageprobability_df,
num_of_sbs_required,
num_of_id_required,
num_of_dbs_required)
if (ID in mutation_types_contexts):
fill_mutations_dictionaries_write(outputDir, jobname, chromNamesList, INDELS,
indelsSignature_cutoff_numberofmutations_averageprobability_df,
num_of_sbs_required,
num_of_id_required,
num_of_dbs_required)
##################################################################################
print("--- Fill tables/dictionaries using original data: %s seconds" % (time.time() - start_time))
print("--- Fill tables/dictionaries using original data: %f minutes" % (float((time.time() - start_time) / 60)))
print('--- Fill tables/dictionaries using original data ends')
print('#################################################################################\n')
#################################################################################
# step4_tables is False: load the previously written tables from outputDir instead of recomputing.
else:
mutationtype_numberofmutations_numberofsamples_sampleslist_df=pd.read_csv(os.path.join(outputDir,jobname,DATA,Table_MutationType_NumberofMutations_NumberofSamples_SamplesList_Filename),sep='\t', header=0, dtype={'mutation_type':str, 'number_of_mutations':np.int32})
all_samples_string=mutationtype_numberofmutations_numberofsamples_sampleslist_df[mutationtype_numberofmutations_numberofsamples_sampleslist_df['mutation_type']=='All']['samples_list'].values[0]
# NOTE(review): eval() on file contents — the samples_list column is a Python-list string written
# by this pipeline; acceptable only because the file is self-produced (ast.literal_eval would be safer).
all_samples_list=eval(all_samples_string)
all_samples_list = sorted(all_samples_list, key=natural_key)
all_samples_np_array=np.array(all_samples_list)
print('sample_based:%s --- len(all_samples_list):%d --- all_samples_list:%s' %(sample_based,len(all_samples_list), all_samples_list))
chrlong_numberofmutations_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_ChrLong_NumberofMutations_Filename), sep='\t',header=0, dtype={'chrLong': str, 'number_of_mutations': np.int32})
for mutation_type_context in mutation_types_contexts:
if (mutation_type_context in SBS_CONTEXTS):
subsSignature_cutoff_numberofmutations_averageprobability_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_SBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename),sep='\t', header=0, dtype={'cutoff':np.float32,'signature':str, 'number_of_mutations':np.int32,'average_probability':np.float32})
if (DBS in mutation_types_contexts):
dinucsSignature_cutoff_numberofmutations_averageprobability_df = pd.read_csv(os.path.join(outputDir, jobname, DATA, Table_DBS_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename), sep='\t',header=0, dtype={'cutoff': np.float32, 'signature': str, 'number_of_mutations': np.int32,'average_probability': np.float32})
if (ID in mutation_types_contexts):
indelsSignature_cutoff_numberofmutations_averageprobability_df= pd.read_csv(os.path.join(outputDir,jobname,DATA, Table_ID_Signature_Discreet_Mode_Cutoff_NumberofMutations_AverageProbability_Filename),sep='\t', header=0, dtype={'cutoff':np.float32,'signature':str, 'number_of_mutations':np.int32,'average_probability':np.float32})
#######################################################################################################
################################### Step4 Fill Table ends #############################################
#######################################################################################################
###################################################################################################################
################################################# All Steps ends ##################################################
###################################################################################################################
####################################################################################################################
# Fill numpy arrays with the signatures in cutoff files
sbs_signatures_with_cutoffs = np.array([])
dbs_signatures_with_cutoffs = np.array([])
id_signatures_with_cutoffs = np.array([])
# Fill ordered_signatures arrays w.r.t the order in probabilities file
# cutoffs_df (e.g.: subsSignature_cutoff_numberofmutations_averageprobability_df) are filled in (Step4=True or False but full_mode=True) or full_mode=False
# ordered_signatures_wrt_probabilities_file are filled in (Step3=True or False but full_mode=True) or full_mode=False
# We are interested in the signatures in cutoffs_df
# But user might have changed the order of lines in cutoffs_df
# Therefore we are setting the order in signatures_array and signatures_cutoff_arrays w.r.t. probabilities file
ordered_sbs_signatures_with_cutoffs = np.array([])
ordered_dbs_signatures_with_cutoffs = np.array([])
ordered_id_signatures_with_cutoffs = np.array([])
# Fill the list with the cutoff values
# Fill ordered_signatures_cutoffs
ordered_sbs_signatures_cutoffs = []
ordered_dbs_signatures_cutoffs = []
ordered_id_signatures_cutoffs = []
if not subsSignature_cutoff_numberofmutations_averageprobability_df.empty:
sbs_signatures_with_cutoffs = subsSignature_cutoff_numberofmutations_averageprobability_df['signature'].values
if not dinucsSignature_cutoff_numberofmutations_averageprobability_df.empty:
dbs_signatures_with_cutoffs = dinucsSignature_cutoff_numberofmutations_averageprobability_df['signature'].values
if not indelsSignature_cutoff_numberofmutations_averageprobability_df.empty:
id_signatures_with_cutoffs = indelsSignature_cutoff_numberofmutations_averageprobability_df['signature'].values
# For each mutation category: keep only probabilities-file columns that are signatures with cutoffs
# (np.isin mask preserves the probabilities-file column order), then collect each signature's cutoff
# in that same order.
if ordered_all_sbs_signatures_wrt_probabilities_file_array is not None:
df_columns_subs_signatures_mask_array = np.isin(ordered_all_sbs_signatures_wrt_probabilities_file_array, sbs_signatures_with_cutoffs)
ordered_sbs_signatures_with_cutoffs = ordered_all_sbs_signatures_wrt_probabilities_file_array[df_columns_subs_signatures_mask_array]
for signature in ordered_sbs_signatures_with_cutoffs:
cutoff = subsSignature_cutoff_numberofmutations_averageprobability_df[subsSignature_cutoff_numberofmutations_averageprobability_df['signature'] == signature]['cutoff'].values[0]
ordered_sbs_signatures_cutoffs.append(cutoff)
if ordered_all_dbs_signatures_wrt_probabilities_file_array is not None:
df_columns_dbs_signatures_mask_array = np.isin(ordered_all_dbs_signatures_wrt_probabilities_file_array, dbs_signatures_with_cutoffs)
ordered_dbs_signatures_with_cutoffs = ordered_all_dbs_signatures_wrt_probabilities_file_array[df_columns_dbs_signatures_mask_array]
for signature in ordered_dbs_signatures_with_cutoffs:
cutoff = dinucsSignature_cutoff_numberofmutations_averageprobability_df[dinucsSignature_cutoff_numberofmutations_averageprobability_df['signature'] == signature]['cutoff'].values[0]
ordered_dbs_signatures_cutoffs.append(cutoff)
if ordered_all_id_signatures_wrt_probabilities_file_array is not None:
df_columns_id_signatures_mask_array = np.isin(ordered_all_id_signatures_wrt_probabilities_file_array, id_signatures_with_cutoffs)
ordered_id_signatures_with_cutoffs = ordered_all_id_signatures_wrt_probabilities_file_array[df_columns_id_signatures_mask_array]
for signature in ordered_id_signatures_with_cutoffs:
cutoff = indelsSignature_cutoff_numberofmutations_averageprobability_df[indelsSignature_cutoff_numberofmutations_averageprobability_df['signature'] == signature]['cutoff'].values[0]
ordered_id_signatures_cutoffs.append(cutoff)
# Convert cutoff lists to numpy arrays for downstream vectorized use.
ordered_sbs_signatures_cutoffs = np.array(ordered_sbs_signatures_cutoffs)
ordered_dbs_signatures_cutoffs = np.array(ordered_dbs_signatures_cutoffs)
ordered_id_signatures_cutoffs = np.array(ordered_id_signatures_cutoffs)
####################################################################################################################
# Get all signatures ordered array w.r.t. the probabilities file
ordered_all_sbs_signatures_array = get_all_signatures_array(ordered_all_sbs_signatures_wrt_probabilities_file_array, SBS)
ordered_all_dbs_signatures_array = get_all_signatures_array(ordered_all_dbs_signatures_wrt_probabilities_file_array, DBS)
ordered_all_id_signatures_array = get_all_signatures_array(ordered_all_id_signatures_wrt_probabilities_file_array, ID)
####################################################################################################################
################################### Run SigProfilerTopography Analysis starts ######################################
####################################################################################################################
# Dispatch the requested topography analyses. Each analysis is gated by its boolean flag,
# optionally deletes its previous output (delete_old), and prints its own wall-clock timing.
print('#################################################################################')
print('--- Run SigProfilerTopography Analysis starts')
if (computation_type==USING_APPLY_ASYNC_FOR_EACH_CHROM_AND_SIM_SPLIT):
job_tuples=get_job_tuples(chrlong_numberofmutations_df,numofSimulations)
else:
job_tuples=[]
if (nucleosome):
#Nucleosome Occupancy
occupancy_type = NUCLEOSOMEOCCUPANCY
if delete_old:
deleteOldData(outputDir,jobname,occupancy_type)
start_time = time.time()
runOccupancyAnalyses(genome,
outputDir,
jobname,
numofSimulations,
job_tuples,
sample_based,
nucleosome_file,
None,
chromSizesDict,
chromNamesList,
ordered_all_sbs_signatures_array,
ordered_all_dbs_signatures_array,
ordered_all_id_signatures_array,
ordered_sbs_signatures_with_cutoffs,
ordered_dbs_signatures_with_cutoffs,
ordered_id_signatures_with_cutoffs,
ordered_sbs_signatures_cutoffs,
ordered_dbs_signatures_cutoffs,
ordered_id_signatures_cutoffs,
computation_type,
occupancy_type,
occupancy_calculation_type,
plusorMinus_nucleosome,
remove_outliers,
quantileValue,
is_discreet,
verbose)
print('#################################################################################')
print("--- Run Nucleosome Occupancy Analyses: %s seconds --- %s" %((time.time()-start_time),nucleosome_file))
print("--- Run Nucleosome Occupancy Analyses: %f minutes --- %s" %(float((time.time()-start_time)/60),nucleosome_file))
print('#################################################################################\n')
if (replication_time):
# Replication Time
# Required genome is already downloaded by matrix generator
if delete_old:
deleteOldData(outputDir,jobname,REPLICATIONTIME)
start_time = time.time()
runReplicationTimeAnalysis(genome,
outputDir,
jobname,
numofSimulations,
job_tuples,
sample_based,
replication_time_signal_file,
chromSizesDict,
chromNamesList,
computation_type,
ordered_all_sbs_signatures_array,
ordered_all_dbs_signatures_array,
ordered_all_id_signatures_array,
ordered_sbs_signatures_with_cutoffs,
ordered_dbs_signatures_with_cutoffs,
ordered_id_signatures_with_cutoffs,
ordered_sbs_signatures_cutoffs,
ordered_dbs_signatures_cutoffs,
ordered_id_signatures_cutoffs,
is_discreet,
verbose,
matrix_generator_path)
print('#################################################################################')
print("--- Run Replication Time Analyses: %s seconds --- %s" %((time.time()-start_time),computation_type))
print("--- Run Replication Time Analyses: %f minutes --- %s" %(float((time.time()-start_time)/60),computation_type))
print('#################################################################################\n')
if replication_strand_bias:
# Replication Strand Bias
if delete_old:
deleteOldData(outputDir,jobname,REPLICATIONSTRANDBIAS)
start_time = time.time()
runReplicationStrandBiasAnalysis(outputDir,
jobname,
numofSimulations,
job_tuples,
sample_based,
all_samples_np_array,
replication_time_signal_file,
replication_time_valley_file,
replication_time_peak_file,
chromSizesDict,
chromNamesList,
computation_type,
ordered_all_sbs_signatures_array,
ordered_all_dbs_signatures_array,
ordered_all_id_signatures_array,
ordered_sbs_signatures_with_cutoffs,
ordered_dbs_signatures_with_cutoffs,
ordered_id_signatures_with_cutoffs,
ordered_sbs_signatures_cutoffs,
ordered_dbs_signatures_cutoffs,
ordered_id_signatures_cutoffs,
is_discreet,
verbose)
print('#################################################################################')
print("--- Run Replication Strand Bias Analyses: %s seconds --- %s" %((time.time()-start_time),computation_type))
print("--- Run Replication Strand Bias Analyses: %f minutes --- %s" %(float((time.time()-start_time)/60),computation_type))
print('#################################################################################\n')
if transcription_strand_bias:
# Transcription Strand Bias
if delete_old:
deleteOldData(outputDir,jobname,TRANSCRIPTIONSTRANDBIAS)
start_time = time.time()
runTranscriptionStradBiasAnalysis(outputDir,
jobname,
numofSimulations,
job_tuples,
sample_based,
all_samples_np_array,
chromNamesList,
computation_type,
ordered_all_sbs_signatures_array,
ordered_all_dbs_signatures_array,
ordered_all_id_signatures_array,
ordered_sbs_signatures_with_cutoffs,
ordered_dbs_signatures_with_cutoffs,
ordered_id_signatures_with_cutoffs,
ordered_sbs_signatures_cutoffs,
ordered_dbs_signatures_cutoffs,
ordered_id_signatures_cutoffs,
is_discreet,
verbose)
print('#################################################################################')
print("--- Run Transcription Strand Bias Analyses: %s seconds --- %s" %((time.time()-start_time),computation_type))
print("--- Run Transcription Strand Bias Analyses: %f minutes --- %s" %(float((time.time()-start_time)/60),computation_type))
print('#################################################################################\n')
if (processivity):
# Processivity
if delete_old:
deleteOldData(outputDir,jobname,PROCESSIVITY)
start_time = time.time()
runProcessivityAnalysis(mutation_types_contexts,
outputDir,
jobname,
numofSimulations,
chromNamesList,
processivity_calculation_type,
inter_mutational_distance_for_processivity,
subsSignature_cutoff_numberofmutations_averageprobability_df,
verbose)
print('#################################################################################')
print("--- Run Processivity Analyses: %s seconds ---" %(time.time()-start_time))
print("--- Run Processivity Analyses: %f minutes ---" %(float((time.time()-start_time)/60)))
print('#################################################################################\n')
if (epigenomics):
#Epigenomics
#If there is a user provided name use it as occupancy_type
if (epigenomics_dir_name is not None):
occupancy_type=epigenomics_dir_name
else:
occupancy_type=EPIGENOMICSOCCUPANCY
if delete_old:
deleteOldData(outputDir,jobname,occupancy_type)
#Run for each epigenomics file
for idx, epigenomics_file in enumerate(epigenomics_files):
start_time = time.time()
# Use the user-supplied memo for this file if the memo list matches the file list one-to-one;
# otherwise fall back to the file's basename without extension.
if (epigenomics_files_memos is not None) and (len(epigenomics_files_memos)==len(epigenomics_files)):
epigenomics_file_memo= epigenomics_files_memos[idx]
else:
epigenomics_file_memo = os.path.splitext(os.path.basename(epigenomics_file))[0]
runOccupancyAnalyses(genome,
outputDir,
jobname,
numofSimulations,
job_tuples,
sample_based,
epigenomics_file,
epigenomics_file_memo,
chromSizesDict,
chromNamesList,
ordered_all_sbs_signatures_array,
ordered_all_dbs_signatures_array,
ordered_all_id_signatures_array,
ordered_sbs_signatures_with_cutoffs,
ordered_dbs_signatures_with_cutoffs,
ordered_id_signatures_with_cutoffs,
ordered_sbs_signatures_cutoffs,
ordered_dbs_signatures_cutoffs,
ordered_id_signatures_cutoffs,
computation_type,
occupancy_type,
occupancy_calculation_type,
plusorMinus_epigenomics,
remove_outliers,
quantileValue,
is_discreet,
verbose)
print('#################################################################################')
print("--- Run Epigenomics Analyses: %s seconds --- %s" %((time.time()-start_time),epigenomics_file))
print("--- Run Epigenomics Analyses: %f minutes --- %s" %(float((time.time()-start_time)/60),epigenomics_file))
print('#################################################################################\n')
print('--- Run SigProfilerTopography Analysis ends')
print('#################################################################################\n')
####################################################################################################################
################################### Run SigProfilerTopography Analysis ends ########################################
####################################################################################################################
####################################################################################################################
############################################ Plot figures starts ###################################################
####################################################################################################################
if (plot_figures):
print('#################################################################################')
print('--- Plot figures starts')
start_time = time.time()
plotFigures(outputDir,
jobname,
numofSimulations,
sample_based,
mutation_types_contexts,
epigenomics_files,
epigenomics_files_memos,
epigenomics_biosamples,
epigenomics_dna_elements,
epigenomics_dir_name,
nucleosome_file,
nucleosome_biosample,
epigenomics,
nucleosome,
replication_time,
replication_strand_bias,
transcription_strand_bias,
processivity,
plusorMinus_epigenomics,
plusorMinus_nucleosome,
epigenomics_heatmap_significance_level,
is_discreet,
verbose,
plot_epigenomics,
plot_nucleosome,
plot_replication_time,
plot_replication_strand_bias,
plot_transcription_strand_bias,
plot_processivity,
delete_old,
plot_mode,
combine_p_values_method,
fold_change_window_size,
num_of_real_data_avg_overlap)
print('#################################################################################')
print("--- Plot Figures: %s seconds ---" %(time.time()-start_time))
print("--- Plot Figures: %f minutes ---" %(float((time.time()-start_time)/60)))
print('--- Plot figures ends')
print('#################################################################################\n')
####################################################################################################################
############################################ Plot figures ends #####################################################
####################################################################################################################
print('#################################################################################')
print("--- SigProfilerTopography ended successfully")
print("--- Thanks for using SigProfilerTopography")
print('#################################################################################\n')
#######################################################
# Plot figures for the data attained after the SigProfilerTopography analyses
def plotFigures(outputDir,
                jobname,
                numberofSimulations,
                sample_based,
                mutation_types_contexts,
                epigenomics_files,
                epigenomics_files_memos,
                epigenomics_biosamples,
                epigenomics_dna_elements,
                epigenomics_dir_name,
                nucleosome_file,
                nucleosome_biosample,
                epigenomics,
                nucleosome,
                replication_time,
                replication_strand_bias,
                transcription_strand_bias,
                processivity,
                plusOrMinus_epigenomics,
                plusOrMinus_nucleosome,
                epigenomics_heatmap_significance_level,
                is_discreet,
                verbose,
                plot_epigenomics,
                plot_nucleosome,
                plot_replication_time,
                plot_replication_strand_bias,
                plot_transcription_strand_bias,
                plot_processivity,
                delete_old,
                plot_mode,
                combine_p_values_method,
                fold_change_window_size,
                num_of_real_data_avg_overlap):
    """Plot the figures for the completed SigProfilerTopography analyses.

    Each analysis type (nucleosome occupancy, replication time, strand bias,
    processivity, epigenomics occupancy) is plotted when either its analysis
    flag (e.g. ``nucleosome``) or its explicit plot flag (e.g.
    ``plot_nucleosome``) is set. When ``delete_old`` is set, previously
    generated figures for that analysis are removed first.

    Fix: the replication-time completion message used to say "starts" even
    though it is printed after plotting finishes; it now says "ends" like
    every other section.
    """
    # Nucleosome occupancy figures
    if (nucleosome or plot_nucleosome):
        occupancy_type=NUCLEOSOMEOCCUPANCY
        if delete_old:
            deleteOldFigures(outputDir, jobname, occupancy_type)
        nucleosome_file_basename = os.path.basename(nucleosome_file)
        occupancyAverageSignalFigures(outputDir,
                                      jobname,
                                      numberofSimulations,
                                      sample_based,
                                      mutation_types_contexts,
                                      nucleosome_file_basename,
                                      None,
                                      occupancy_type,
                                      plusOrMinus_nucleosome,
                                      is_discreet,
                                      verbose,
                                      plot_mode)
        print("--- Plot nucleosome occupancy ends")
    # Replication-time figures
    if (replication_time or plot_replication_time):
        if delete_old:
            deleteOldFigures(outputDir, jobname, REPLICATIONTIME)
        replicationTimeNormalizedMutationDensityFigures(outputDir,
                                                        jobname,
                                                        numberofSimulations,
                                                        sample_based,
                                                        mutation_types_contexts,
                                                        is_discreet,
                                                        plot_mode)
        # Bug fix: this message is printed after plotting completes, so it must
        # say "ends" (it previously said "starts").
        print("--- Plot replication time ends")
    # Strand-bias figures: which comparisons are plotted depends on which of
    # the two strand-bias analyses were run/requested.
    if ((replication_strand_bias and transcription_strand_bias) or (plot_replication_strand_bias and plot_transcription_strand_bias)):
        if delete_old:
            deleteOldFigures(outputDir, jobname, STRANDBIAS)
        # old way
        # transcriptionReplicationStrandBiasFigures(outputDir,jobname,figureAugmentation,numberofSimulations,sample_based)
        strand_bias_list=[TRANSCRIBED_VERSUS_UNTRANSCRIBED,GENIC_VERSUS_INTERGENIC,LAGGING_VERSUS_LEADING]
        transcriptionReplicationStrandBiasFiguresUsingDataframes(outputDir, jobname, numberofSimulations, mutation_types_contexts, strand_bias_list, is_discreet, plot_mode)
        print("--- Plot strand bias ends")
    elif (replication_strand_bias or plot_replication_strand_bias):
        strand_bias_list=[LAGGING_VERSUS_LEADING]
        transcriptionReplicationStrandBiasFiguresUsingDataframes(outputDir, jobname, numberofSimulations, mutation_types_contexts, strand_bias_list, is_discreet, plot_mode)
        print("--- Plot strand bias ends")
    elif (transcription_strand_bias or plot_transcription_strand_bias):
        strand_bias_list=[TRANSCRIBED_VERSUS_UNTRANSCRIBED,GENIC_VERSUS_INTERGENIC]
        transcriptionReplicationStrandBiasFiguresUsingDataframes(outputDir, jobname, numberofSimulations, mutation_types_contexts, strand_bias_list, is_discreet, plot_mode)
        print("--- Plot strand bias ends")
    # Processivity figures
    if (processivity or plot_processivity):
        if delete_old:
            deleteOldFigures(outputDir, jobname, PROCESSIVITY)
        processivityFigures(outputDir,jobname,numberofSimulations,verbose)
        print("--- Plot processivity ends")
    # Epigenomics occupancy figures: one plotting job per epigenomics file,
    # fanned out over a multiprocessing pool.
    if (epigenomics or plot_epigenomics):
        if epigenomics_dir_name is not None:
            occupancy_type=epigenomics_dir_name
        else:
            occupancy_type=EPIGENOMICSOCCUPANCY
        if delete_old:
            deleteOldFigures(outputDir, jobname, occupancy_type)
        # Initiate the pool
        numofProcesses = multiprocessing.cpu_count()
        pool = multiprocessing.Pool(numofProcesses)
        jobs=[]
        # Please note that epigenomics_file_memo is not None
        # If None then it is created from filename.
        for idx, epigenomics_file in enumerate(epigenomics_files):
            epigenomics_file_basename = os.path.basename(epigenomics_file)
            epigenomics_file_memo= epigenomics_files_memos[idx]
            jobs.append(pool.apply_async(occupancyAverageSignalFigures,
                                         args=(outputDir,
                                               jobname,
                                               numberofSimulations,
                                               sample_based,
                                               mutation_types_contexts,
                                               epigenomics_file_basename,
                                               epigenomics_file_memo,
                                               occupancy_type,
                                               plusOrMinus_epigenomics,
                                               is_discreet,
                                               verbose,
                                               plot_mode,)))
        if verbose: print('\tVerbose %s Plotting figures len(jobs):%d ' %(occupancy_type,len(jobs)))
        # Wait for all jobs to finish.
        # NOTE(review): job.get() is only called when verbose, so worker
        # exceptions are silently lost otherwise; pool.join() still waits
        # for completion. Consider calling job.get() unconditionally.
        for job in jobs:
            if verbose: print('\n\tVerbose %s Worker pid %s Plotting figures job.get():%s ' %(occupancy_type,str(os.getpid()),job.get()))
        pool.close()
        pool.join()
        print("--- Plot epigenomics occupancy ends")
        # original old call
        # sequential
        # occupancyAverageSignalFigures(outputDir, jobname, figureAugmentation, numberofSimulations,sample_based, mutationTypes,epigenomics_file_basename,epigenomics_file_memo,occupancy_type,plusOrMinus_epigenomics,verbose)
        compute_fold_change_with_p_values_plot_heatmaps(combine_p_values_method,
                                                        fold_change_window_size,
                                                        num_of_real_data_avg_overlap,
                                                        outputDir,
                                                        jobname,
                                                        numberofSimulations,
                                                        mutation_types_contexts,
                                                        nucleosome_file,
                                                        nucleosome_biosample,
                                                        epigenomics_files_memos,
                                                        epigenomics_biosamples,
                                                        epigenomics_dna_elements,
                                                        plusOrMinus_epigenomics,
                                                        plusOrMinus_nucleosome,
                                                        epigenomics_heatmap_significance_level,
                                                        is_discreet,
                                                        verbose)
        print("--- Plot epigenomics heatmaps ends")
##############################################################
# To run on laptop
import os
if __name__== "__main__":
genome = 'GRCh37'
jobname = 'Test-Skin-Melanoma'
numberofSimulations = 2
inputDir = '/oasis/tscc/scratch/burcak/developer/python/SigProfilerTopography/SigProfilerTopography/input/PCAWG_Matlab_Clean/Skin-Melanoma/filtered/'
outputDir = os.path.join('C:\\','Users','burcak','Developer','Python','SigProfilerTopography','SigProfilerTopography','output_test')
sbs_probabilities_file_path = os.path.join('C:\\','Users','burcak','Developer','Python','SigProfilerTopography','SigProfilerTopography','output_for_extractor','PCAWG_Matlab','Skin-Melanoma_sbs96_mutation_probabilities.txt')
id_probabilities_file_path = os.path.join('C:\\','Users','burcak','Developer','Python','SigProfilerTopography','SigProfilerTopography','output_for_extractor','PCAWG_Matlab','Skin-Melanoma_id83_mutation_probabilities.txt')
dbs_probabilities_file_path = os.path.join('C:\\','Users','burcak','Developer','Python','SigProfilerTopography','SigProfilerTopography','output_for_extractor','PCAWG_Matlab','Skin-Melanoma_dbs_mutation_probabilities.txt')
# user_provided_replication_time_file_path = os.path.join('C:\\','Users','burcak','Developer','Python','SigProfilerTopography','SigProfilerTopography','lib','replication','wgEncodeUwRepliSeqNhekWaveSignalRep1.wig')
# user_provided_replication_time_valley_file_path = os.path.join('C:\\','Users','burcak','Developer','Python','SigProfilerTopography','SigProfilerTopography','lib','replication','wgEncodeUwRepliSeqNhekValleysRep1.bed')
# user_provided_replication_time_peak_file_path = os.path.join('C:\\','Users','burcak','Developer','Python','SigProfilerTopography','SigProfilerTopography','lib','replication','wgEncodeUwRepliSeqNhekPkRep1.bed')
# user_provided_nucleosome_file_path= os.path.join('C:\\','Users','burcak','Developer','Python','SigProfilerTopography','SigProfilerTopography','lib','nucleosome','wgEncodeSydhNsomeK562Sig.wig')
user_provided_nucleosome_file_path = os.path.join('C:\\', 'Users', 'burcak', 'Developer', 'Python','SigProfilerTopography', 'SigProfilerTopography', 'lib','nucleosome', 'wgEncodeSydhNsomeGm12878Sig.wig')
# user_provided_nucleosome_file_path= os.path.join('C:\\','Users','burcak','Developer','Python','SigProfilerTopography','SigProfilerTopography','lib','nucleosome','wgEncodeSydhNsomeGm12878Sig.bigWig')
runAnalyses(genome, inputDir, outputDir, jobname, numberofSimulations,
sbs_probabilities=sbs_probabilities_file_path,
id_probabilities=id_probabilities_file_path,
dbs_probabilities=dbs_probabilities_file_path,
# nucleosome_biosample='K562',
# replication_time_biosample='NHEK',
# nucleosome_file=user_provided_nucleosome_file_path,
# replication_time_signal_file=user_provided_replication_time_file_path,
# replication_time_valley_file=user_provided_replication_time_valley_file_path,
# replication_time_peak_file=user_provided_replication_time_peak_file_path,
epigenomics=True, nucleosome=False, replication_time=False, strand_bias=False, processivity=False,
sample_based=False, new_simulations_enforced=False, full_mode=False, verbose=False,necessary_dictionaries_already_exists=True)
##############################################################
| 135,082 | 0 | 470 |
e6f0ae213c3940abf9a8efd1a6587d1fed9d63f7 | 5,782 | py | Python | mincall/hyperparam/_hyperparam.py | nmiculinic/minion-basecaller | 73a134c8ed3715e79319780e24a171dd21713408 | [
"MIT"
] | 7 | 2017-07-13T15:08:16.000Z | 2021-04-24T16:39:11.000Z | mincall/hyperparam/_hyperparam.py | nmiculinic/minion-basecaller | 73a134c8ed3715e79319780e24a171dd21713408 | [
"MIT"
] | 4 | 2018-01-09T13:50:57.000Z | 2020-07-15T15:33:35.000Z | mincall/hyperparam/_hyperparam.py | nmiculinic/minion-basecaller | 73a134c8ed3715e79319780e24a171dd21713408 | [
"MIT"
] | 1 | 2018-03-24T22:48:25.000Z | 2018-03-24T22:48:25.000Z | from typing import *
from itertools import count
import os
from pprint import pformat
import logging
import cytoolz as toolz
import numpy as np
import yaml
import argparse
import voluptuous
from mincall.common import *
from mincall import train
from mincall.train import DataDir, TrainConfig
from voluptuous.humanize import humanize_error
from ._solvers import AbstractSolver, available_solvers
from ._types import Param, Observation
import sys
hyperparam_logger = logging.getLogger(".".join(__name__.split(".")[:-1]))
| 30.272251 | 115 | 0.576271 | from typing import *
from itertools import count
import os
from pprint import pformat
import logging
import cytoolz as toolz
import numpy as np
import yaml
import argparse
import voluptuous
from mincall.common import *
from mincall import train
from mincall.train import DataDir, TrainConfig
from voluptuous.humanize import humanize_error
from ._solvers import AbstractSolver, available_solvers
from ._types import Param, Observation
import sys
hyperparam_logger = logging.getLogger(".".join(__name__.split(".")[:-1]))
class HyperParamCfg(NamedTuple):
    """Configuration for one hyperparameter-search run.

    Fields typed as ``Param`` describe a search range that the chosen solver
    samples concrete values from; the remaining fields are fixed settings
    that are passed through to ``mincall.train.TrainConfig`` unchanged.
    """
    # Fixed settings
    model_name: str
    train_data: List[DataDir]
    test_data: List[DataDir]
    # Searchable training parameters (ranges, not concrete values)
    seq_length: Param
    batch_size: Param
    surrogate_base_pair: Param
    train_steps: Param
    init_learning_rate: Param
    lr_decay_steps: Param
    lr_decay_rate: Param
    # Per-model searchable hyperparameters, keyed by hyperparameter name
    model_hparams: Dict[str, Param]
    # Factory producing the solver that drives the search
    solver_class: Callable[[Dict], AbstractSolver]
    work_dir: str
    grad_clipping: float = 10.0
    validate_every: int = 50
    run_trace_every: int = 5000
    save_every: int = 2000

    @classmethod
    def schema(cls, data):
        """Validate *data* (a plain dict, e.g. parsed YAML) into a HyperParamCfg.

        ``solver_class`` is given as a string key and resolved against
        ``available_solvers``; unknown keys raise a voluptuous error.
        """
        return named_tuple_helper(
            cls, {
                'train_data': [DataDir.schema],
                'test_data': [DataDir.schema],
                'model_hparams': {
                    str: Param.scheme
                },
                'solver_class': lambda x: available_solvers[voluptuous.validators.In(available_solvers.keys())(x)],
            }, data
        )
def run_args(args: argparse.Namespace):
    """CLI entry point: load the YAML config, apply CLI overrides, set up
    per-run file logging and start the hyperparameter search.

    Exits the process with status 1 when the configuration fails schema
    validation.
    """
    logger = hyperparam_logger
    with open(args.config) as f:
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input; consider yaml.safe_load.
        config = yaml.load(f)
    # CLI flags whose dest contains a dot (e.g. "hyperparam.work_dir")
    # override the corresponding nested key in the config dict.
    for k, v in vars(args).items():
        if v is not None and "." in k:
            config = toolz.assoc_in(config, k.split("."), v)
            print(k, v)  # echo each applied override
    try:
        cfg = voluptuous.Schema({
            'hyperparam': HyperParamCfg.schema,
            'version': str,
        },
            extra=voluptuous.REMOVE_EXTRA,
            required=True)(config)
    except voluptuous.error.Error as e:
        logger.error(humanize_error(config, e))
        sys.exit(1)
    formatter = logging.Formatter(
        "%(asctime)s [%(levelname)5s]:%(name)20s: %(message)s"
    )
    cfg: HyperParamCfg = cfg['hyperparam']
    os.makedirs(cfg.work_dir, exist_ok=True)
    # Log file named after the run; falls back to 'mincall_hyper' when the
    # CLI did not provide a name.
    fn = os.path.join(
        cfg.work_dir, f"{getattr(args, 'name', 'mincall_hyper')}.log"
    )
    h = (logging.FileHandler(fn))
    h.setLevel(logging.DEBUG)
    h.setFormatter(formatter)
    hyperparam_logger.addHandler(h)
    logging.info(f"Added handler to {fn}")
    logger.info(f"Parsed config\n{pformat(cfg)}")
    run(cfg)
def add_args(parser: argparse.ArgumentParser):
    """Register the hyperparameter-search CLI options on *parser*."""
    # Required path to the YAML configuration file.
    parser.add_argument("--config", "-c", help="config file", required=True)
    # Optional working-directory override, stored under the nested config key
    # "hyperparam.work_dir" so run_args can splice it into the config dict.
    parser.add_argument(
        "--work_dir",
        "-w",
        dest="hyperparam.work_dir",
        help="working directory"
    )
    # Dispatch target and default run name for this sub-command.
    parser.set_defaults(func=run_args, name="mincall_hyperparam_search")
def make_dict(x, subs: Dict) -> Tuple[Dict, Dict]:
    """Recursively split structure *x* into ``(concrete, params)``.

    ``concrete`` mirrors *x* with values from *subs* substituted in where
    provided; ``params`` is the sub-tree of remaining ``Param`` search-space
    placeholders (same nesting as *x*, only the paths that still hold a
    ``Param``).

    Raises ValueError for lists containing Params and for unsupported types.
    """
    if x is None:
        return {}, {}
    if isinstance(x, (int, str, float, bool)):  # scalar
        return x, {}
    if isinstance(x, Param):
        # A search-space placeholder: surfaces in both positions so the caller
        # can later substitute a concrete value for it.
        return x, x
    if isinstance(x, dict):
        sol = {}
        params = {}
        for k, v in x.items():
            if k in subs and not isinstance(subs[k], dict):
                # A concrete (non-dict) substitution wins outright; no params
                # remain below this key.
                d, p = subs[k], {}
            else:
                # Recurse, narrowing the substitutions to this key's sub-dict.
                d, p = make_dict(v, subs.get(k, {}))
            sol[k] = d
            if len(p):
                params[k] = p
        return sol, params
    if isinstance(x, list):
        sol = []
        # Lists may not contain unresolved Params — there is no stable path to
        # address an element for later substitution.
        for d, p in map(lambda k: make_dict(k, subs), x):
            if len(p) > 0:
                raise ValueError(
                    f"Cannot have params in list!{x}\nparams: {p}\ndata:{d}"
                )
            sol.append(d)
        return sol, {}
    if hasattr(x, '_asdict'):
        # NamedTuple (e.g. HyperParamCfg): treat as a plain dict.
        return make_dict(dict(x._asdict()), subs)
    raise ValueError(f"Unknown type {type(x).__name__}: {x}")
def subs_dict(x, subs: Dict) -> Dict:
    """Return *x* with values from *subs* substituted in, discarding the
    accompanying parameter tree produced by make_dict."""
    return make_dict(x, subs)[0]
def run(cfg: HyperParamCfg):
    """Drive the hyperparameter search: repeatedly ask the solver for a new
    parameter assignment, train a model with it, and report the observed
    identity metric back to the solver.

    Runs forever (the loop has no exit condition); each iteration creates a
    fresh run directory under cfg.work_dir and fails if it already exists.
    """
    logger = hyperparam_logger
    # Keep only the cfg fields that TrainConfig knows about, and split them
    # into concrete values (train_cfg) and searchable Param placeholders.
    train_cfg, params = make_dict(
        toolz.keyfilter(
            lambda x: x in TrainConfig.__annotations__.keys(), cfg._asdict()
        ),
        {},
    )
    solver = cfg.solver_class(params)
    while True:
        # NOTE: "assigement" is a pre-existing typo for "assignment" kept for
        # byte-compatibility.
        assigement = solver.new_assignment()
        concrete_params = assigement.params
        folder = os.path.normpath(
            os.path.abspath(os.path.join(cfg.work_dir, assigement.name))
        )
        logger.info(f"Starting {assigement.name}")
        cfg_path = os.path.join(folder, "config.yml")
        # exist_ok=False: a duplicate assignment name is treated as an error.
        os.makedirs(folder, exist_ok=False)
        # Substitute the sampled params, then round-trip through the schema to
        # normalize/validate the concrete training config.
        concrete_cfg = subs_dict(train_cfg, concrete_params)
        concrete_cfg['logdir'] = folder
        concrete_cfg = subs_dict(TrainConfig.schema(concrete_cfg), {})
        with open(cfg_path, "w") as f:
            yaml.safe_dump({
                'train': concrete_cfg,
                'version': "v0.1",
            },
                stream=f,
                default_flow_style=False)
        # Launch a full training run; returns a DataFrame of per-sample metrics.
        result = train.run_args(
            argparse.Namespace(
                config=cfg_path,
                logdir=None,
                name=assigement.name,
            )
        )
        logger.info(f"Got results:\n{result.describe().to_string()}\n{result}")
        obs = Observation(
            metric=float(np.mean(result['identity'])),
            metric_std=float(np.std(result['identity'])),
            metadata={
                c: float(np.mean(series))
                for c, series in result.iteritems()
            }
        )
        solver.report(assigement, obs)
| 4,577 | 541 | 138 |
c09f34824761d6195efb3b06e107298407379d04 | 313 | py | Python | zooapi/api/logout.py | ismyblue/zoo | b00d8af5a6d086369cf939e66884bd377fdf8333 | [
"Apache-2.0"
] | 2 | 2020-09-18T03:58:16.000Z | 2021-03-15T12:28:57.000Z | zooapi/api/logout.py | ismyblue/zoo | b00d8af5a6d086369cf939e66884bd377fdf8333 | [
"Apache-2.0"
] | null | null | null | zooapi/api/logout.py | ismyblue/zoo | b00d8af5a6d086369cf939e66884bd377fdf8333 | [
"Apache-2.0"
] | null | null | null | # Name: logout.py
# Author: HuangHao
# Time: 2020/9/30 22:17
from django.http import JsonResponse
from zooapi.models import User
def logout(request):
"""
注销登录 GET
@param request:
@return:
"""
request.session.flush()
return JsonResponse({'result': 'success', 'success': '注销成功'})
| 14.904762 | 65 | 0.642173 | # Name: logout.py
# Author: HuangHao
# Time: 2020/9/30 22:17
from django.http import JsonResponse
from zooapi.models import User
def logout(request):
"""
注销登录 GET
@param request:
@return:
"""
request.session.flush()
return JsonResponse({'result': 'success', 'success': '注销成功'})
| 0 | 0 | 0 |
31838a3974e9a0840a5ede0588d72dbfa38aac4a | 509 | py | Python | token.py | punch872/EyeWarnYou | 71ea21a8b3f1ae213478d735a10a240524b89702 | [
"MIT"
] | 1 | 2019-03-04T08:37:26.000Z | 2019-03-04T08:37:26.000Z | token.py | punch872/EyeWarnYou | 71ea21a8b3f1ae213478d735a10a240524b89702 | [
"MIT"
] | null | null | null | token.py | punch872/EyeWarnYou | 71ea21a8b3f1ae213478d735a10a240524b89702 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Demo of PyThaiNLP sentence and word tokenization on Thai text; the comment
# after each print shows the expected output.
from pythainlp.tokenize import sent_tokenize, word_tokenize
text = "ฉันรักภาษาไทย เพราะฉันใช้ภาษาไทย "
print(text)
print(sent_tokenize(text))
# ['ฉันรักภาษาไทย', 'เพราะฉันใช้ภาษาไทย', '']
print(word_tokenize(text))
# ['ฉัน', 'รัก', 'ภาษาไทย', ' ', 'เพราะ', 'ฉัน', 'ใช้', 'ภาษาไทย', ' ']
# NOTE(review): newer PyThaiNLP renamed 'whitespaces' to 'keep_whitespace' —
# confirm against the installed library version.
print(word_tokenize(text, whitespaces=False))
# ['ฉัน', 'รัก', 'ภาษาไทย', 'เพราะ', 'ฉัน', 'ใช้', 'ภาษาไทย']
text2 = "กฎหมายแรงงาน"
print(text2)
print(word_tokenize(text2))
# ['กฎหมายแรงงาน']
| 23.136364 | 71 | 0.644401 | # -*- coding: utf-8 -*-
from pythainlp.tokenize import sent_tokenize, word_tokenize
text = "ฉันรักภาษาไทย เพราะฉันใช้ภาษาไทย "
print(text)
print(sent_tokenize(text))
# ['ฉันรักภาษาไทย', 'เพราะฉันใช้ภาษาไทย', '']
print(word_tokenize(text))
# ['ฉัน', 'รัก', 'ภาษาไทย', ' ', 'เพราะ', 'ฉัน', 'ใช้', 'ภาษาไทย', ' ']
print(word_tokenize(text, whitespaces=False))
# ['ฉัน', 'รัก', 'ภาษาไทย', 'เพราะ', 'ฉัน', 'ใช้', 'ภาษาไทย']
text2 = "กฎหมายแรงงาน"
print(text2)
print(word_tokenize(text2))
# ['กฎหมายแรงงาน']
| 0 | 0 | 0 |
509f51d52a7396dba603ca6421f58b4f4987b20a | 220 | py | Python | setup.py | mareklovci/kky-zsur | c41fbce53aa790b1f280cbca8d274845993e74f9 | [
"MIT"
] | null | null | null | setup.py | mareklovci/kky-zsur | c41fbce53aa790b1f280cbca8d274845993e74f9 | [
"MIT"
] | null | null | null | setup.py | mareklovci/kky-zsur | c41fbce53aa790b1f280cbca8d274845993e74f9 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(name='zsur',
version='0.1.0',
packages=['zsur'],
entry_points={
'console_scripts': [
'zsur = zsur.__main__:main'
]
},
)
| 18.333333 | 41 | 0.486364 | from setuptools import setup
setup(name='zsur',
version='0.1.0',
packages=['zsur'],
entry_points={
'console_scripts': [
'zsur = zsur.__main__:main'
]
},
)
| 0 | 0 | 0 |
2984265db5ad3dc8939f67e11869ee6efb92c666 | 839 | py | Python | apex/__init__.py | bcbcbcbcbcl/apex | 7b3ac7221367dc7b7527a68e34cf08b5eeb0fc47 | [
"BSD-3-Clause"
] | 2 | 2021-06-24T18:31:04.000Z | 2021-06-24T20:34:44.000Z | apex/__init__.py | bcbcbcbcbcl/apex | 7b3ac7221367dc7b7527a68e34cf08b5eeb0fc47 | [
"BSD-3-Clause"
] | null | null | null | apex/__init__.py | bcbcbcbcbcl/apex | 7b3ac7221367dc7b7527a68e34cf08b5eeb0fc47 | [
"BSD-3-Clause"
] | null | null | null | # from . import RNN
# from . import reparameterization
from . import fp16_utils
from . import parallel
from . import amp
# The optimizers and normalization submodules require the CUDA extension
# (--cuda_ext); import them optionally and warn once if they are missing.
try:
    from . import optimizers
except ImportError:
    # An attempt to fix https://github.com/NVIDIA/apex/issues/97. I'm not sure why 97 is even
    # happening because Python modules should only be imported once, even if import is called
    # multiple times.
    try:
        # NameError on first failure => warn and set the flag so a repeated
        # execution of this module body would not warn again.
        _ = warned_optimizers
    except NameError:
        print("Warning: apex was installed without --cuda_ext. FusedAdam will be unavailable.")
        warned_optimizers = True
try:
    from . import normalization
except ImportError:
    try:
        # Same warn-once pattern as for optimizers above.
        _ = warned_normalization
    except NameError:
        print("Warning: apex was installed without --cuda_ext. FusedLayerNorm will be unavailable.")
        warned_normalization = True
| 32.269231 | 102 | 0.703218 | # from . import RNN
# from . import reparameterization
from . import fp16_utils
from . import parallel
from . import amp
# The optimizers and normalization submodules require the CUDA extension
# (--cuda_ext); import them optionally and warn once if they are missing.
try:
    from . import optimizers
except ImportError:
    # An attempt to fix https://github.com/NVIDIA/apex/issues/97. I'm not sure why 97 is even
    # happening because Python modules should only be imported once, even if import is called
    # multiple times.
    try:
        # NameError on first failure => warn and set the flag so a repeated
        # execution of this module body would not warn again.
        _ = warned_optimizers
    except NameError:
        print("Warning: apex was installed without --cuda_ext. FusedAdam will be unavailable.")
        warned_optimizers = True
try:
    from . import normalization
except ImportError:
    try:
        # Same warn-once pattern as for optimizers above.
        _ = warned_normalization
    except NameError:
        print("Warning: apex was installed without --cuda_ext. FusedLayerNorm will be unavailable.")
        warned_normalization = True
edb94b58d1c26be7749ed87ed63953601befc353 | 8,454 | py | Python | api/namex/resources/auto_analyse/issues/corporate_name_conflict.py | riyazuddinsyed/namex | c100ef4378794f509b738d38276e3b902d26067a | [
"Apache-2.0"
] | null | null | null | api/namex/resources/auto_analyse/issues/corporate_name_conflict.py | riyazuddinsyed/namex | c100ef4378794f509b738d38276e3b902d26067a | [
"Apache-2.0"
] | null | null | null | api/namex/resources/auto_analyse/issues/corporate_name_conflict.py | riyazuddinsyed/namex | c100ef4378794f509b738d38276e3b902d26067a | [
"Apache-2.0"
] | null | null | null | from datetime import date
from string import Template
from namex.services.name_request.auto_analyse import AnalysisIssueCodes
# Import DTOs
from .abstract import AnalysisResponseIssue
from ..response_objects import NameAnalysisIssue
from ..response_objects import NameAction, NameActions, Conflict
| 44.494737 | 161 | 0.577596 | from datetime import date
from string import Template
from namex.services.name_request.auto_analyse import AnalysisIssueCodes
# Import DTOs
from .abstract import AnalysisResponseIssue
from ..response_objects import NameAnalysisIssue
from ..response_objects import NameAction, NameActions, Conflict
class CorporateNameConflictIssue(AnalysisResponseIssue):
    """Response issue raised when a requested name is too similar to an
    existing corporate name.

    Builds a NameAnalysisIssue that highlights/strikes the conflicting words
    in the submitted name and attaches the first matched conflict record.
    """
    issue_type = AnalysisIssueCodes.CORPORATE_CONFLICT
    status_text = "Further Action Required"
    issue = None
    def create_issue(self):
        """Return an empty NameAnalysisIssue shell for this issue type;
        configure_issue fills in the conflict details and name actions."""
        issue = NameAnalysisIssue(
            issue_type=self.issue_type,
            line1="",
            line2=None,
            consenting_body=None,
            designations=None,
            show_reserve_button=None,
            show_examination_button=False,
            conflicts=[],
            setup=None,
            name_actions=[]
        )
        return issue
    def configure_issue(self, procedure_result):
        """Populate the issue from *procedure_result*.

        Extracts the distinctive/descriptive word lists and the conflict map,
        marks the conflicting words in the submitted name (HIGHLIGHT all but
        the last, STRIKE the last), attaches the first conflict record, and
        renders the setup templates with the computed word lists.
        """
        name_as_submitted = self.analysis_response.name_as_submitted
        list_original = self._lc_list_items(self.analysis_response.name_original_tokens)
        list_name = self._lc_list_items(self.analysis_response.name_tokens)
        all_designations = self._lc_list_items(self.analysis_response.analysis_service.get_all_designations())
        list_name_as_submitted = self._lc_list_items(self.analysis_response.name_as_submitted_tokenized)
        # Filter out designations from the tokens
        list_tokens = [item for item in list_name_as_submitted if item not in all_designations]
        list_dist = procedure_result.values['list_dist']  # Don't lower case this one it's a list wrapped list
        list_desc = procedure_result.values['list_desc']  # Don't lower case this one it's a list wrapped list
        list_conflicts = procedure_result.values['list_conflicts']  # Don't lower case this one it's a dict
        start_date = procedure_result.values['start_date']
        id_num = procedure_result.values['id']
        source = procedure_result.values['source']
        issue = self.create_issue()
        # issue_type distinguishes conflicts with registered names from
        # conflicts with names still in the examination queue (subclasses).
        if issue.issue_type == AnalysisIssueCodes.CORPORATE_CONFLICT:
            issue.line1 = "Too similar to an existing name."
        else:
            issue.line1 = "Too similar to an existing name in queue."
        '''
        eg:
        list_name: <class 'list'>: ['mountain', 'view', 'growers']
        list_dist: <class 'list'>: [['mountain'], ['mountain', 'view']]
        list_desc: <class 'list'>: [['view', 'growers'], ['growers']]
        list_conflicts: <class 'dict'>: {'MOUNTAIN VIEW GROWERS INC.': {'mountain': ['mountain'], 'view': ['view'], 'growers': ['growers']}}
        '''
        # Grab the first conflict
        current_conflict_name = list(list_conflicts.keys())[0]  # eg: 'MOUNTAIN VIEW GROWERS INC.'
        current_conflict = list_conflicts[
            current_conflict_name]  # eg: {'mountain': ['mountain'], 'view': ['view'], 'growers': ['growers']}
        current_conflict_keys = list(current_conflict.keys()) if current_conflict else []
        is_exact_match = (list_name == current_conflict_keys)
        # Flatten the wrapped lists into unique word lists for the templates.
        list_dist_words = list(set([item for sublist in list_dist for item in sublist]))
        list_desc_words = list(set([item for sublist in list_desc for item in sublist]))
        # Apply our is_exact_match strategy:
        # - Add brackets after the first distinctive word
        # - Add brackets after the last descriptive word?
        # - Strike out the last word
        list_remove = []  # These are passed down to the Template
        if is_exact_match:
            # Loop over the token words, we need to decide to do with each word
            for token_idx, word in enumerate(list_tokens):
                offset_idx, word_idx, word_idx_offset, composite_token_offset = self.adjust_word_index(
                    name_as_submitted,
                    list_original,
                    list_tokens,
                    token_idx
                )
                # Highlight the conflict words
                if list_tokens.index(word) != list_tokens.index(list_tokens[-1]):
                    issue.name_actions.append(NameAction(
                        word=word,
                        index=offset_idx,
                        endIndex=offset_idx,
                        type=NameActions.HIGHLIGHT
                    ))
                # Strike out the last matching word
                if list_tokens.index(word) == list_tokens.index(list_tokens[-1]):
                    list_remove.append(word)
                    issue.name_actions.append(NameAction(
                        word=word,
                        index=offset_idx,
                        endIndex=offset_idx,
                        type=NameActions.STRIKE
                    ))
        if not is_exact_match:
            # Loop over the list_name words, we need to decide to do with each word
            for token_idx, word in enumerate(list_tokens):
                offset_idx, word_idx, word_idx_offset, composite_token_offset = self.adjust_word_index(
                    name_as_submitted,
                    list_original,
                    list_tokens,
                    token_idx
                )
                # This code has duplicate blocks because it allows us to tweak the response for composite token matches separately from normal words if necessary
                if composite_token_offset and composite_token_offset > 0:
                    # <class 'list'>: ['mountain', 'view']
                    # Highlight the conflict words
                    if word in current_conflict_keys and current_conflict_keys.index(
                            word) != current_conflict_keys.index(current_conflict_keys[-1]):
                        issue.name_actions.append(NameAction(
                            word=word,
                            index=offset_idx,
                            type=NameActions.HIGHLIGHT
                        ))
                    # Strike out the last matching word
                    if word in current_conflict_keys and current_conflict_keys.index(
                            word) == current_conflict_keys.index(current_conflict_keys[-1]):
                        issue.name_actions.append(NameAction(
                            word=word,
                            index=offset_idx,
                            type=NameActions.STRIKE
                        ))
                else:
                    # Highlight the conflict words
                    if word in current_conflict_keys and current_conflict_keys.index(
                            word) != current_conflict_keys.index(current_conflict_keys[-1]):
                        issue.name_actions.append(NameAction(
                            word=word,
                            index=offset_idx,
                            type=NameActions.HIGHLIGHT
                        ))
                    # Strike out the last matching word
                    if word in current_conflict_keys and current_conflict_keys.index(
                            word) == current_conflict_keys.index(current_conflict_keys[-1]):
                        issue.name_actions.append(NameAction(
                            word=word,
                            index=offset_idx,
                            type=NameActions.STRIKE
                        ))
        issue.conflicts = []
        conflict = Conflict(
            name=current_conflict_name,
            date=date.today(),
            start_date=start_date,
            id=id_num,
            source=source
        )
        issue.conflicts.append(conflict)
        # Setup boxes
        issue.setup = self.setup_config
        # Replace template strings in setup boxes
        for setup_item in issue.setup:
            # Loop over properties
            for prop in vars(setup_item):
                if isinstance(setup_item.__dict__[prop], Template):
                    # Render the Template string, replacing placeholder vars
                    setattr(setup_item, prop, setup_item.__dict__[prop].safe_substitute({
                        'list_name': self._join_list_words(list_name),
                        'list_remove': self._join_list_words(list_remove),
                        'list_dist': self._join_list_words(list_dist_words),
                        'list_desc': self._join_list_words(list_desc_words)
                    }))
        return issue
| 7,925 | 205 | 23 |
17b916fddb4cf1c13bf9e42996fa9b637ba98911 | 2,942 | py | Python | fabfile/base.py | whatsthehubbub/rippleeffect | f33488e7a0dbeaadee5da5ddef7ce5f209fb3fd4 | [
"MIT"
] | null | null | null | fabfile/base.py | whatsthehubbub/rippleeffect | f33488e7a0dbeaadee5da5ddef7ce5f209fb3fd4 | [
"MIT"
] | null | null | null | fabfile/base.py | whatsthehubbub/rippleeffect | f33488e7a0dbeaadee5da5ddef7ce5f209fb3fd4 | [
"MIT"
] | null | null | null | from fabric.api import *
from fabric.colors import cyan
from fabric.contrib import files
# Base apt packages installed on every provisioned host.
packages = (
    'build-essential',
    'git',
    'mercurial',
    'rsync',
    'vim',
)
def create_deploy_user():
    """Create the 'deploy' user: home dir, SSH key auth, and sudo access.

    Idempotent: returns early if the user already exists in /etc/passwd.
    Prompts interactively for the new user's sudo password.
    """
    username = 'deploy'
    # create deploy user & home without password
    if files.contains('/etc/passwd', username):
        return
    sudo('useradd %s --create-home --shell /bin/bash' % username)
    # create authorized_keys & upload public key
    sudo('mkdir -p /home/deploy/.ssh')
    sudo('chmod 700 /home/deploy/.ssh')
    pub_key = open(env.key_filename, 'rb').read()
    files.append('/home/%s/.ssh/authorized_keys' % username, pub_key, use_sudo=True)
    # update authorized_keys permissions
    sudo('chmod 400 /home/%s/.ssh/authorized_keys' % username)
    sudo('chown deploy:deploy /home/%s/.ssh -R' % username)
    # create sudo password & add to sudoers
    print(cyan('set sudo password for "%s" user' % username))
    sudo('passwd %s' % username)
    files.append('/etc/sudoers', '%s ALL=(ALL) ALL' % username, use_sudo=True)
def automate_security_updates():
    "enable automatic installation of security updates"
    sudo('apt-get install unattended-upgrades')
    files.upload_template(
        'apt/10periodic',
        '/etc/apt/apt.conf.d/10periodic',
        env,
        template_dir='fabfile/templates',
        use_sudo=True,
        # mode must be octal: the previous decimal literal 644 chmod'ed the
        # file to 1204 instead of the intended rw-r--r-- (0o644)
        mode=0o644,
    )
# TODO: checkout apticron for email alerts
def harden_sudoers():
    """
    >> /etc/sudoers
    root ALL=(ALL) ALL
    deploy ALL=(ALL) ALL
    """
    # NOTE(review): placeholder — the sudoers edits described in the
    # docstring are not implemented; this function is currently a no-op.
    pass
def harden_ssh():
    """
    >> /etc/ssh/sshd_config
    PermitRootLogin no
    PasswordAuthentication no
    """
    # NOTE(review): the sshd_config edits described in the docstring are not
    # implemented yet — only the service restart actually runs. Confirm intent.
    run('service ssh restart')
def setup_firewall():
    """
    ufw allow from {your-ip} to any port 22
    ufw allow 80
    ufw enable
    """
    # NOTE(review): placeholder — the ufw commands listed above are only
    # documented, never executed.
    pass
| 25.582609 | 108 | 0.655676 | from fabric.api import *
from fabric.colors import cyan
from fabric.contrib import files
packages = (
'build-essential',
'git',
'mercurial',
'rsync',
'vim',
)
def install_base_packages():
    """Refresh the apt index, then install every package in ``packages``."""
    commands = ['apt-get update']
    commands.extend('apt-get install %s --assume-yes' % name for name in packages)
    for apt_command in commands:
        sudo(apt_command)
def upgrade_system():
    """Bring the host fully up to date with a quiet, non-interactive upgrade."""
    for apt_command in ('apt-get update',
                        'apt-get dist-upgrade --assume-yes --quiet'):
        sudo(apt_command)
def create_deploy_user():
    "creates deployment user"
    username = 'deploy'
    # create deploy user & home without password
    if files.contains('/etc/passwd', username):
        return
    sudo('useradd %s --create-home --shell /bin/bash' % username)
    # create authorized_keys & upload public key
    sudo('mkdir -p /home/deploy/.ssh')
    sudo('chmod 700 /home/deploy/.ssh')
    # read the public key via a context manager so the handle is always
    # closed (previously the file object was opened and never closed)
    with open(env.key_filename, 'rb') as key_file:
        pub_key = key_file.read()
    files.append('/home/%s/.ssh/authorized_keys' % username, pub_key, use_sudo=True)
    # update authorized_keys permissions
    sudo('chmod 400 /home/%s/.ssh/authorized_keys' % username)
    sudo('chown deploy:deploy /home/%s/.ssh -R' % username)
    # create sudo password & add to sudoers
    print(cyan('set sudo password for "%s" user' % username))
    sudo('passwd %s' % username)
    files.append('/etc/sudoers', '%s ALL=(ALL) ALL' % username, use_sudo=True)
def automate_security_updates():
    "enable automatic installation of security updates"
    sudo('apt-get install unattended-upgrades')
    files.upload_template(
        'apt/10periodic',
        '/etc/apt/apt.conf.d/10periodic',
        env,
        template_dir='fabfile/templates',
        use_sudo=True,
        # mode must be octal: the previous decimal literal 644 chmod'ed the
        # file to 1204 instead of the intended rw-r--r-- (0o644)
        mode=0o644,
    )
# TODO: checkout apticron for email alerts
def install_rackspace_monitoring():
    """Register the Rackspace cloud-monitoring apt repo and install the agent."""
    # add the rackspace apt repo to the sources list
    files.append(
        "/etc/apt/sources.list.d/rackspace-monitoring-agent.list",
        "deb http://stable.packages.cloudmonitoring.rackspace.com/ubuntu-12.04-x86_64 cloudmonitoring main",
        use_sudo=True,
    )
    # trust the repository signing key
    run('curl https://monitoring.api.rackspacecloud.com/pki/agent/linux.asc | apt-key add -')
    # install the monitoring agent, then run its interactive setup
    run('apt-get update')
    run('apt-get install rackspace-monitoring-agent')
    run('rackspace-monitoring-agent --setup')
def harden_sudoers():
    """
    >> /etc/sudoers
    root ALL=(ALL) ALL
    deploy ALL=(ALL) ALL
    """
    # NOTE(review): placeholder — the sudoers edits described in the
    # docstring are not implemented; this function is currently a no-op.
    pass
def harden_ssh():
    """
    >> /etc/ssh/sshd_config
    PermitRootLogin no
    PasswordAuthentication no
    """
    # NOTE(review): the sshd_config edits described in the docstring are not
    # implemented yet — only the service restart actually runs. Confirm intent.
    run('service ssh restart')
def setup_firewall():
    """
    ufw allow from {your-ip} to any port 22
    ufw allow 80
    ufw enable
    """
    # NOTE(review): placeholder — the ufw commands listed above are only
    # documented, never executed.
    pass
def harden_server():
    """Apply every hardening step (firewall, ssh, sudoers) in order."""
    hardening_steps = (setup_firewall, harden_ssh, harden_sudoers)
    for step in hardening_steps:
        step()
def provision_base_server():
    """Run the full base-provisioning sequence on a fresh host."""
    provisioning_steps = (upgrade_system,
                          install_base_packages,
                          automate_security_updates,
                          create_deploy_user)
    for step in provisioning_steps:
        step()
| 948 | 0 | 115 |
1cb0b3487a0368915142350ccee98b2e55f028c8 | 4,894 | py | Python | recipes/data/fisher/utils.py | lorenlugosch/wav2letter | 0393ac7d451e99a3d70a0d78fc48ebc403fee0dc | [
"BSD-3-Clause"
] | 337 | 2021-04-17T03:22:38.000Z | 2022-03-28T18:01:10.000Z | recipes/data/fisher/utils.py | lorenlugosch/wav2letter | 0393ac7d451e99a3d70a0d78fc48ebc403fee0dc | [
"BSD-3-Clause"
] | 64 | 2021-04-16T16:50:47.000Z | 2022-03-25T18:14:42.000Z | recipes/data/fisher/utils.py | lorenlugosch/wav2letter | 0393ac7d451e99a3d70a0d78fc48ebc403fee0dc | [
"BSD-3-Clause"
] | 63 | 2021-04-16T14:44:43.000Z | 2022-03-29T13:43:18.000Z | """
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sox
| 33.067568 | 86 | 0.526972 | """
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import sox
def find_files(src):
    """Locate paired Fisher audio (.sph) and transcript (.txt) files.

    *src* is a comma-separated list of top-level corpus directories.  Every
    required LDC sub-directory must be found under one of them (either the
    old "fe_03_p1_sph*" naming or the new "fisher_eng_tr_sp_d*" naming),
    otherwise an AssertionError is raised.  Returns a list of
    (audio_path, transcript_path) tuples keyed by the shared file stem.
    """
    src_dirs = src.split(",")
    # Sub-directories that must exist somewhere under the given roots.
    required_dirs = [
        "fe_03_p1_sph1",
        "fe_03_p1_sph3",
        "fe_03_p1_sph5",
        "fe_03_p1_sph7",
        "fe_03_p2_sph1",
        "fe_03_p2_sph3",
        "fe_03_p2_sph5",
        "fe_03_p2_sph7",
        "fe_03_p1_sph2",
        "fe_03_p1_sph4",
        "fe_03_p1_sph6",
        "fe_03_p2_sph2",
        "fe_03_p2_sph4",
        "fe_03_p2_sph6",
        "fe_03_p1_tran",
        "fe_03_p2_tran",
    ]
    dir_mapping = {}
    for dir in src_dirs:
        for curdir in os.listdir(dir):
            fulldir = os.path.join(dir, curdir)
            if not os.path.isdir(fulldir):
                continue
            for req_dir in required_dirs:
                # Only "fe_03_p1_sph*" names are rewritten by this replace;
                # p2 and transcript dirs pass through unchanged.
                new_style_req_dir = req_dir.replace(
                    "fe_03_p1_sph", "fisher_eng_tr_sp_d"
                )
                if curdir == req_dir or curdir == new_style_req_dir:
                    dir_mapping[req_dir] = fulldir
                    # NOTE(review): this `continue` at the end of the loop
                    # body is a no-op; `break` was probably intended.
                    continue
    transcript_files = {}
    audio_files = {}
    for dir in required_dirs:
        assert dir in dir_mapping, "could not find the subdirectory {}".format(dir)
        fulldir = dir_mapping[dir]
        # Transcript releases keep their files under an extra "data" folder.
        if "tran" in fulldir:
            fulldir = os.path.join(fulldir, "data")
        for dirpath, _, filenames in os.walk(fulldir):
            for filename in filenames:
                # Key on the file stem so audio and transcript pair up.
                key = filename.split(".")[0]
                if filename.startswith("fe_") and filename.endswith(".txt"):
                    transcript_files[key] = os.path.join(dirpath, filename)
                elif filename.endswith(".sph"):
                    audio_files[key] = os.path.join(dirpath, filename)
    return [(audio_files[k], transcript_files[k]) for k in audio_files]
def process_fisher_data(sample_data):
    """Segment one Fisher conversation into per-utterance FLAC files.

    *sample_data* unpacks to ((sph_path, transcript_path), _, audio_path,
    sph2pipe_binary).  Each stereo channel is first converted to a temporary
    mono WAV via sph2pipe, then each transcript line is trimmed out with sox
    and written under ``audio_path/<conversation-id>/``.  Returns a list of
    tab-separated "utterance-id\\tfile\\tduration-ms\\ttext" strings.
    """
    files, _, audio_path, sph2pipe = sample_data
    sphfile, tfile = files
    tmp_files = {}
    # Split the stereo .sph into one mono WAV per channel (A -> 1, B -> 2).
    for channel in ["A", "B"]:
        tmp_files[channel] = os.path.join(
            audio_path, "{pid}_tmp_{ch}.wav".format(pid=os.getpid(), ch=channel)
        )
        os.system(
            "{sph} -f wav -c {c} {i} {o}".format(
                sph=sph2pipe,
                c=1 if channel == "A" else 2,
                i=sphfile,
                o=tmp_files[channel],
            )
        )
    idx = 0
    lines = []
    with open(tfile, "r") as f:
        # First transcript line carries the .sph file name, e.g. "# fe_....sph".
        first_line = f.readline().strip()
        assert first_line.startswith("#") and first_line.endswith(".sph")
        audiofileid = first_line.replace("#", "").replace(".sph", "").strip()
        cur_audio_path = os.path.join(audio_path, audiofileid)
        os.makedirs(cur_audio_path, exist_ok=True)
        for line in f:
            if line.startswith("#") or not line.strip():
                continue
            # Each line is "<start> <end> <channel>: <text>".
            tag, text = line.strip().split(":", 1)
            # NOTE: rebinding `channel` here; assumes the transcript label is
            # "A" or "B" so it matches the tmp_files keys — confirm.
            start, end, channel = tag.split()
            start = float(start)
            end = float(end)
            # Utterance id encodes start/end in centiseconds, zero-padded.
            utt = "{a}-{c}-{s}-{e}".format(
                a=audiofileid,
                c=channel,
                s="{:06d}".format(int(start * 100 + 0.5)),
                e="{:06d}".format(int(end * 100 + 0.5)),
            )
            # ignore uncertain annotations
            if "((" in text:
                continue
            # lower-case
            text = text.lower()
            # remove punctuation
            text = text.replace("?", "")
            text = text.replace(",", "")
            # simplify noise annotations
            text = text.replace("[[skip]]", "")
            text = text.replace("[pause]", "")
            text = text.replace("[laugh]", "[laughter]")
            text = text.replace("[sigh]", "[noise]")
            text = text.replace("[cough]", "[noise]")
            text = text.replace("[mn]", "[noise]")
            text = text.replace("[breath]", "[noise]")
            text = text.replace("[lipsmack]", "[noise]")
            text = text.replace("[sneeze]", "[noise]")
            text = " ".join(text.split())
            out_file = os.path.join(cur_audio_path, "{:09d}.flac".format(idx))
            sox_tfm = sox.Transformer()
            sox_tfm.set_output_format(
                file_type="flac", encoding="signed-integer", bits=16
            )
            # Cut the utterance span out of the per-channel WAV.
            sox_tfm.trim(start, end)
            sox_tfm.build(tmp_files[channel], out_file)
            duration = (end - start) * 1000.0
            idx = idx + 1
            lines.append("\t".join([utt, out_file, "{0:.2f}".format(duration), text]))
    # cleanup
    for tmp in tmp_files.values():
        os.remove(tmp)
    return lines
| 4,534 | 0 | 46 |
e01c52b2bb6a223ae80e53e10527fdc38e1bd89e | 1,006 | py | Python | src/bgapi/flash/cmd.py | GetAmbush/python-bgapi | 985e5849275eb5e7cf794c30ef87e16ffa91fa63 | [
"MIT"
] | 5 | 2018-05-11T14:59:50.000Z | 2021-04-29T07:51:43.000Z | src/bgapi/flash/cmd.py | GetAmbush/python-bgapi | 985e5849275eb5e7cf794c30ef87e16ffa91fa63 | [
"MIT"
] | null | null | null | src/bgapi/flash/cmd.py | GetAmbush/python-bgapi | 985e5849275eb5e7cf794c30ef87e16ffa91fa63 | [
"MIT"
] | 2 | 2018-10-05T16:51:08.000Z | 2020-08-10T18:24:16.000Z | from struct import pack
from bgapi.base_command import command
from bgapi.types import (MessageType, MessageClass)
| 27.189189 | 57 | 0.720676 | from struct import pack
from bgapi.base_command import command
from bgapi.types import (MessageType, MessageClass)
def ps_erase(key):
    """Build a BGAPI flash command that erases the PS entry stored at *key*."""
    return command(
        MessageType.COMMAND_RESPONSE.value,
        MessageClass.FLASH.value,
        0x04,
        pack('<H', key),
    )
def ps_erase_all():
    """Build a BGAPI flash command that erases every PS entry (empty payload)."""
    return command(
        MessageType.COMMAND_RESPONSE.value,
        MessageClass.FLASH.value,
        0x01,
        b'',
    )
def ps_load(key):
    """Build a BGAPI flash command that reads the PS entry stored at *key*."""
    return command(
        MessageType.COMMAND_RESPONSE.value,
        MessageClass.FLASH.value,
        0x03,
        pack('<H', key),
    )
def ps_save(key, value):
    """Build a BGAPI flash command that writes *value* to the PS entry *key*.

    The payload is the little-endian key, a one-byte length, then the value.
    """
    payload = pack('<HB', key, len(value)) + bytes(value)
    return command(
        MessageType.COMMAND_RESPONSE.value,
        MessageClass.FLASH.value,
        0x02,
        payload,
    )
| 794 | 0 | 92 |
65c559e5b5f51d6b87eee6d3d2659ee1b550c613 | 2,632 | py | Python | fool/predictor.py | alanoooaao/FoolNLTK | 1344c5aa1c2aabc1f4f6f2a492e1663928836325 | [
"Apache-2.0"
] | 1,718 | 2017-12-15T06:14:10.000Z | 2022-03-28T02:31:56.000Z | fool/predictor.py | alanoooaao/FoolNLTK | 1344c5aa1c2aabc1f4f6f2a492e1663928836325 | [
"Apache-2.0"
] | 73 | 2017-12-22T03:04:17.000Z | 2021-11-15T15:38:18.000Z | fool/predictor.py | alanoooaao/FoolNLTK | 1344c5aa1c2aabc1f4f6f2a492e1663928836325 | [
"Apache-2.0"
] | 421 | 2017-12-17T08:32:11.000Z | 2022-03-11T03:02:29.000Z | #!/usr/bin/env python
# -*-coding:utf-8-*-
import tensorflow as tf
import numpy as np
from tensorflow.contrib.crf import viterbi_decode
| 33.316456 | 119 | 0.640578 | #!/usr/bin/env python
# -*-coding:utf-8-*-
import tensorflow as tf
import numpy as np
from tensorflow.contrib.crf import viterbi_decode
def decode(logits, trans, sequence_lengths, tag_num):
viterbi_sequences = []
small = -1000.0
start = np.asarray([[small] * tag_num + [0]])
for logit, length in zip(logits, sequence_lengths):
score = logit[:length]
pad = small * np.ones([length, 1])
score = np.concatenate([score, pad], axis=1)
score = np.concatenate([start, score], axis=0)
viterbi_seq, viterbi_score = viterbi_decode(score, trans)
viterbi_sequences.append(viterbi_seq[1:])
return viterbi_sequences
def list_to_array(data_list, dtype=np.int32):
array = np.array(data_list, dtype).reshape(1, len(data_list))
return array
def load_graph(path):
with tf.gfile.GFile(path, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(graph_def, name="prefix")
return graph
class Predictor(object):
def __init__(self, model_file, char_to_id, id_to_tag):
self.char_to_id = char_to_id
self.id_to_tag = {int(k):v for k,v in id_to_tag.items()}
self.graph = load_graph(model_file)
self.input_x = self.graph.get_tensor_by_name("prefix/char_inputs:0")
self.lengths = self.graph.get_tensor_by_name("prefix/lengths:0")
self.dropout = self.graph.get_tensor_by_name("prefix/dropout:0")
self.logits = self.graph.get_tensor_by_name("prefix/project/logits:0")
self.trans = self.graph.get_tensor_by_name("prefix/crf_loss/transitions:0")
self.sess = tf.Session(graph=self.graph)
self.sess.as_default()
self.num_class = len(self.id_to_tag)
def predict(self, sents):
inputs = []
lengths = [len(text) for text in sents]
max_len = max(lengths)
for sent in sents:
sent_ids = [self.char_to_id.get(w) if w in self.char_to_id else self.char_to_id.get("<OOV>") for w in sent]
padding = [0] * (max_len - len(sent_ids))
sent_ids += padding
inputs.append(sent_ids)
inputs = np.array(inputs, dtype=np.int32)
feed_dict = {
self.input_x: inputs,
self.lengths: lengths,
self.dropout: 1.0
}
logits, trans = self.sess.run([self.logits, self.trans], feed_dict=feed_dict)
path = decode(logits, trans, lengths, self.num_class)
labels = [[self.id_to_tag.get(l) for l in p] for p in path]
return labels
| 2,340 | 3 | 145 |
db1f10790ae19b9c1c9518e37dc69cd4f610f4b8 | 904 | py | Python | src/chembl_webservices/core/fpsim2_helper.py | BNext-IQT/chembl_webservices_py3 | 42ccb39f0121835ca7ee9ac8ddd04cb513998079 | [
"Apache-2.0"
] | 5 | 2020-10-23T11:56:59.000Z | 2021-06-05T16:30:10.000Z | src/chembl_webservices/core/fpsim2_helper.py | BNext-IQT/chembl_webservices_py3 | 42ccb39f0121835ca7ee9ac8ddd04cb513998079 | [
"Apache-2.0"
] | 9 | 2020-02-11T08:01:40.000Z | 2021-06-10T19:41:03.000Z | src/chembl_webservices/core/fpsim2_helper.py | BNext-IQT/chembl_webservices_py3 | 42ccb39f0121835ca7ee9ac8ddd04cb513998079 | [
"Apache-2.0"
] | 4 | 2020-02-11T10:45:22.000Z | 2021-06-07T01:48:02.000Z | from FPSim2 import FPSim2Engine
import time
# Variable loaded from the Settings to prevent circular references
FPSIM2_FILE_PATH = None
FPSIM_ENGINE = None
def get_similar_molregnos(query_string, similarity=0.7):
    """Search the FPSim2 index for molecules similar to the query structure.

    :param query_string: the smiles, inchi or molfile representation of the query
    :param similarity: the minimum similarity threshold
    :return: a list with tuples of (molregno, similarity)
    """
    if not 0.4 <= similarity <= 1:
        raise ValueError('Similarity should have a value between 0.4 and 1.')
    engine = get_fpsim_engine()
    return engine.similarity(query_string, similarity, n_workers=1)
| 34.769231 | 81 | 0.733407 | from FPSim2 import FPSim2Engine
import time
# Variable loaded from the Settings to prevent circular references
FPSIM2_FILE_PATH = None
FPSIM_ENGINE = None
def get_fpsim_engine():
    """Return the process-wide FPSim2 engine, loading it lazily on first use."""
    global FPSIM_ENGINE, FPSIM2_FILE_PATH
    if FPSIM_ENGINE is not None:
        return FPSIM_ENGINE
    load_started = time.time()
    FPSIM_ENGINE = FPSim2Engine(FPSIM2_FILE_PATH)
    print('FPSIM2 FILE LOADED IN {0} SECS'.format(time.time() - load_started))
    return FPSIM_ENGINE
def get_similar_molregnos(query_string, similarity=0.7):
    """Search the FPSim2 index for molecules similar to the query structure.

    :param query_string: the smiles, inchi or molfile representation of the query
    :param similarity: the minimum similarity threshold
    :return: a list with tuples of (molregno, similarity)
    """
    if not 0.4 <= similarity <= 1:
        raise ValueError('Similarity should have a value between 0.4 and 1.')
    engine = get_fpsim_engine()
    return engine.similarity(query_string, similarity, n_workers=1)
| 253 | 0 | 23 |
8d9ef2b90b7b14509b031c398cfb1bab121f4696 | 1,711 | py | Python | tests/db_models/test_db_models.py | libercapital/dados_publicos_cnpj_receita_federal | a02f98ebb1e5aa64539cc371d94ba78a49647214 | [
"MIT"
] | 7 | 2022-02-04T22:02:01.000Z | 2022-03-08T22:55:29.000Z | tests/db_models/test_db_models.py | libercapital/dados_publicos_cnpj_receita_federal | a02f98ebb1e5aa64539cc371d94ba78a49647214 | [
"MIT"
] | 3 | 2022-02-04T22:48:01.000Z | 2022-02-10T01:53:00.000Z | tests/db_models/test_db_models.py | libercapital/dados_publicos_cnpj_receita_federal | a02f98ebb1e5aa64539cc371d94ba78a49647214 | [
"MIT"
] | 1 | 2022-03-18T17:07:18.000Z | 2022-03-18T17:07:18.000Z | from src.db_models.models import (dict_db_models, CompanyRoot, Company, Partners, CompanyRootSimples, CompanyTaxRegime,
RefDate)
| 34.22 | 120 | 0.700175 | from src.db_models.models import (dict_db_models, CompanyRoot, Company, Partners, CompanyRootSimples, CompanyTaxRegime,
RefDate)
def test_db_models_models_number_of_tables():
    """The model registry must expose exactly six tables."""
    assert len(dict_db_models) == 6
def test_db_models_models_company_root():
    """CompanyRoot exposes the expected table name, column count and index."""
    model = CompanyRoot()
    assert model.__tablename__ == 'rf_company_root_test'
    assert model.N_RAW_COLUMNS == 7
    assert sorted(model.get_index_cols()) == sorted(['cnpj_root'])
def test_db_models_models_company():
tbl = Company()
assert tbl.__tablename__ == 'rf_company_test'
assert tbl.N_RAW_COLUMNS == 30
assert sorted(tbl.get_index_cols()) == sorted(['cnpj', 'cnpj_root'])
def test_db_models_models_company_tax_regime():
tbl = CompanyTaxRegime()
assert tbl.__tablename__ == 'rf_company_tax_regime_test'
assert tbl.N_RAW_COLUMNS == 5
assert sorted(tbl.get_index_cols()) == sorted(['cnpj', 'cnpj_root'])
def test_db_models_models_partners():
tbl = Partners()
assert tbl.__tablename__ == 'rf_partners_test'
assert tbl.N_RAW_COLUMNS == 11
assert sorted(tbl.get_index_cols()) == sorted(['cnpj_root'])
def test_db_models_models_company_root_simples():
tbl = CompanyRootSimples()
assert tbl.__tablename__ == 'rf_company_root_simples_test'
assert tbl.N_RAW_COLUMNS == 7
assert sorted(tbl.get_index_cols()) == sorted(['cnpj_root'])
def test_db_models_models_ref_date():
tbl = RefDate()
assert tbl.__tablename__ == 'rf_ref_date_test'
assert tbl.N_RAW_COLUMNS == 1
assert sorted(tbl.get_index_cols()) == sorted(['ref_date'])
| 1,357 | 0 | 175 |
919330bd72d7c40a43c0de814726b03e27ca231b | 292 | py | Python | tests/test_filters.py | test-and-trace-data/releases | 62719612160977e2af8657c0e0eb42547d0004cb | [
"MIT"
] | null | null | null | tests/test_filters.py | test-and-trace-data/releases | 62719612160977e2af8657c0e0eb42547d0004cb | [
"MIT"
] | null | null | null | tests/test_filters.py | test-and-trace-data/releases | 62719612160977e2af8657c0e0eb42547d0004cb | [
"MIT"
] | null | null | null | from website.filters import formatdatestring
| 24.333333 | 54 | 0.722603 | from website.filters import formatdatestring
def test_formatdatestring_datetime():
    """A full ISO timestamp renders as 'day Month year, HH:MM'."""
    assert formatdatestring("2020-12-23T10:32:16.054492") == "23 December 2020, 10:32"
def test_formatdatestring_date():
    """A bare ISO date renders as 'day Month year' with no time part."""
    assert formatdatestring("2020-12-23") == "23 December 2020"
| 199 | 0 | 46 |
fa18948df120f9cec960c87761e0e375fe726424 | 8,025 | py | Python | tests/unit_config.py | rohank63/htsinfer | 2067ed67bdc9b4208efa3d2080c3fe541607e5fb | [
"Apache-2.0"
] | 1 | 2020-05-28T21:10:57.000Z | 2020-05-28T21:10:57.000Z | tests/unit_config.py | rohank63/htsinfer | 2067ed67bdc9b4208efa3d2080c3fe541607e5fb | [
"Apache-2.0"
] | null | null | null | tests/unit_config.py | rohank63/htsinfer | 2067ed67bdc9b4208efa3d2080c3fe541607e5fb | [
"Apache-2.0"
] | null | null | null | """
Unit tests for '.config'.
"""
import os
import pytest
from yaml.parser import ParserError
from yaml.representer import RepresenterError
from myproj.config import ConfigParser
from myproj.models import Parameters
# Test parameters
FILE_OK = os.path.join(
os.path.dirname(__file__),
"files",
"yaml",
)
FILE_UNAVAILABLE = "xyz/zyx/123"
FILE_NOT_YAML = __file__
FILE_EMPTY = os.path.join(
os.path.dirname(__file__),
"files",
"empty",
)
FILE_TXT = os.path.join(
os.path.dirname(__file__),
"files",
"txt",
)
FILE_OUT = os.path.join(
os.path.dirname(__file__),
"files",
"conf_out",
)
STRING = "SOME HEADER"
KWARGS = {
"STRING": STRING,
"INTEGER": 123,
"DICT": {"abc": 1, "cde": 2, "efg": 3},
"DICT_EMPTY": {},
"LIST": [1, 2, 3],
"LIST_EMPTY": [],
}
KEY_1 = "a"
KEY_2 = "a1"
KEY_3 = "a2"
KEY_4 = "b"
KEY_5 = "c"
INT = 1
LIST = [1, 2, 3]
OBJECT = {"OBJECT": ConfigParser}
DICT_1 = {KEY_1: {KEY_2: 2, KEY_3: 3}}
DICT_2 = {KEY_1: {KEY_2: 5}, KEY_4: 6}
QUERY = {KEY_1: {KEY_2: INT, KEY_3: {}}, KEY_4: INT, KEY_5: KEY_1}
QUERY_FALSE = {KEY_1: INT, KEY_4: INT, KEY_5: KEY_1}
REF = {KEY_1: {KEY_2: INT}, KEY_4: [], KEY_5: {}}
# __init__()
# log_yaml()
# read_config_files()
# recursive_dict_update()
# same_keys()
# dict_to_yaml()
# yaml_to_dict()
| 22.542135 | 66 | 0.681246 | """
Unit tests for '.config'.
"""
import os
import pytest
from yaml.parser import ParserError
from yaml.representer import RepresenterError
from myproj.config import ConfigParser
from myproj.models import Parameters
# Test parameters
FILE_OK = os.path.join(
os.path.dirname(__file__),
"files",
"yaml",
)
FILE_UNAVAILABLE = "xyz/zyx/123"
FILE_NOT_YAML = __file__
FILE_EMPTY = os.path.join(
os.path.dirname(__file__),
"files",
"empty",
)
FILE_TXT = os.path.join(
os.path.dirname(__file__),
"files",
"txt",
)
FILE_OUT = os.path.join(
os.path.dirname(__file__),
"files",
"conf_out",
)
STRING = "SOME HEADER"
KWARGS = {
"STRING": STRING,
"INTEGER": 123,
"DICT": {"abc": 1, "cde": 2, "efg": 3},
"DICT_EMPTY": {},
"LIST": [1, 2, 3],
"LIST_EMPTY": [],
}
KEY_1 = "a"
KEY_2 = "a1"
KEY_3 = "a2"
KEY_4 = "b"
KEY_5 = "c"
INT = 1
LIST = [1, 2, 3]
OBJECT = {"OBJECT": ConfigParser}
DICT_1 = {KEY_1: {KEY_2: 2, KEY_3: 3}}
DICT_2 = {KEY_1: {KEY_2: 5}, KEY_4: 6}
QUERY = {KEY_1: {KEY_2: INT, KEY_3: {}}, KEY_4: INT, KEY_5: KEY_1}
QUERY_FALSE = {KEY_1: INT, KEY_4: INT, KEY_5: KEY_1}
REF = {KEY_1: {KEY_2: INT}, KEY_4: [], KEY_5: {}}
# __init__()
def test_init_no_args():
    """A ConfigParser built without files starts with an empty value dict."""
    parser = ConfigParser()
    assert parser.values == {}
def test_init_single_config():
res = ConfigParser(FILE_OK)
assert type(res.values) is dict
def test_init_config_unavailable():
with pytest.raises(FileNotFoundError):
ConfigParser(FILE_UNAVAILABLE)
def test_init_config_invalid():
with pytest.raises(ParserError):
ConfigParser(FILE_NOT_YAML)
def test_init_multi_config():
res = ConfigParser(FILE_OK, FILE_OK)
assert type(res.values) is dict
def test_init_single_config_log():
res = ConfigParser(FILE_OK, log=True)
assert type(res.values) is dict
def test_init_empty_config_log():
res = ConfigParser(FILE_EMPTY, log=True)
assert res.values == {}
# log_yaml()
def test_log_yaml_no_args():
assert ConfigParser.log_yaml() is None
def test_log_yaml_header():
assert ConfigParser.log_yaml(header=STRING) is None
def test_log_yaml_kwargs():
assert ConfigParser.log_yaml(kwargs=KWARGS) is None
def test_log_yaml_kwargs_with_error():
with pytest.raises(TypeError):
ConfigParser.log_yaml(**KWARGS, level=OBJECT)
def test_log_yaml_header_and_kwargs():
assert ConfigParser.log_yaml(header=STRING, **KWARGS) is None
# read_config_files()
def test_read_config_files_no_args():
res = ConfigParser.read_config_files()
assert res == {}
def test_read_config_files_single_config():
res = ConfigParser.read_config_files(FILE_OK)
assert type(res) is dict
def test_read_config_files_multi_config():
res = ConfigParser.read_config_files(FILE_OK, FILE_OK)
assert type(res) is dict
def test_read_config_files_single_config_unavailable():
with pytest.raises(FileNotFoundError):
ConfigParser.read_config_files(FILE_UNAVAILABLE)
def test_read_config_files_multi_config_partly_unavailable():
with pytest.raises(FileNotFoundError):
ConfigParser.read_config_files(FILE_OK, FILE_UNAVAILABLE)
def test_read_config_files_single_config_invalid():
with pytest.raises(ParserError):
ConfigParser.read_config_files(FILE_NOT_YAML)
def test_read_config_files_multi_config_partly_invalid():
with pytest.raises(ParserError):
ConfigParser.read_config_files(FILE_OK, FILE_NOT_YAML)
# recursive_dict_update()
def test_recursive_dict_update_correct_inputs():
    """Nested keys merge: updated leaves win, untouched leaves survive."""
    merged = ConfigParser.recursive_dict_update(original=DICT_1, update=DICT_2)
    assert merged[KEY_1][KEY_2] == DICT_2[KEY_1][KEY_2]
    assert KEY_3 in merged[KEY_1]
    assert KEY_4 in merged
def test_recursive_dict_update_arg1_list():
with pytest.raises(TypeError):
ConfigParser.recursive_dict_update(
original=LIST,
update=DICT_1,
)
def test_recursive_dict_update_arg2_list():
with pytest.raises(TypeError):
ConfigParser.recursive_dict_update(
original=DICT_1,
update=LIST,
)
def test_recursive_dict_update_arg1_int():
with pytest.raises(TypeError):
ConfigParser.recursive_dict_update(
original=INT,
update=DICT_1,
)
def test_recursive_dict_update_arg2_int():
with pytest.raises(TypeError):
ConfigParser.recursive_dict_update(
original=DICT_1,
update=INT,
)
def test_recursive_dict_update_arg1_str():
with pytest.raises(TypeError):
ConfigParser.recursive_dict_update(
original=KEY_1,
update=DICT_1,
)
def test_recursive_dict_update_arg2_str():
with pytest.raises(TypeError):
ConfigParser.recursive_dict_update(
original=DICT_1,
update=KEY_1,
)
# same_keys()
def test_same_keys_correct_inputs():
assert ConfigParser.same_keys(
query=QUERY,
ref=REF,
) is True
def test_same_keys_correct_inputs_two_way():
assert ConfigParser.same_keys(
query=QUERY,
ref=QUERY,
two_way=True,
) is True
def test_same_keys_conflicting_inputs():
assert ConfigParser.same_keys(
query=QUERY_FALSE,
ref=REF,
) is False
def test_same_keys_conflicting_inputs_two_way():
assert ConfigParser.same_keys(
query=QUERY,
ref=REF,
two_way=True,
) is False
def test_same_keys_wrong_types_ref():
with pytest.raises(TypeError):
ConfigParser.same_keys(
query=QUERY,
ref=INT,
)
def test_same_keys_wrong_types_query_list():
assert ConfigParser.same_keys(
query=LIST,
ref=REF,
) is False
def test_same_keys_wrong_types_query_int():
assert ConfigParser.same_keys(
query=INT,
ref=REF,
) is False
def test_same_keys_wrong_types_query_str():
assert ConfigParser.same_keys(
query=KEY_1,
ref=REF,
) is False
def test_same_keys_wrong_types_query_none():
assert ConfigParser.same_keys(
query=None,
ref=REF,
) is False
def test_same_keys_wrong_types_query_class():
assert ConfigParser.same_keys(
query=ConfigParser,
ref=REF,
) is False
# dict_to_yaml()
def test_dict_to_yaml_file_ok():
params = Parameters().to_dict()
ret = ConfigParser.dict_to_yaml(
d=params,
yaml_file=FILE_OUT,
)
assert ret is None
def test_dict_to_yaml_wrong_type_d():
with pytest.raises(TypeError):
ConfigParser.dict_to_yaml(
d=LIST,
yaml_file=FILE_OUT,
)
def test_dict_to_yaml_wrong_type_yaml_file():
params = Parameters().to_dict()
with pytest.raises(TypeError):
ConfigParser.dict_to_yaml(
d=params,
yaml_file=LIST,
)
def test_dict_to_yaml_file_unavailable():
params = Parameters().to_dict()
ret = ConfigParser.dict_to_yaml(
d=params,
yaml_file=FILE_UNAVAILABLE,
)
assert ret is None
def test_dict_to_yaml_invalid_object():
params = {Parameters(): Parameters()}
with pytest.raises(RepresenterError):
ConfigParser.dict_to_yaml(
d=params,
yaml_file=FILE_OUT,
)
# yaml_to_dict()
def test_yaml_to_dict_file_ok():
d = ConfigParser.yaml_to_dict(yaml_file=FILE_OK)
assert type(d) is dict
assert bool(d) is True
def test_yaml_to_dict_file_not_found():
with pytest.raises(FileNotFoundError):
ConfigParser.yaml_to_dict(yaml_file=FILE_UNAVAILABLE)
def test_yaml_to_dict_file_not_yaml():
with pytest.raises(ParserError):
ConfigParser.yaml_to_dict(yaml_file=FILE_NOT_YAML)
def test_yaml_to_dict_file_txt():
with pytest.raises(TypeError):
ConfigParser.yaml_to_dict(yaml_file=FILE_TXT)
def test_yaml_to_dict_file_empty():
assert ConfigParser.yaml_to_dict(yaml_file=FILE_EMPTY) == {}
| 5,609 | 0 | 1,051 |
0250a0c2744104558e0af87746c740e94fbfa427 | 5,347 | py | Python | otio.py | eric-with-a-c/resolve-otio | 7e5bcfbf1025042368a3e53547cafbe437d14e9d | [
"Apache-2.0"
] | 10 | 2020-10-02T06:12:22.000Z | 2021-11-03T02:34:21.000Z | otio.py | eric-with-a-c/resolve-otio | 7e5bcfbf1025042368a3e53547cafbe437d14e9d | [
"Apache-2.0"
] | 1 | 2021-11-19T00:58:52.000Z | 2022-01-01T20:27:27.000Z | otio.py | eric-with-a-c/resolve-otio | 7e5bcfbf1025042368a3e53547cafbe437d14e9d | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
# CHANGE THE LINE BELOW TO POINT TO YOUR PYTHON SITE PACKAGES
sys.path.append("/path/to/site-packages")
import opentimelineio as otio
resolve = bmd.scriptapp("Resolve")
fu = resolve.Fusion()
ui = fu.UIManager
disp = bmd.UIDispatcher(fu.UIManager)
TRACK_TYPES = {
"video": otio.schema.TrackKind.Video,
"audio": otio.schema.TrackKind.Audio
}
title_font = ui.Font({"PixelSize": 18})
dlg = disp.AddWindow(
{
"WindowTitle": "Export OTIO",
"ID": "OTIOwin",
"Geometry": [250, 250, 250, 100],
"Spacing": 0,
"Margin": 10
},
[
ui.VGroup(
{
"Spacing": 2
},
[
ui.Button(
{
"ID": "exportfilebttn",
"Text": "Select Destination",
"Weight": 1.25,
"ToolTip": "Choose where to save the otio",
"Flat": False
}
),
ui.VGap(),
ui.Button(
{
"ID": "exportbttn",
"Text": "Export",
"Weight": 2,
"ToolTip": "Export the current timeline",
"Flat": False
}
)
]
)
]
)
itm = dlg.GetItems()
dlg.On.OTIOwin.Close = _close_window
dlg.On.exportfilebttn.Clicked = _export_file_pressed
dlg.On.exportbttn.Clicked = _export_button
dlg.Show()
disp.RunLoop()
dlg.Hide()
| 28.441489 | 78 | 0.586684 | #!/usr/bin/env python
import os
import sys
# CHANGE THE LINE BELOW TO POINT TO YOUR PYTHON SITE PACKAGES
sys.path.append("/path/to/site-packages")
import opentimelineio as otio
resolve = bmd.scriptapp("Resolve")
fu = resolve.Fusion()
ui = fu.UIManager
disp = bmd.UIDispatcher(fu.UIManager)
TRACK_TYPES = {
"video": otio.schema.TrackKind.Video,
"audio": otio.schema.TrackKind.Audio
}
def _create_rational_time(frame, fps):
    """Wrap a frame count and frame rate as an OTIO RationalTime."""
    return otio.opentime.RationalTime(float(frame), float(fps))
def _create_time_range(start, duration, fps):
    """Build an OTIO TimeRange from a start frame, duration and frame rate."""
    range_start = _create_rational_time(start, fps)
    range_length = _create_rational_time(duration, fps)
    return otio.opentime.TimeRange(start_time=range_start, duration=range_length)
def _create_reference(mp_item):
    """Build an OTIO ExternalReference for a Resolve media-pool item.

    The media's file path and its Start/Frames/FPS clip properties define
    the reference URL and available range.
    """
    return otio.schema.ExternalReference(
        target_url=mp_item.GetClipProperty("File Path").get("File Path"),
        available_range=_create_time_range(
            mp_item.GetClipProperty("Start").get("Start"),
            mp_item.GetClipProperty("Frames").get("Frames"),
            mp_item.GetClipProperty("FPS").get("FPS")
        )
    )
def _create_markers(tl_item, frame_rate):
    """Convert a Resolve timeline item's markers into OTIO Marker objects.

    Resolve's note text is preserved under metadata["Resolve"]["note"], and
    the marker colour is upper-cased to match OTIO's colour names.
    """
    # GetMarkers() maps marker frame -> {name, duration, color, note, ...}
    tl_markers = tl_item.GetMarkers()
    markers = []
    for m_frame in tl_markers:
        markers.append(
            otio.schema.Marker(
                name=tl_markers[m_frame]["name"],
                marked_range=_create_time_range(
                    m_frame,
                    tl_markers[m_frame]["duration"],
                    frame_rate
                ),
                color=tl_markers[m_frame]["color"].upper(),
                metadata={"Resolve": {"note": tl_markers[m_frame]["note"]}}
            )
        )
    return markers
def _create_clip(tl_item):
    """Convert a Resolve timeline item into an OTIO Clip with its markers.

    The clip's source range comes from the item's left offset and duration
    at the media's own FPS, and its media reference points at the backing
    media-pool item.
    """
    mp_item = tl_item.GetMediaPoolItem()
    frame_rate = mp_item.GetClipProperty("FPS").get("FPS")
    clip = otio.schema.Clip(
        name=tl_item.GetName(),
        source_range=_create_time_range(
            tl_item.GetLeftOffset(),
            tl_item.GetDuration(),
            frame_rate
        ),
        media_reference=_create_reference(mp_item)
    )
    for marker in _create_markers(tl_item, frame_rate):
        clip.markers.append(marker)
    return clip
def _create_gap(gap_start, clip_start, tl_start_frame, frame_rate):
    """Create an OTIO Gap spanning from gap_start up to the clip's start.

    clip_start is an absolute timeline frame, so the timeline start frame is
    subtracted before computing the gap length.
    """
    gap_length = (clip_start - tl_start_frame) - gap_start
    return otio.schema.Gap(
        source_range=_create_time_range(gap_start, gap_length, frame_rate)
    )
def _create_ot_timeline(output_path):
    """Export Resolve's current timeline as an .otio file in *output_path*.

    Walks every video and audio track, emitting a Gap for each hole between
    clips and a Clip for each timeline item backed by a media-pool item.
    Does nothing when *output_path* is falsy (no destination chosen).
    """
    if not output_path:
        return
    project_manager = resolve.GetProjectManager()
    current_project = project_manager.GetCurrentProject()
    dr_timeline = current_project.GetCurrentTimeline()
    ot_timeline = otio.schema.Timeline(name=dr_timeline.GetName())
    for track_type in list(TRACK_TYPES.keys()):
        track_count = dr_timeline.GetTrackCount(track_type)
        # Resolve track indices are 1-based.
        for track_index in range(1, int(track_count) + 1):
            ot_track = otio.schema.Track(
                name="{}{}".format(track_type[0].upper(), track_index),
                kind=TRACK_TYPES[track_type]
            )
            tl_items = dr_timeline.GetItemListInTrack(track_type, track_index)
            for tl_item in tl_items:
                # Items without backing media (e.g. generators) are skipped.
                if tl_item.GetMediaPoolItem() is None:
                    continue
                clip_start = tl_item.GetStart() - dr_timeline.GetStartFrame()
                # If the item starts past the end of what we've appended so
                # far, pad the track with a Gap up to the item's start.
                if clip_start > ot_track.available_range().duration.value:
                    ot_track.append(
                        _create_gap(
                            ot_track.available_range().duration.value,
                            tl_item.GetStart(),
                            dr_timeline.GetStartFrame(),
                            current_project.GetSetting("timelineFrameRate")
                        )
                    )
                ot_track.append(_create_clip(tl_item))
            ot_timeline.tracks.append(ot_track)
    ot_timeline.to_json_file(
        "{}/{}.otio".format(output_path, dr_timeline.GetName())
    )
title_font = ui.Font({"PixelSize": 18})
dlg = disp.AddWindow(
{
"WindowTitle": "Export OTIO",
"ID": "OTIOwin",
"Geometry": [250, 250, 250, 100],
"Spacing": 0,
"Margin": 10
},
[
ui.VGroup(
{
"Spacing": 2
},
[
ui.Button(
{
"ID": "exportfilebttn",
"Text": "Select Destination",
"Weight": 1.25,
"ToolTip": "Choose where to save the otio",
"Flat": False
}
),
ui.VGap(),
ui.Button(
{
"ID": "exportbttn",
"Text": "Export",
"Weight": 2,
"ToolTip": "Export the current timeline",
"Flat": False
}
)
]
)
]
)
itm = dlg.GetItems()
def _close_window(event):
    # Stop the UI dispatcher loop; bound to the window's Close event below.
    disp.ExitLoop()
def _export_button(event):
    # Export to the directory shown on the destination button, then close.
    # Note: the button's Text still reads "Select Destination" if the user
    # never picked a folder; _create_ot_timeline treats that as a path.
    _create_ot_timeline(itm["exportfilebttn"].Text)
    _close_window(None)
def _export_file_pressed(event):
    # Ask Fusion for a destination directory and display it on the button,
    # which doubles as storage for the chosen path.
    selectedPath = fu.RequestDir(os.path.expanduser("~/Documents"))
    itm["exportfilebttn"].Text = selectedPath
dlg.On.OTIOwin.Close = _close_window
dlg.On.exportfilebttn.Clicked = _export_file_pressed
dlg.On.exportbttn.Clicked = _export_button
dlg.Show()
disp.RunLoop()
dlg.Hide()
| 3,704 | 0 | 230 |
19b96d53355eb51d4df2f87f15d58f7f8764f9d3 | 1,833 | py | Python | Python_Script/Insert_Data_To_MongoDB/Insertdata_to withoutDuplication.py | amolkokare/FinTech-Flair-NSE-data | 46941078299dbb63ef35875b3851458b3538c87d | [
"MIT"
] | null | null | null | Python_Script/Insert_Data_To_MongoDB/Insertdata_to withoutDuplication.py | amolkokare/FinTech-Flair-NSE-data | 46941078299dbb63ef35875b3851458b3538c87d | [
"MIT"
] | null | null | null | Python_Script/Insert_Data_To_MongoDB/Insertdata_to withoutDuplication.py | amolkokare/FinTech-Flair-NSE-data | 46941078299dbb63ef35875b3851458b3538c87d | [
"MIT"
] | null | null | null | import pymongo
import os
import datetime,time
import pandas as pd
import glob
import zipfile
import json,codecs
import shutil
dflist=[]
m=[]
os.chdir(r"D:\NSEDATA\2021")
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["NSEDATA2020FinalDataCopy"]
mycol = mydb["BHAVCOPY1"]
#mydb.mycol.create_index([{"TIMESTAMP"}], unique=True )
filelist=glob.glob("*.csv")
for filename in filelist:
df = pd.DataFrame(pd.read_csv(filename))
dflist.append(df)
concatdf = pd.concat(dflist)
# print(concatdf)
rec = concatdf.to_dict("records")
"""dict=[]
for x in mycol.find({'TIMESTAMP':1}):
dict.append(x)
print(x)"""
#print(concatdf)
#AB=list(mycol.find({}, {"_id":0,"TIMESTAMP":1, "SYMBOL":1}))
newpath = r"D:\\NSEPROCESSDATA\\2021"
Q=list(mycol.find({},{ "SYMBOL":1,"TIMESTAMP": 1}))
data = pd.DataFrame.from_dict(Q)
A=(concatdf["TIMESTAMP"])
#print(data)
#print(A)
if (set(concatdf["TIMESTAMP"]).intersection(set(data['TIMESTAMP']))):
print("File is alredy present")
#newfilename = os.path.join(r"D:\NSEDATA\2021",date.strftime('%Y-%m-%d.csv'))
newpath1 =r"D:\\Error_file"
print("File is Succesfully Moved to Error Folder")
shutil.move(filename, newpath1)
#timestamp_name = int(.time())#
#os.rename('path/to/file/name.csv', 'path/to/file/' + timestamp_name + '.csv')
#os.remove(filename)
#print(newfilename)
else:
print("not present")
print("inserted Successfully")
mycol.insert_many(rec)
shutil.move(filename, newpath)
print("Moved Successfully",filename)
"""newpath = r"D:\\NSEPROCESSDATA\\2021"
#for f in filelist :
if os.fspath(filename):
print("file is successfully Moved",filename)
if os.path.exists(newpath):
print("file alrady present")
else:
print(filename)
#if not os.path.exists(newpath):
# os.makedirs(newpath)"""
| 22.353659 | 82 | 0.687943 | import pymongo
import os
import datetime,time
import pandas as pd
import glob
import zipfile
import json,codecs
import shutil
dflist=[]
m=[]
os.chdir(r"D:\NSEDATA\2021")
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["NSEDATA2020FinalDataCopy"]
mycol = mydb["BHAVCOPY1"]
#mydb.mycol.create_index([{"TIMESTAMP"}], unique=True )
filelist=glob.glob("*.csv")
for filename in filelist:
df = pd.DataFrame(pd.read_csv(filename))
dflist.append(df)
concatdf = pd.concat(dflist)
# print(concatdf)
rec = concatdf.to_dict("records")
"""dict=[]
for x in mycol.find({'TIMESTAMP':1}):
dict.append(x)
print(x)"""
#print(concatdf)
#AB=list(mycol.find({}, {"_id":0,"TIMESTAMP":1, "SYMBOL":1}))
newpath = r"D:\\NSEPROCESSDATA\\2021"
Q=list(mycol.find({},{ "SYMBOL":1,"TIMESTAMP": 1}))
data = pd.DataFrame.from_dict(Q)
A=(concatdf["TIMESTAMP"])
#print(data)
#print(A)
if (set(concatdf["TIMESTAMP"]).intersection(set(data['TIMESTAMP']))):
print("File is alredy present")
#newfilename = os.path.join(r"D:\NSEDATA\2021",date.strftime('%Y-%m-%d.csv'))
newpath1 =r"D:\\Error_file"
print("File is Succesfully Moved to Error Folder")
shutil.move(filename, newpath1)
#timestamp_name = int(.time())#
#os.rename('path/to/file/name.csv', 'path/to/file/' + timestamp_name + '.csv')
#os.remove(filename)
#print(newfilename)
else:
print("not present")
print("inserted Successfully")
mycol.insert_many(rec)
shutil.move(filename, newpath)
print("Moved Successfully",filename)
"""newpath = r"D:\\NSEPROCESSDATA\\2021"
#for f in filelist :
if os.fspath(filename):
print("file is successfully Moved",filename)
if os.path.exists(newpath):
print("file alrady present")
else:
print(filename)
#if not os.path.exists(newpath):
# os.makedirs(newpath)"""
| 0 | 0 | 0 |
29fa78a1a4370e7989dd486a303fe7ecc0c6b1ad | 7,015 | py | Python | nablapps/podcast/migrations/0001_squashed_0014_podcast_content_type.py | Amund211/nablaweb | 8105c34615d4b67637e982545fbc6489a131c1f3 | [
"MIT"
] | 17 | 2019-10-07T15:10:58.000Z | 2022-01-21T14:18:07.000Z | nablapps/podcast/migrations/0001_squashed_0014_podcast_content_type.py | Amund211/nablaweb | 8105c34615d4b67637e982545fbc6489a131c1f3 | [
"MIT"
] | 222 | 2019-10-07T15:04:51.000Z | 2022-03-24T12:14:16.000Z | nablapps/podcast/migrations/0001_squashed_0014_podcast_content_type.py | Amund211/nablaweb | 8105c34615d4b67637e982545fbc6489a131c1f3 | [
"MIT"
] | 7 | 2019-10-10T18:53:42.000Z | 2021-10-18T02:13:09.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import image_cropping.fields
| 35.251256 | 126 | 0.380328 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import image_cropping.fields
class Migration(migrations.Migration):
    """Squashed initial migration for the podcast app.

    Stands in for migrations 0001 through 0014 (listed in ``replaces``) and
    creates the ``Season`` and ``Podcast`` models in one step.  Verbose names
    and help texts are intentionally in Norwegian (user-facing strings).
    """

    # The individual migrations this squashed migration replaces; Django uses
    # this list to decide whether the originals have already been applied.
    replaces = [
        ("podcast", "0001_initial"),
        ("podcast", "0002_auto_20150214_2044"),
        ("podcast", "0003_auto_20150521_0025"),
        ("podcast", "0004_auto_20150525_1806"),
        ("podcast", "0005_auto_20150727_2133"),
        ("podcast", "0006_auto_20150727_2135"),
        ("podcast", "0007_auto_20150727_2210"),
        ("podcast", "0008_season_logo"),
        ("podcast", "0009_auto_20150808_1725"),
        ("podcast", "0010_auto_20150810_1206"),
        ("podcast", "0011_auto_20151102_2035"),
        ("podcast", "0012_auto_20151103_0013"),
        ("podcast", "0013_podcast_allow_comments"),
        ("podcast", "0014_podcast_content_type"),
    ]

    # Podcast.content_type references contenttypes.ContentType below.
    dependencies = [
        ("contenttypes", "0002_remove_content_type_name"),
    ]

    operations = [
        # A season groups podcast episodes and carries its own artwork.
        migrations.CreateModel(
            name="Season",
            fields=[
                (
                    "id",
                    models.AutoField(
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                        auto_created=True,
                    ),
                ),
                (
                    "number",
                    models.IntegerField(unique=True, verbose_name="Sesongnummer"),
                ),
                (
                    "banner",
                    models.ImageField(
                        help_text="Sesongbanner.",
                        null=True,
                        upload_to="podcast/images",
                        verbose_name="Banner",
                        blank=True,
                    ),
                ),
                (
                    "logo",
                    models.ImageField(
                        help_text="Podcastlogo.",
                        null=True,
                        upload_to="podcast/images",
                        verbose_name="Logo",
                        blank=True,
                    ),
                ),
            ],
            options={"verbose_name_plural": "Sesonger", "verbose_name": "Sesong"},
        ),
        # A podcast episode (or short clip) with audio file, artwork and
        # publication metadata; newest episodes first (see options.ordering).
        migrations.CreateModel(
            name="Podcast",
            fields=[
                (
                    "id",
                    models.AutoField(
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                        auto_created=True,
                    ),
                ),
                ("title", models.CharField(verbose_name="tittel", max_length=200)),
                (
                    "description",
                    models.TextField(
                        verbose_name="beskrivelse",
                        help_text="Teksten vil bli kuttet etter 250 tegn på sesongsiden.",
                        blank=True,
                    ),
                ),
                ("pub_date", models.DateTimeField(verbose_name="publisert", null=True)),
                (
                    "file",
                    models.FileField(
                        upload_to="podcast",
                        verbose_name="lydfil",
                        help_text="Filformat: MP3",
                        blank=True,
                    ),
                ),
                (
                    "view_counter",
                    models.IntegerField(
                        verbose_name="Visninger", editable=False, default=0
                    ),
                ),
                # Crop region for the episode image (django-image-cropping).
                (
                    "cropping",
                    image_cropping.fields.ImageRatioField(
                        "image",
                        "300x300",
                        allow_fullsize=False,
                        adapt_rotation=False,
                        size_warning=False,
                        help_text="Bildet vises i full form på detaljsiden.",
                        free_crop=False,
                        verbose_name="Beskjæring",
                        hide_image_field=False,
                    ),
                ),
                (
                    "image",
                    models.ImageField(
                        help_text="Bilder som er større enn 300x300 px ser best ut. Du kan beskjære bildet etter opplasting.",
                        null=True,
                        upload_to="news_pictures",
                        verbose_name="Bilde",
                        blank=True,
                    ),
                ),
                (
                    "is_clip",
                    models.BooleanField(
                        verbose_name="Er lydklipp",
                        help_text="Lydklipp blir ikke vist sammen med episodene.",
                        default=False,
                    ),
                ),
                (
                    "season",
                    models.ForeignKey(
                        to="podcast.Season",
                        null=True,
                        verbose_name="Sesong",
                        blank=True,
                        on_delete=models.CASCADE,
                    ),
                ),
                (
                    "extra_markdown",
                    models.TextField(
                        verbose_name="Ekstra markdown",
                        help_text="Ekstra markdown for å putte inn videoer etc.",
                        null=True,
                        blank=True,
                    ),
                ),
                (
                    "publication_date",
                    models.DateTimeField(
                        verbose_name="Publikasjonstid", null=True, blank=True
                    ),
                ),
                (
                    "published",
                    models.NullBooleanField(
                        verbose_name="Publisert",
                        help_text="Dato har høyere prioritet enn dette feltet.",
                        default=True,
                    ),
                ),
                (
                    "allow_comments",
                    models.BooleanField(
                        verbose_name="Tillat kommentarer",
                        help_text="Hvorvidt kommentering er tillatt",
                        default=True,
                    ),
                ),
                (
                    "content_type",
                    models.ForeignKey(
                        to="contenttypes.ContentType",
                        null=True,
                        editable=False,
                        on_delete=models.CASCADE,
                    ),
                ),
            ],
            options={
                "verbose_name_plural": "Podcast",
                "verbose_name": "Podcast",
                "ordering": ["-pub_date"],
            },
        ),
    ]
| 0 | 6,862 | 23 |
98c34a669c73592ded3bf286d8c834e4cf773fce | 3,501 | py | Python | python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_prelu_op.py | zhenlin-work/Paddle | ed7a21dea0ddcffb6f7f33ce21c5c368f5c7866b | [
"Apache-2.0"
] | 2 | 2018-12-27T07:13:55.000Z | 2021-06-16T09:30:09.000Z | python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_prelu_op.py | zhenlin-work/Paddle | ed7a21dea0ddcffb6f7f33ce21c5c368f5c7866b | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/ir/inference/test_mkldnn_prelu_op.py | zhenlin-work/Paddle | ed7a21dea0ddcffb6f7f33ce21c5c368f5c7866b | [
"Apache-2.0"
] | 1 | 2020-11-25T10:41:52.000Z | 2020-11-25T10:41:52.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from auto_scan_test import MkldnnAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig, OpConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
if __name__ == "__main__":
unittest.main()
| 36.852632 | 80 | 0.61611 | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from auto_scan_test import MkldnnAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig, OpConfig
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
class TestMkldnnPreluOp(MkldnnAutoScanTest):
    """Auto-scan test for the prelu operator with oneDNN (MKL-DNN) enabled.

    Hypothesis samples the alpha-broadcast ``mode`` ('all', 'channel',
    'element') and a random input shape of rank 1..4; each sample is turned
    into a ProgramConfig and run through the MKL-DNN inference path.
    """

    def is_program_valid(self, program_config: ProgramConfig) -> bool:
        # "channel" mode needs a channel dimension, so a rank-1 input is
        # rejected; every other sampled combination is accepted.
        # if mode is channel, and in_shape is 1 rank
        if len(program_config.inputs['input_data'].
               shape) == 1 and program_config.ops[0].attrs['mode'] == 'channel':
            return False
        return True

    def sample_program_configs(self, *args, **kwargs):
        # Build one ProgramConfig for the sampled (mode, in_shape) pair.
        def generate_input(*args, **kwargs):
            # Random float32 tensor of the sampled shape.
            return np.random.random(kwargs['in_shape']).astype(np.float32)

        def generate_alpha(*args, **kwargs):
            # Alpha (learned slope) shape depends on the prelu mode:
            #   "all"     -> single scalar,
            #   "channel" -> one value per channel (in_shape[1]),
            #   "element" -> one value per input element.
            if kwargs["mode"] == "all":
                return np.random.random(size=(1)).astype(np.float32)
            elif kwargs["mode"] == "channel":
                if len(kwargs['in_shape']) <= 1:
                    # not valid case, just return 0
                    return np.zeros((1)).astype(np.float32)
                return np.random.random(kwargs['in_shape'][1]).astype(
                    np.float32)
            else:
                if len(kwargs['in_shape']) <= 1:
                    # not valid case, just return 0
                    return np.zeros((1)).astype(np.float32)
                return np.random.random(kwargs['in_shape']).astype(np.float32)

        prelu_op = OpConfig(
            type="prelu",
            inputs={"X": ["input_data"],
                    "Alpha": ["alpha_weight"]},
            outputs={"Out": ["output_data"]},
            attrs={"mode": kwargs['mode']})

        program_config = ProgramConfig(
            ops=[prelu_op],
            weights={
                "alpha_weight":
                TensorConfig(data_gen=partial(generate_alpha, *args, **kwargs))
            },
            inputs={
                "input_data":
                TensorConfig(data_gen=partial(generate_input, *args, **kwargs)),
            },
            outputs=["output_data"])

        yield program_config

    def sample_predictor_configs(self, program_config):
        # Run with MKL-DNN enabled; the tuple holds the comparison
        # tolerances (see MkldnnAutoScanTest for their exact meaning).
        config = self.create_inference_config(use_mkldnn=True)
        yield config, (1e-5, 1e-5)

    def add_skip_pass_case(self):
        # No known-failing configurations to skip for this op.
        pass

    @given(
        mode=st.sampled_from(['all', 'channel', 'element']),
        in_shape=st.lists(
            st.integers(
                min_value=1, max_value=32), min_size=1, max_size=4))
    def test(self, *args, **kwargs):
        self.add_skip_pass_case()
        self.run_test(quant=False, *args, **kwargs)
# Allow running this test module directly with `python`.
if __name__ == "__main__":
    unittest.main()
| 2,061 | 351 | 23 |
51911ce7576e67a254f3a2879abf32c2a39ea806 | 5,055 | py | Python | vision/applications/vision_detection_app.py | lcmonteiro/space-vision-py | 38022c99218de0e1e93ec0bae8d143fa0c787f1d | [
"MIT"
] | 1 | 2019-12-14T20:00:17.000Z | 2019-12-14T20:00:17.000Z | vision/applications/vision_detection_app.py | lcmonteiro/space-vision-py | 38022c99218de0e1e93ec0bae8d143fa0c787f1d | [
"MIT"
] | null | null | null | vision/applications/vision_detection_app.py | lcmonteiro/space-vision-py | 38022c99218de0e1e93ec0bae8d143fa0c787f1d | [
"MIT"
] | null | null | null | # ################################################################################################
# ------------------------------------------------------------------------------------------------
# File: vision_detection_app.py
# Author: Luis Monteiro
#
# Created on nov 8, 2019, 22:00 PM
# ------------------------------------------------------------------------------------------------
# ################################################################################################
# extern
from yaml import safe_load as loader
from logging import getLogger as logger
# intern
from vision.library import VisionDetector
from vision.library.inputs import CameraInput
from vision.library.inputs import FilesystemInput
from vision.library.outputs import WindowOutput
# #############################################################################
# -----------------------------------------------------------------------------
# main
# -----------------------------------------------------------------------------
# #############################################################################
# ############################################################################
# ----------------------------------------------------------------------------
# entry point
# ----------------------------------------------------------------------------
# ############################################################################
if __name__ == '__main__':
from argparse import ArgumentParser
from logging import basicConfig as config_logger
from logging import DEBUG as LEVEL
from sys import stdout
from os.path import abspath, dirname
import seaborn as sns
sns.set_palette("hls")
# ---------------------------------------------------------------
# parse parameters
# ---------------------------------------------------------------
parser = ArgumentParser()
# configuration path
parser.add_argument('--config', '-c',
type = str,
default = '%s/vision_detection_app.yaml'%(dirname(abspath(__file__))),
help = 'configuration file path')
# input options
parser.add_argument('--input', '-i',
type = str,
default = 'camera',
choices =['camera', 'filesystem'],
help = 'input option')
# output options
parser.add_argument('--output', '-o',
type = str,
default = 'window',
choices =['window'],
help = 'output option')
parser.add_argument('src',
default = '0',
nargs = '?',
help ='source id')
parser.add_argument('dst',
default = 'vision detection',
nargs = '?',
help ='destination id')
args = parser.parse_args()
# ---------------------------------------------------------------
# log configuration
# ---------------------------------------------------------------
config_logger(
stream = stdout,
filemode = 'w',
level = LEVEL,
#filename= 'vision_detection_app.log',
format =
'[%(asctime)s] '
'[%(levelname)-10s] '
'[%(funcName)s] %(message)s')
# ---------------------------------------------------------------
# main
# ---------------------------------------------------------------
try:
exit(main(vars(args)))
except Exception as e:
logger().exception(e)
exit(-1)
except KeyboardInterrupt:
exit(0)
# #################################################################################################
# -------------------------------------------------------------------------------------------------
# End
# -------------------------------------------------------------------------------------------------
# ################################################################################################# | 38.007519 | 99 | 0.314144 | # ################################################################################################
# ------------------------------------------------------------------------------------------------
# File: vision_detection_app.py
# Author: Luis Monteiro
#
# Created on nov 8, 2019, 22:00 PM
# ------------------------------------------------------------------------------------------------
# ################################################################################################
# extern
from yaml import safe_load as loader
from logging import getLogger as logger
# intern
from vision.library import VisionDetector
from vision.library.inputs import CameraInput
from vision.library.inputs import FilesystemInput
from vision.library.outputs import WindowOutput
# #############################################################################
# -----------------------------------------------------------------------------
# main
# -----------------------------------------------------------------------------
# #############################################################################
def main(args):
    """Wire up and run the vision detector.

    Args:
        args: mapping with the parsed command-line options -- 'config'
            (YAML configuration path), 'input'/'src' (input backend name
            and source id), 'output'/'dst' (output backend name and
            destination id).
    """
    # ----------------------------------------------------
    # init and load filters
    # ----------------------------------------------------
    vision_detector = VisionDetector(
        # configuration (parsed YAML)
        loader(open(args['config'])),
        # input options: backend name -> class, instantiated with the source id
        {
            'camera'     : CameraInput,
            'filesystem' : FilesystemInput
        }[args['input']](args['src']),
        # output options: backend name -> class, instantiated with the destination id
        {
            'window' : WindowOutput
        }[args['output']](args['dst'])
    )
    # ----------------------------------------------------
    # configure filter
    # ----------------------------------------------------
    vision_detector.set_filters()
    # ----------------------------------------------------
    # run detection
    # ----------------------------------------------------
    # NOTE(review): `serve` presumably registers `process` as the detection
    # callback and drives the processing loop -- confirm in VisionDetector.
    @vision_detector.serve
    def process(id, results):
        # Log the label of each detection produced by filter `id`.
        for result in results:
            logger().info(
                'filter={} label={}]'.format(
                    id, result.label()
                )
            )
# ############################################################################
# ----------------------------------------------------------------------------
# entry point
# ----------------------------------------------------------------------------
# ############################################################################
if __name__ == '__main__':
    from argparse import ArgumentParser
    from logging import basicConfig as config_logger
    from logging import DEBUG as LEVEL
    from sys import stdout
    from os.path import abspath, dirname
    import seaborn as sns
    sns.set_palette("hls")
    # ---------------------------------------------------------------
    # parse parameters
    # ---------------------------------------------------------------
    parser = ArgumentParser()
    # configuration path (defaults to the YAML next to this script)
    parser.add_argument('--config', '-c',
        type    = str,
        default = '%s/vision_detection_app.yaml'%(dirname(abspath(__file__))),
        help    = 'configuration file path')
    # input options
    parser.add_argument('--input', '-i',
        type    = str,
        default = 'camera',
        choices =['camera', 'filesystem'],
        help    = 'input option')
    # output options
    parser.add_argument('--output', '-o',
        type    = str,
        default = 'window',
        choices =['window'],
        help    = 'output option')
    # positional source id (e.g. camera index or filesystem path)
    parser.add_argument('src',
        default = '0',
        nargs   = '?',
        help    ='source id')
    # positional destination id (e.g. window title)
    parser.add_argument('dst',
        default = 'vision detection',
        nargs   = '?',
        help    ='destination id')
    args = parser.parse_args()
    # ---------------------------------------------------------------
    # log configuration (DEBUG level to stdout)
    # ---------------------------------------------------------------
    config_logger(
        stream   = stdout,
        filemode = 'w',
        level    = LEVEL,
        #filename= 'vision_detection_app.log',
        format   =
        '[%(asctime)s] '
        '[%(levelname)-10s] '
        '[%(funcName)s] %(message)s')
    # ---------------------------------------------------------------
    # main: exit non-zero on unexpected errors, zero on Ctrl-C
    # ---------------------------------------------------------------
    try:
        exit(main(vars(args)))
    except Exception as e:
        logger().exception(e)
        exit(-1)
    except KeyboardInterrupt:
        exit(0)
# #################################################################################################
# -------------------------------------------------------------------------------------------------
# End
# -------------------------------------------------------------------------------------------------
# ################################################################################################# | 1,079 | 0 | 22 |
b78eae0a34ad7b1ae8948396c47fcb1452d394a8 | 2,986 | py | Python | test/test_languages/testCsharp.py | xdfeng/lizard | f867a0f23c94e94d69462ccd9e74eb750c1b8749 | [
"MIT"
] | null | null | null | test/test_languages/testCsharp.py | xdfeng/lizard | f867a0f23c94e94d69462ccd9e74eb750c1b8749 | [
"MIT"
] | null | null | null | test/test_languages/testCsharp.py | xdfeng/lizard | f867a0f23c94e94d69462ccd9e74eb750c1b8749 | [
"MIT"
] | null | null | null | import unittest
from lizard import analyze_file, FileAnalyzer, get_extensions
| 32.813187 | 69 | 0.482251 | import unittest
from lizard import analyze_file, FileAnalyzer, get_extensions
def get_csharpe_fileinfo(source_code):
    """Run lizard's analyzer on *source_code*, treated as a C# file."""
    analysis = analyze_file.analyze_source_code("a.cs", source_code)
    return analysis
def get_csharpe_function_list(source_code):
    """Return the functions lizard finds in *source_code* parsed as C#."""
    file_info = get_csharpe_fileinfo(source_code)
    return file_info.function_list
class TestCsharpe(unittest.TestCase):
    """Cyclomatic-complexity tests for lizard's C# language support.

    Each test feeds a small C# snippet to the analyzer and checks the
    cyclomatic complexity reported for the first (only) function.
    """

    def test_function_with_one(self):
        # A straight-line method has complexity 1.
        result = get_csharpe_function_list('''
        public void Method()
        {
            Console.WriteLine("Hello World!");
        }
        ''')
        self.assertEqual(1, result[0].cyclomatic_complexity)

    def test_function_with_two(self):
        # One `if` adds one branch: complexity 2.
        result = get_csharpe_function_list('''
        void Method(bool condition)
        {
            if (condition)
            {
                Console.WriteLine("Hello World!");
            }
        }
        ''')
        self.assertEqual(2, result[0].cyclomatic_complexity)

    def test_function_with_three(self):
        # `if` plus the `||` operator each add a branch: complexity 3.
        result = get_csharpe_function_list('''
        public void Method(bool condition1, bool condition2)
        {
            if (condition1 || condition2)
            {
                Console.WriteLine("Hello World!");
            }
        }
        ''')
        self.assertEqual(3, result[0].cyclomatic_complexity)

    def test_function_with_eight(self):
        # Seven `case` labels add seven branches: complexity 8.
        # NOTE(review): the snippet below ends with one unbalanced closing
        # brace; lizard tolerates it, but confirm it is intentional.
        result = get_csharpe_function_list('''
        public void Method(DayOfWeek day)
        {
            switch (day)
            {
                case DayOfWeek.Monday:
                    Console.WriteLine("Today is Monday!");
                    break;
                case DayOfWeek.Tuesday:
                    Console.WriteLine("Today is Tuesday!");
                    break;
                case DayOfWeek.Wednesday:
                    Console.WriteLine("Today is Wednesday!");
                    break;
                case DayOfWeek.Thursday:
                    Console.WriteLine("Today is Thursday!");
                    break;
                case DayOfWeek.Friday:
                    Console.WriteLine("Today is Friday!");
                    break;
                case DayOfWeek.Saturday:
                    Console.WriteLine("Today is Saturday!");
                    break;
                case DayOfWeek.Sunday:
                    Console.WriteLine("Today is Sunday!");
                    break;
            }
        }
        }
        ''')
        self.assertEqual(8, result[0].cyclomatic_complexity)

    def test_null_coalecing_operator(self):
        # The `??` null-coalescing operator counts as a branch: complexity 2.
        result = get_csharpe_function_list('''
        public void Method()
        {
            a ?? b;
        }
        ''')
        self.assertEqual(2, result[0].cyclomatic_complexity)
| 2,685 | 16 | 204 |