hexsha
stringlengths
40
40
size
int64
5
2.06M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
3
248
max_stars_repo_name
stringlengths
5
125
max_stars_repo_head_hexsha
stringlengths
40
78
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
3
248
max_issues_repo_name
stringlengths
5
125
max_issues_repo_head_hexsha
stringlengths
40
78
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
3
248
max_forks_repo_name
stringlengths
5
125
max_forks_repo_head_hexsha
stringlengths
40
78
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
5
2.06M
avg_line_length
float64
1
1.02M
max_line_length
int64
3
1.03M
alphanum_fraction
float64
0
1
count_classes
int64
0
1.6M
score_classes
float64
0
1
count_generators
int64
0
651k
score_generators
float64
0
1
count_decorators
int64
0
990k
score_decorators
float64
0
1
count_async_functions
int64
0
235k
score_async_functions
float64
0
1
count_documentation
int64
0
1.04M
score_documentation
float64
0
1
761e7270b2e52e796a66290cdfaca9f03af0e43f
3,757
py
Python
Conecta 4/AlphaZero/test.py
alberto-maurel/Aprendizaje-por-refuerzo-Fundamentos-te-ricos-del-algoritmo-AlphaZero-e-implementaci-n
8c4b54ad897b32fb43058713ff658b97c44ec578
[ "MIT" ]
null
null
null
Conecta 4/AlphaZero/test.py
alberto-maurel/Aprendizaje-por-refuerzo-Fundamentos-te-ricos-del-algoritmo-AlphaZero-e-implementaci-n
8c4b54ad897b32fb43058713ff658b97c44ec578
[ "MIT" ]
null
null
null
Conecta 4/AlphaZero/test.py
alberto-maurel/Aprendizaje-por-refuerzo-Fundamentos-te-ricos-del-algoritmo-AlphaZero-e-implementaci-n
8c4b54ad897b32fb43058713ff658b97c44ec578
[ "MIT" ]
null
null
null
from board import Game from agent import Agent from humanAgent import HumanAgent from randomAgent import RandomAgent from MCTS import * from NeuralNetwork import Connect4Zero import numpy as np import matplotlib.pyplot as plt import tensorflow as tf #AI VS RANDOM ''' def main(): builder = Connect4Zero() #First, we create an empty board model = builder.load_model('nn_weights/d3-150.h5') random_wins = 0 ai_wins = 0 first_ai = True first_ai = False board = Game() evaluation = model.predict([board.get_state()]) mcts = MCTS(evaluation[0][0]) player = HumanAgent() finished = False draw = False while not finished and not draw: board.print_board() print('pol: ', model.predict([board.get_state()])[0][0]) print('val: ', model.predict([board.get_state()])[1][0][0]) if first_ai: if board.current_player == 0: action, state, mcts_distribution = mcts.makeMove(board, model) else: player.update_board(board) action = player.pick_move() else: if board.current_player == 1: action, state, mcts_distribution = mcts.makeMove(board, model) else: player.update_board(board) action = player.pick_move() finished = board.make_move(action) if not finished and not board.is_possible_to_move(): draw = True if first_ai and board.current_player == 0 or not first_ai and board.current_player == 1: mcts.update_board(board, action, model) if finished: if board.current_player == 0: print('Second player wins') else: print('First player wins') else: print('Draw') board.print_board() ''' # AI VS AI NUM_PARTIDAS = 30 def main(): builder = Connect4Zero() #Load the models model1 = builder.load_model('nn_weights/d3-0.h5') model2 = builder.load_model('nn_weights/d3-200.h5') first_player_wins = 0 second_player_wins = 0 for i in range(0, NUM_PARTIDAS): #First, we create an empty board board = Game() evaluation1 = model1.predict([board.get_state()]) mcts1 = MCTS(evaluation1[0][0]) evaluation2 = model2.predict([board.get_state()]) mcts2 = MCTS(evaluation2[0][0]) finished = False draw = False while not finished and 
not draw: if board.current_player == 0: action, state, mcts_distribution = mcts1.makeMove(board, model1) else: action, state, mcts_distribution = mcts2.makeMove(board, model2) finished = board.make_move(action) if not finished and not board.is_possible_to_move(): draw = True if board.current_player == 0: mcts1.update_board(board, action, model1) else: mcts2.update_board(board, action, model2) if finished: if board.current_player == 0: second_player_wins += 1 print('Second player wins') else: first_player_wins += 1 print('First player wins') else: print('Draw') board.print_board() print('FIRST: ', first_player_wins, ' SECOND: ', second_player_wins) if __name__ == "__main__": main()
30.056
97
0.541389
0
0
0
0
0
0
0
0
1,896
0.504658
761ec4083d73522c128e2f27edb3c3a48ac0baed
4,390
py
Python
fabio/pilatusimage.py
picca/fabio
bc3aae330bef6e1c983007562157edfe6d7daf91
[ "Apache-2.0" ]
null
null
null
fabio/pilatusimage.py
picca/fabio
bc3aae330bef6e1c983007562157edfe6d7daf91
[ "Apache-2.0" ]
2
2019-04-24T13:43:41.000Z
2019-06-13T08:54:02.000Z
fabio/pilatusimage.py
boesecke/fabio
11350e445a6def4d02c6860aea3ae7f36652af6a
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 # # Project: X-ray image reader # https://github.com/silx-kit/fabio # # # Copyright (C) European Synchrotron Radiation Facility, Grenoble, France # # Principal author: Jérôme Kieffer (Jerome.Kieffer@ESRF.eu) # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE from __future__ import with_statement, print_function __authors__ = ["V. Valls"] __license__ = "MIT" __date__ = "12/11/2018" import re import logging from . 
import tifimage _logger = logging.getLogger(__name__) class PilatusTiffFrame(tifimage.TiffFrame): """Frame container for TIFF format generated by a Pilatus detector""" def __init__(self, data, tiff_header, pilatus_header): super(PilatusTiffFrame, self).__init__(data, tiff_header) self.pilatus_header = pilatus_header # Override the header self._header = pilatus_header class PilatusImage(tifimage.TifImage): """ Read in Pilatus format, also pilatus images, including header info """ DESCRIPTION = "Pilatus file format based on Tiff" DEFAULT_EXTENSIONS = ["tif", "tiff"] _keyvalue_spliter = re.compile(r"\s*[,:=\s]\s*") """It allow to split the first white space, colon, coma, or equal character and remove white spaces around""" def _create_pilatus_header(self, tiff_header): """ Parse Pilatus header from a TIFF header. The Pilatus header is stored in the metadata ImageDescription (tag 270) as an ASCII text which looks like: .. block-code:: python imageDescription = '# Pixel_size 172e-6 m x 172e-6 m\r\n'\ '# Silicon sensor, thickness 0.000320 m\r\n# Exposure_time 90.000000 s\r\n'\ '# Exposure_period 90.000000 s\r\n# Tau = 0 s\r\n'\ '# Count_cutoff 1048574 counts\r\n# Threshold_setting 0 eV\r\n'\ '# Gain_setting not implemented (vrf = 9.900)\r\n'\ '# N_excluded_pixels = 0\r\n# Excluded_pixels: (nil)\r\n'\ '# Flat_field: (nil)\r\n# Trim_directory: (nil)\r\n\x00' :rtype: OrderedDict """ if "imageDescription" not in tiff_header: # It is not a Pilatus TIFF image raise IOError("Image is not a Pilatus image") header = self.check_header() description = tiff_header["imageDescription"] for line in description.split("\n"): index = line.find('# ') if index == -1: if line.strip(" \x00") != "": # If it is not an empty line _logger.debug("Pilatus header line '%s' misformed. Skipped", line) continue line = line[2:].strip() if line == "": # empty line continue result = self._keyvalue_spliter.split(line, 1) if len(result) != 2: _logger.debug("Pilatus header line '%s' misformed. 
Skipped", line) continue key, value = result header[key] = value return header def _create_frame(self, image_data, tiff_header): """Create exposed data from TIFF information""" pilatus_header = self._create_pilatus_header(tiff_header) frame = PilatusTiffFrame(image_data, tiff_header, pilatus_header) return frame pilatusimage = PilatusImage
35.983607
92
0.650342
2,831
0.644581
0
0
0
0
0
0
2,729
0.621357
762066f7d39644fbbdc113555a1f0d651b3596ba
8,699
py
Python
tests/components/tasks/gqa_tests.py
aasseman/pytorchpipe
9cb17271666061cb19fe24197ecd5e4c8d32c5da
[ "Apache-2.0" ]
232
2019-04-15T20:49:06.000Z
2022-03-31T06:44:08.000Z
tests/components/tasks/gqa_tests.py
aasseman/pytorchpipe
9cb17271666061cb19fe24197ecd5e4c8d32c5da
[ "Apache-2.0" ]
53
2019-04-19T22:50:16.000Z
2019-08-07T21:23:08.000Z
tests/components/tasks/gqa_tests.py
aasseman/pytorchpipe
9cb17271666061cb19fe24197ecd5e4c8d32c5da
[ "Apache-2.0" ]
34
2019-04-15T20:49:22.000Z
2021-12-07T17:04:01.000Z
# -*- coding: utf-8 -*- # # Copyright (C) tkornuta, IBM Corporation 2019 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. __author__ = "Tomasz Kornuta" import unittest from unittest.mock import MagicMock, patch from os import path from ptp.components.mixins.io import check_file_existence from ptp.components.tasks.image_text_to_class.gqa import GQA from ptp.configuration.config_interface import ConfigInterface class TestGQA(unittest.TestCase): def test_training_0_split(self): """ Tests the training_0 split. ..note: Test on real data is performed only if adequate json source file is found. """ # Empty config. config = ConfigInterface() config.add_config_params({"gqa_training_0": {"split": "training_0", "globals": {"image_height": "gqa_image_height", "image_width": "gqa_image_width"}}}) # Check the existence of test set. if False: #check_file_existence(path.expanduser('~/data/gqa/questions1.2/train_all_questions'),'train_all_questions_0.json'): # Create object. task = GQA("gqa_training_0", config["gqa_training_0"]) # Check dataset size. self.assertEqual(len(task), 1430536) # Get sample. sample = task[0] else: processed_dataset_content = [ {'sample_ids': '07333408', 'image_ids': '2375429', 'questions': 'What is on the white wall?', 'answers': 'pipe', 'full_answers': 'The pipe is on the wall.'} ] # Mock up the load_dataset method. 
with patch( "ptp.components.tasks.image_text_to_class.gqa.GQA.load_dataset", MagicMock( side_effect = [ processed_dataset_content ] )): task = GQA("gqa_training_0", config["gqa_training_0"]) # Mock up the get_image method. with patch( "ptp.components.tasks.image_text_to_class.gqa.GQA.get_image", MagicMock( side_effect = [ "0" ] )): sample = task[0] # Check sample. self.assertEqual(sample['indices'], 0) self.assertEqual(sample['sample_ids'], '07333408') self.assertEqual(sample['image_ids'], '2375429') self.assertEqual(sample['questions'], 'What is on the white wall?') self.assertEqual(sample['answers'], 'pipe') self.assertEqual(sample['full_answers'], 'The pipe is on the wall.') def test_validation_split(self): """ Tests the validation split. ..note: Test on real data is performed only if adequate json source file is found. """ # Empty config. config = ConfigInterface() config.add_config_params({"gqa_validation": {"split": "validation", "globals": {"image_height": "gqa_image_height", "image_width": "gqa_image_width"}}}) # Check the existence of test set. if False: #check_file_existence(path.expanduser('~/data/gqa/questions1.2'),'val_all_questions.json'): # Create object. task = GQA("gqa_validation", config["gqa_validation"]) # Check dataset size. self.assertEqual(len(task), 2011853) # Get sample. sample = task[0] else: processed_dataset_content = [ {'sample_ids': '05451384', 'image_ids': '2382986', 'questions': 'Are there blankets under the brown cat?', 'answers': 'no', 'full_answers': 'No, there is a towel under the cat.'} ] # Mock up the load_dataset method. with patch( "ptp.components.tasks.image_text_to_class.gqa.GQA.load_dataset", MagicMock( side_effect = [ processed_dataset_content ] )): task = GQA("gqa_validation", config["gqa_validation"]) # Mock up the get_image method. with patch( "ptp.components.tasks.image_text_to_class.gqa.GQA.get_image", MagicMock( side_effect = [ "0" ] )): sample = task[0] # Check sample. 
self.assertEqual(sample['indices'], 0) self.assertEqual(sample['sample_ids'], '05451384') self.assertEqual(sample['image_ids'], '2382986') self.assertEqual(sample['questions'], 'Are there blankets under the brown cat?') self.assertEqual(sample['answers'], 'no') self.assertEqual(sample['full_answers'], 'No, there is a towel under the cat.') def test_test_dev_split(self): """ Tests the test_dev split. ..note: Test on real data is performed only if adequate json source file is found. """ # Empty config. config = ConfigInterface() config.add_config_params({"gqa_testdev": {"split": "test_dev", "globals": {"image_height": "gqa_image_height", "image_width": "gqa_image_width"}}}) # Check the existence of test set. if False: #check_file_existence(path.expanduser('~/data/gqa/questions1.2'),'testdev_all_questions.json'): # Create object. task = GQA("gqa_testdev", config["gqa_testdev"]) # Check dataset size. self.assertEqual(len(task), 172174) # Get sample. sample = task[0] else: processed_dataset_content = [ {'sample_ids': '20968379', 'image_ids': 'n288870', 'questions': 'Do the shorts have dark color?', 'answers': 'yes', 'full_answers': 'Yes, the shorts are dark.'} ] # Mock up the load_dataset method. with patch( "ptp.components.tasks.image_text_to_class.gqa.GQA.load_dataset", MagicMock( side_effect = [ processed_dataset_content ] )): task = GQA("gqa_testdev", config["gqa_testdev"]) # Mock up the get_image method. with patch( "ptp.components.tasks.image_text_to_class.gqa.GQA.get_image", MagicMock( side_effect = [ "0" ] )): sample = task[0] # Check sample. self.assertEqual(sample['indices'], 0) self.assertEqual(sample['sample_ids'], '20968379') self.assertEqual(sample['image_ids'], 'n288870') self.assertEqual(sample['questions'], 'Do the shorts have dark color?') self.assertEqual(sample['answers'], 'yes') self.assertEqual(sample['full_answers'], 'Yes, the shorts are dark.') def test_test_split(self): """ Tests the test split. 
..note: Test on real data is performed only if adequate json source file is found. """ # Empty config. config = ConfigInterface() config.add_config_params({"gqa_test": {"split": "test", "globals": {"image_height": "gqa_image_height", "image_width": "gqa_image_width"}}}) # Check the existence of test set. if False: #check_file_existence(path.expanduser('~/data/gqa/questions1.2'),'test_all_questions.json'): # Create object. task = GQA("gqa_test", config["gqa_test"]) # Check dataset size. self.assertEqual(len(task), 1340048) # Get sample. sample = task[0] else: processed_dataset_content = [ {'sample_ids': '201971873', 'image_ids': 'n15740', 'questions': 'Is the blanket to the right of a pillow?', 'answers': '<UNK>', 'full_answers': '<UNK>'} ] # Mock up the load_dataset method. with patch( "ptp.components.tasks.image_text_to_class.gqa.GQA.load_dataset", MagicMock( side_effect = [ processed_dataset_content ] )): task = GQA("gqa_test", config["gqa_test"]) # Mock up the get_image method. with patch( "ptp.components.tasks.image_text_to_class.gqa.GQA.get_image", MagicMock( side_effect = [ "0" ] )): sample = task[0] # Check sample. self.assertEqual(sample['indices'], 0) self.assertEqual(sample['sample_ids'], '201971873') self.assertEqual(sample['image_ids'], 'n15740') self.assertEqual(sample['questions'], 'Is the blanket to the right of a pillow?') self.assertEqual(sample['answers'], '<UNK>') self.assertEqual(sample['full_answers'], '<UNK>') #if __name__ == "__main__": # unittest.main()
42.642157
222
0.620991
7,724
0.887918
0
0
0
0
0
0
4,736
0.54443
7620fcf40ca8ac2c72d7f34f2ed6b0553720f0af
2,613
py
Python
Start.py
nuti23/StudentsAssignmentsManagement
ca3cb8a6f38c31cd0544a63179691f139a02612d
[ "Apache-2.0" ]
null
null
null
Start.py
nuti23/StudentsAssignmentsManagement
ca3cb8a6f38c31cd0544a63179691f139a02612d
[ "Apache-2.0" ]
null
null
null
Start.py
nuti23/StudentsAssignmentsManagement
ca3cb8a6f38c31cd0544a63179691f139a02612d
[ "Apache-2.0" ]
null
null
null
from Domain.student import Student from Repository.assignment_repository import AssignmentRepository from Repository.grade_repository import GradeRepository from Repository.student_repository import StudentRepository from Repository_Binary_File.assignment_repository_binary_file import AssignmentRepositoryBinaryFile from Repository_Binary_File.grade_repository_binary_file import GradeRepositoryBinaryFile from Repository_Binary_File.student_repository_binary_file import StudentRepositoryBinaryFile from Repository_TextFile.assignment_repository_text_file import AssignmentRepositoryTextFile from Repository_TextFile.grade_repository_text_file import GradeRepositoryTextFile from Repository_TextFile.student_repository_text_file import StudentRepositoryTextFile from Service.assignment_service import AssignmentService from Service.grade_service import GradeService from Service.settings_properties import SettingsProperties from Service.student_service import StudentService from Ui.console import Ui from Undo.undo_service import UndoRedoService from Validators.assignment_validator import AssignmentValidator from Validators.grade_validator import GradeValidator from Validators.student_validator import StudentValidator settings_properties = SettingsProperties() dictionary = settings_properties.settings_data if dictionary["repository"] == "inmemory": student_repository = StudentRepository() assignment_repository = AssignmentRepository() grade_repository = GradeRepository() elif dictionary["repository"] == "textfiles": student_repository = StudentRepositoryTextFile(dictionary['student']) assignment_repository = AssignmentRepositoryTextFile(dictionary['assignment']) grade_repository = GradeRepositoryTextFile(dictionary['grade']) elif dictionary["repository"] == "binaryfiles": student_repository = StudentRepositoryBinaryFile(dictionary['student']) assignment_repository = AssignmentRepositoryBinaryFile(dictionary['assignment']) grade_repository = 
GradeRepositoryBinaryFile(dictionary['grade']) undo_redo_service = UndoRedoService() grade_validator = GradeValidator() grade_service = GradeService(grade_repository, grade_validator, undo_redo_service) student_validator = StudentValidator() student_service = StudentService(student_repository, student_validator, grade_service, undo_redo_service) assignment_validator = AssignmentValidator() assignment_service = AssignmentService(assignment_repository, assignment_validator, grade_service, undo_redo_service) ui = Ui(student_service, assignment_service, grade_service, undo_redo_service) ui.start()
46.660714
117
0.867585
0
0
0
0
0
0
0
0
126
0.04822
762189364ae8346baa62adb5a86bb79745cc8954
83
py
Python
contrib/frontends/py/nntpchan/__init__.py
majestrate/nntpchan
f92f68c3cdce4b7ce6d4121ca4356b36ebcd933f
[ "MIT" ]
233
2015-08-06T02:51:52.000Z
2022-02-14T11:29:13.000Z
contrib/frontends/py/nntpchan/__init__.py
Revivify/nntpchan
0d555bb88a2298dae9aacf11348e34c52befa3d8
[ "MIT" ]
98
2015-09-19T22:29:00.000Z
2021-06-12T09:43:13.000Z
contrib/frontends/py/nntpchan/__init__.py
Revivify/nntpchan
0d555bb88a2298dae9aacf11348e34c52befa3d8
[ "MIT" ]
49
2015-08-06T02:51:55.000Z
2020-03-11T04:23:56.000Z
# # entry for gunicorn # from nntpchan.app import app from nntpchan import viewsp
11.857143
28
0.771084
0
0
0
0
0
0
0
0
22
0.26506
76239cf65a40116502dab08045c558ccfa503910
6,997
py
Python
_unittests/ut_profiling/test_event_profiler.py
sdpython/cpyquickhelper
c2bdebad2201c7e10a5999a836bbf53e27b963c7
[ "MIT" ]
2
2017-10-03T20:39:13.000Z
2019-02-06T15:24:04.000Z
_unittests/ut_profiling/test_event_profiler.py
sdpython/cpyquickhelper
c2bdebad2201c7e10a5999a836bbf53e27b963c7
[ "MIT" ]
21
2017-09-17T11:14:04.000Z
2021-01-01T13:24:20.000Z
_unittests/ut_profiling/test_event_profiler.py
sdpython/cpyquickhelper
c2bdebad2201c7e10a5999a836bbf53e27b963c7
[ "MIT" ]
null
null
null
""" @brief test log(time=3s) """ import unittest import inspect import logging from time import sleep, perf_counter from pyquickhelper.pycode import ExtTestCase from cpyquickhelper.profiling import ( EventProfiler, WithEventProfiler) from cpyquickhelper.profiling.event_profiler import EventProfilerDebug class TestEventProfiler(ExtTestCase): def test_profiling_exc(self): ev = EventProfiler(impl='python') self.assertRaise(lambda: ev.stop(), RuntimeError) ev.start() self.assertRaise(lambda: ev.start(), RuntimeError) ev.stop() self.assertRaise(lambda: ev.stop(), RuntimeError) def test_profiling(self): def f1(t): sleep(t) def f2(): f1(0.1) def f3(): li = [0 for i in range(0, 10000)] f1(0.2) return li def f4(): f2() f3() ev = EventProfiler(impl='python') ev.start() f4() ev.stop() res = ev.retrieve_raw_results() self.assertEqual(res.shape[1], ev.n_columns) df = ev.retrieve_results(False) self.assertEqual(df.shape, (res.shape[0], 10)) expected = ['time', 'value1', 'value2', 'event', 'name', 'mod', 'lineno', 'from_name', 'from_mod', 'from_line'] self.assertEqual(list(df.columns), expected) self.assertIn('sleep', set(df['name'])) self.assertIn('time', set(df['mod'])) def test_profiling_20(self): def f1(t): sleep(t) def f2(): f1(0.1) def f3(): f1(0.2) def f4(): f2() f3() ev = EventProfiler(size=30, impl='python') ev.start() f4() ev.stop() res = ev.retrieve_raw_results() self.assertGreater(res.shape[0], 10) self.assertEqual(res.shape[1], ev.n_columns) df = ev.retrieve_results(False) self.assertEqual(df.shape, (res.shape[0], 10)) expected = ['time', 'value1', 'value2', 'event', 'name', 'mod', 'lineno', 'from_name', 'from_mod', 'from_line'] self.assertEqual(list(df.columns), expected) def test_profiling_raise(self): def fraise(): raise RuntimeError("issue") def catch_exc(): try: fraise() return None except RuntimeError as e: return str(e) ev = EventProfiler(impl='python') ev.start() catch_exc() ev.stop() df = ev.retrieve_results(True) self.assertEqual(df.shape[1], 10) 
self.assertGreater(df.shape[0], 5) self.assertIn("catch_exc", set(df['name'])) def test_with_sleep(self): def fsleep(): sleep(0.1) prof = WithEventProfiler(impl='python') with prof: fsleep() df = prof.report self.assertGreater(df.shape[0], 1) self.assertEqual(df.shape[1], 10) def test_with_raise(self): def fraise(): raise RuntimeError("TESTISSUE") try: prof = WithEventProfiler(impl='python') with prof: fraise() except RuntimeError as e: self.assertEqual(str(e), 'TESTISSUE') def test_debug(self): N = 100000 ev = EventProfilerDebug(impl='python') ev.start() begin = perf_counter() for _ in range(N): ev.log_event(inspect.currentframe(), 'call', None) ev.log_event(inspect.currentframe(), 'return', None) end = perf_counter() ev.stop() duration = end - begin msg = "evpy: %1.6f microsecond" % (duration / N * 1e6) self.assertNotEmpty(msg) if __name__ == "__main__": print(msg) def test_debug_c(self): N = 100000 ev = EventProfilerDebug(impl='pybind11', size=10000000) ev.start() begin = perf_counter() for _ in range(N): ev._buffer.c_log_event( # pylint: disable=W0212 inspect.currentframe(), 'call', None) ev._buffer.c_log_event( # pylint: disable=W0212 inspect.currentframe(), 'return', None) end = perf_counter() ev.stop() duration = end - begin msg = "evc+: %1.6f microsecond" % (duration / N * 1e6) self.assertNotEmpty(msg) if __name__ == "__main__": print(msg) def test_debug_logging(self): N = 100 logger = logging.getLogger('cpyquickhelper-ut') logger.setLevel(logging.INFO) ev = EventProfilerDebug(impl='pybind11', size=10000000) ev.start() begin = perf_counter() for _ in range(N): logger.info("call %d", inspect.currentframe().f_lineno) logger.info("return %d", inspect.currentframe().f_lineno) end = perf_counter() ev.stop() duration = end - begin msg = "logg: %1.6f microsecond" % (duration / N * 1e6) self.assertNotEmpty(msg) if __name__ == "__main__": print(msg) def test_profiling_c(self): def f1(t): sleep(t) def f2(): f1(0.1) def f3(): li = [0 for i in range(0, 10000)] f1(0.2) 
return li def f4(): f2() f3() ev = EventProfiler(impl='pybind11') ev.start() f4() ev.stop() res = ev.retrieve_raw_results() self.assertEqual(res.shape[1], ev.n_columns) df = ev.retrieve_results(False) self.assertEqual(df.shape, (res.shape[0], 10)) expected = ['time', 'value1', 'value2', 'event', 'name', 'mod', 'lineno', 'from_name', 'from_mod', 'from_line'] self.assertEqual(list(df.columns), expected) self.assertIn('sleep', set(df['name'])) self.assertIn('time', set(df['mod'])) def test_profiling_c_20(self): def f1(t): sleep(t) def f2(): f1(0.1) def f3(): li = [0 for i in range(0, 10000)] f1(0.2) return li def f4(): f2() f3() ev = EventProfiler(impl='pybind11', size=220) ev.start() f4() ev.stop() res = ev.retrieve_raw_results() self.assertEqual(res.shape[1], ev.n_columns) df = ev.retrieve_results(False) self.assertEqual(df.shape, (res.shape[0], 10)) expected = ['time', 'value1', 'value2', 'event', 'name', 'mod', 'lineno', 'from_name', 'from_mod', 'from_line'] self.assertEqual(list(df.columns), expected) self.assertIn('sleep', set(df['name'])) self.assertIn('time', set(df['mod'])) if __name__ == "__main__": unittest.main()
28.100402
70
0.523653
6,631
0.947692
0
0
0
0
0
0
799
0.114192
7623c8878768af2e85f14e1d898fd32819bd4a04
4,572
py
Python
ionoscloud/models/s3_key_properties.py
ionos-cloud/ionos-cloud-sdk-python
3c5804697c262898e6f6a438dc40e1b45a4bb5c9
[ "Apache-2.0" ]
null
null
null
ionoscloud/models/s3_key_properties.py
ionos-cloud/ionos-cloud-sdk-python
3c5804697c262898e6f6a438dc40e1b45a4bb5c9
[ "Apache-2.0" ]
null
null
null
ionoscloud/models/s3_key_properties.py
ionos-cloud/ionos-cloud-sdk-python
3c5804697c262898e6f6a438dc40e1b45a4bb5c9
[ "Apache-2.0" ]
null
null
null
# coding: utf-8 """ CLOUD API IONOS Enterprise-grade Infrastructure as a Service (IaaS) solutions can be managed through the Cloud API, in addition or as an alternative to the \"Data Center Designer\" (DCD) browser-based tool. Both methods employ consistent concepts and features, deliver similar power and flexibility, and can be used to perform a multitude of management tasks, including adding servers, volumes, configuring networks, and so on. # noqa: E501 The version of the OpenAPI document: 6.0 Generated by: https://openapi-generator.tech """ import pprint import re # noqa: F401 import six from ionoscloud.configuration import Configuration class S3KeyProperties(object): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. """ """ Attributes: openapi_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ openapi_types = { 'secret_key': 'str', 'active': 'bool', } attribute_map = { 'secret_key': 'secretKey', 'active': 'active', } def __init__(self, secret_key=None, active=None, local_vars_configuration=None): # noqa: E501 """S3KeyProperties - a model defined in OpenAPI""" # noqa: E501 if local_vars_configuration is None: local_vars_configuration = Configuration() self.local_vars_configuration = local_vars_configuration self._secret_key = None self._active = None self.discriminator = None if secret_key is not None: self.secret_key = secret_key if active is not None: self.active = active @property def secret_key(self): """Gets the secret_key of this S3KeyProperties. # noqa: E501 Secret of the S3 key. # noqa: E501 :return: The secret_key of this S3KeyProperties. # noqa: E501 :rtype: str """ return self._secret_key @secret_key.setter def secret_key(self, secret_key): """Sets the secret_key of this S3KeyProperties. Secret of the S3 key. 
# noqa: E501 :param secret_key: The secret_key of this S3KeyProperties. # noqa: E501 :type secret_key: str """ self._secret_key = secret_key @property def active(self): """Gets the active of this S3KeyProperties. # noqa: E501 Denotes weather the S3 key is active. # noqa: E501 :return: The active of this S3KeyProperties. # noqa: E501 :rtype: bool """ return self._active @active.setter def active(self, active): """Sets the active of this S3KeyProperties. Denotes weather the S3 key is active. # noqa: E501 :param active: The active of this S3KeyProperties. # noqa: E501 :type active: bool """ self._active = active def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.openapi_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, S3KeyProperties): return False return self.to_dict() == other.to_dict() def __ne__(self, other): """Returns true if both objects are not equal""" if not isinstance(other, S3KeyProperties): return True return self.to_dict() != other.to_dict()
29.496774
438
0.595801
3,891
0.85105
0
0
1,194
0.261155
0
0
2,243
0.490595
76243510a1536cc8eb0c0666f32e3fd946357134
5,503
py
Python
setup.py
ProjexSoftware/orb
575be2689cb269e65a0a2678232ff940acc19e5a
[ "MIT" ]
7
2016-03-30T18:15:46.000Z
2021-02-19T14:55:01.000Z
setup.py
orb-framework/orb
575be2689cb269e65a0a2678232ff940acc19e5a
[ "MIT" ]
25
2016-02-02T20:52:35.000Z
2017-12-12T06:14:21.000Z
setup.py
orb-framework/orb
575be2689cb269e65a0a2678232ff940acc19e5a
[ "MIT" ]
3
2015-12-30T22:27:02.000Z
2016-08-24T22:33:42.000Z
import os import re import subprocess from setuptools import setup, find_packages, Command from setuptools.command.test import test as TestCommand __author__ = 'Eric Hulser' __email__ = 'eric.hulser@gmail.com' __license__ = 'MIT' INSTALL_REQUIRES = [] DEPENDENCY_LINKS = [] TESTS_REQUIRE = [] LONG_DESCRIPTION = '' class Tox(TestCommand): def run_tests(self): import tox tox.cmdline() class MakeDocs(Command): description = 'Generates documentation' user_options = [] def initialize_options(self): pass def finalize_options(self): pass def run(self): os.system('pip install -r requirements-dev.txt') os.system('sphinx-apidoc -f -o docs/source/api orb') os.system('sphinx-build -b html docs/source docs/build') class Release(Command): description = 'Runs the tests and releases a new version of the script' user_options = [ ('no-tests', None, 'Bypass the test validation before releasing') ] def initialize_options(self): self.no_tests = True # for now, default this to true... def finalize_options(self): pass def run(self): if self.no_tests: print('[WARNING] No tests have been run for this release!') if not self.no_tests and os.system('python setup.py test'): print('[ERROR] Could not release, tests are failing!') else: os.system('python setup.py tag') os.system('python setup.py bdist_wheel bdist_egg upload') class Tag(Command): description = 'Command used to release new versions of the website to the internal pypi server.' 
user_options = [ ('no-tag', None, 'Do not tag the repo before releasing') ] def initialize_options(self): self.no_tag = False def finalize_options(self): pass def run(self): # generate the version information from the current git commit cmd = ['git', 'describe', '--match', 'v[0-9]*.[0-9]*.0'] desc = subprocess.check_output(cmd).strip() result = re.match('v([0-9]+)\.([0-9]+)\.0-([0-9]+)-(.*)', desc) print 'generating version information from:', desc with open('./orb/_version.py', 'w') as f: f.write('__major__ = {0}\n'.format(result.group(1))) f.write('__minor__ = {0}\n'.format(result.group(2))) f.write('__revision__ = "{0}"\n'.format(result.group(3))) f.write('__hash__ = "{0}"'.format(result.group(4))) # tag this new release version if not self.no_tag: version = '.'.join([result.group(1), result.group(2), result.group(3)]) print 'creating git tag:', 'v' + version os.system('git tag -a v{0} -m "releasing {0}"'.format(version)) os.system('git push --tags') else: print 'warning: tagging ignored...' 
def read_requirements_file(path): """ reads requirements.txt file and handles PyPI index URLs :param path: (str) path to requirements.txt file :return: (tuple of lists) """ last_pypi_url = None with open(path) as f: requires = [] pypi_urls = [] for line in f.readlines(): if not line: continue if '--' in line: match = re.match(r'--index-url\s+([\w\d:/.-]+)\s', line) if match: last_pypi_url = match.group(1) if not last_pypi_url.endswith("/"): last_pypi_url += "/" else: if last_pypi_url: pypi_urls.append(last_pypi_url + line.strip().lower()) requires.append(line) return requires, pypi_urls if __name__ == '__main__': try: with open('orb/_version.py', 'r') as f: content = f.read() major = re.search('__major__ = (\d+)', content).group(1) minor = re.search('__minor__ = (\d+)', content).group(1) rev = re.search('__revision__ = "([^"]+)"', content).group(1) VERSION = '.'.join((major, minor, rev)) except StandardError: VERSION = '0.0.0' # parse the requirements file if os.path.isfile('requirements.txt'): _install_requires, _pypi_urls = read_requirements_file('requirements.txt') INSTALL_REQUIRES.extend(_install_requires) DEPENDENCY_LINKS.extend(_pypi_urls) if os.path.isfile('tests/requirements.txt'): _tests_require, _pypi_urls = read_requirements_file('tests/requirements.txt') TESTS_REQUIRE.extend(_tests_require) DEPENDENCY_LINKS.extend(_pypi_urls) # Get the long description from the relevant file if os.path.isfile('README.md'): with open('README.md') as f: LONG_DESCRIPTION = f.read() setup( name='orb-api', version=VERSION, author=__author__, author_email=__email__, maintainer=__author__, maintainer_email=__email__, description='Database ORM and API builder.', license=__license__, keywords='', url='https://github.com/orb-framework/orb', install_requires=INSTALL_REQUIRES, packages=find_packages(), tests_require=TESTS_REQUIRE, test_suite='tests', long_description=LONG_DESCRIPTION, cmdclass={ 'tag': Tag, 'release': Release, 'mkdocs': MakeDocs, 'test': Tox } )
31.445714
100
0.590405
2,566
0.466291
0
0
0
0
0
0
1,671
0.303653
762657929c6d46d8a16c4ce28a8f5a45ac78e34b
265
py
Python
forms.py
tecoholic/student_score_graph
d95d40953c1a93c3948db4d4f54591833e259daf
[ "MIT" ]
null
null
null
forms.py
tecoholic/student_score_graph
d95d40953c1a93c3948db4d4f54591833e259daf
[ "MIT" ]
null
null
null
forms.py
tecoholic/student_score_graph
d95d40953c1a93c3948db4d4f54591833e259daf
[ "MIT" ]
null
null
null
from flask_wtf import FlaskForm from flask_wtf.file import FileField, FileRequired, FileAllowed class CSVUploadForm(FlaskForm): csvfile = FileField( "CSV Mark Sheet", validators=[FileRequired(), FileAllowed(["csv"], "CSV Files only!")], )
26.5
77
0.701887
166
0.626415
0
0
0
0
0
0
38
0.143396
7626eb1109f2f23d20860751d2db0d2df24120d5
6,298
py
Python
recommendation-api/models/next_song_prediction/retrain.py
Kabeers-Network/yebrmusic
f97528fe79b4fb1c52f1c779599f67edefb3895c
[ "MIT" ]
1
2021-04-28T13:23:19.000Z
2021-04-28T13:23:19.000Z
recommendation-api/models/next_song_prediction/retrain.py
Kabeers-Network/yebrmusic
f97528fe79b4fb1c52f1c779599f67edefb3895c
[ "MIT" ]
2
2021-09-07T18:12:20.000Z
2021-09-07T18:13:35.000Z
recommendation-api/models/next_song_prediction/retrain.py
Kabeers-Network/yebrmusic
f97528fe79b4fb1c52f1c779599f67edefb3895c
[ "MIT" ]
null
null
null
import pandas as pd from tensorflow import keras, reduce_sum, ragged, function, math, nn, reduce_mean from os import environ class MaskedEmbeddingsAggregatorLayer(keras.layers.Layer): def __init__(self, agg_mode='sum', **kwargs): super(MaskedEmbeddingsAggregatorLayer, self).__init__(**kwargs) if agg_mode not in ['sum', 'mean']: raise NotImplementedError('mode {} not implemented!'.format(agg_mode)) self.agg_mode = agg_mode @function def call(self, inputs, mask=None): masked_embeddings = ragged.boolean_mask(inputs, mask) if self.agg_mode == 'sum': aggregated = reduce_sum(masked_embeddings, axis=1) elif self.agg_mode == 'mean': aggregated = reduce_mean(masked_embeddings, axis=1) return aggregated def get_config(self): # this is used when loading a saved model that uses a custom layer return {'agg_mode': self.agg_mode} class L2NormLayer(keras.layers.Layer): def __init__(self, **kwargs): super(L2NormLayer, self).__init__(**kwargs) @function def call(self, inputs, mask=None): if mask is not None: inputs = ragged.boolean_mask(inputs, mask).to_tensor() return math.l2_normalize(inputs, axis=-1) def compute_mask(self, inputs, mask): return mask def get_data(corpus_path): corpus = pd.read_json(environ.get("DATA_COLLECTION_HOST") + "/api/details/get-indexed-songs") """ Save Corpus """ corpus.to_pickle(corpus_path) print("Saved Corpus at: ", corpus_path) watch_history = pd.read_json(environ.get("DATA_COLLECTION_HOST") + "/api/history/get-history") # search_history = pd.read_json("http://localhost:9000/recommendation/history/search") return corpus, watch_history def get_model(NUM_CLASSES): EMBEDDING_DIMS = 16 DENSE_UNITS = 64 DROPOUT_PCT = 0.0 ALPHA = 0.0 LEARNING_RATE = 0.003 """ Handle Search Queries and Watch History - Encoded Indices of Songs """ search_queries = keras.layers.Input(shape=(None,), name='search_query') watch_history = keras.layers.Input(shape=(None,), name='watch_history') features_embedding_layer = keras.layers.Embedding(input_dim=NUM_CLASSES, 
output_dim=EMBEDDING_DIMS, mask_zero=True, trainable=True, name='searched_embeddings') labels_embedding_layer = keras.layers.Embedding(input_dim=NUM_CLASSES, output_dim=EMBEDDING_DIMS, mask_zero=True, trainable=True, name='watched_embeddings') avg_embeddings = MaskedEmbeddingsAggregatorLayer(agg_mode='mean', name='aggregate_embeddings') dense_1 = keras.layers.Dense(units=DENSE_UNITS, name='dense_1') dense_2 = keras.layers.Dense(units=DENSE_UNITS, name='dense_2') dense_3 = keras.layers.Dense(units=DENSE_UNITS, name='dense_3') l2_norm_1 = L2NormLayer(name='l2_norm_1') dense_output = keras.layers.Dense(NUM_CLASSES, activation=nn.softmax, name='dense_output') """ L2 Normalize Inputs - Normalize - Average Inputs - Concat as Single Layer """ searched_embeddings = features_embedding_layer(search_queries) l2_norm_searched = l2_norm_1(searched_embeddings) avg_searched = avg_embeddings(l2_norm_searched) labels_watched_embeddings = labels_embedding_layer(watch_history) l2_norm_watched = l2_norm_1(labels_watched_embeddings) avg_watched = avg_embeddings(l2_norm_watched) concat_inputs = keras.layers.Concatenate(axis=1)([avg_searched, avg_watched]) """### Dense Layers Contains: - DenseLayers - BatchNormalization Layers - Relu Layers """ dense_1_features = dense_1(concat_inputs) dense_1_relu = keras.layers.ReLU(name='dense_1_relu')(dense_1_features) dense_1_batch_norm = keras.layers.BatchNormalization(name='dense_1_batch_norm')(dense_1_relu) dense_2_features = dense_2(dense_1_relu) dense_2_relu = keras.layers.ReLU(name='dense_2_relu')(dense_2_features) dense_3_features = dense_3(dense_2_relu) dense_3_relu = keras.layers.ReLU(name='dense_3_relu')(dense_3_features) dense_3_batch_norm = keras.layers.BatchNormalization(name='dense_3_batch_norm')(dense_3_relu) outputs = dense_output(dense_3_batch_norm) """### Compiling the Model""" optimiser = keras.optimizers.Adam(learning_rate=LEARNING_RATE) loss = 'sparse_categorical_crossentropy' model = keras.models.Model( inputs=[search_queries, 
watch_history], outputs=[outputs] ) model.compile(optimizer=optimiser, loss=loss) return model def retrain_model(corpus_path, model_snapshot_location): corpus, watch_history = get_data(corpus_path) """ Make Indexes for speedier revival """ song_ids = corpus["song_id"].unique().tolist() song_2_index = {x: i for i, x in enumerate(song_ids)} # index_2_songid = {i: x for i, x in enumerate(song_ids)} user_ids = watch_history["user_id"].unique().tolist() user_2_index = {x: i for i, x in enumerate(user_ids)} # index_2_userid = {i: x for i, x in enumerate(user_ids)} """ Encoded Song Ids and user Ids to feed to the network """ watch_history['user_id'] = watch_history['user_id'].map(user_2_index) watch_history['song_id'] = watch_history['song_id'].map(song_2_index) """ Group user's watch history """ watches_grouped = watch_history.groupby(['user_id'])['song_id'].apply(list).reset_index() """ Treat last watched as Past Prediction """ watches_grouped['past_predicted'] = watches_grouped['song_id'].apply(lambda x: (x[-1])) """ Save model snapshot callback """ checkpoint = keras.callbacks.ModelCheckpoint(model_snapshot_location, monitor='loss', verbose=1, save_best_only=True, mode='min') model = get_model(NUM_CLASSES=(len(corpus) + 2)) """ Not Adding Search Queries""" model.fit([ keras.preprocessing.sequence.pad_sequences(watches_grouped['song_id']), keras.preprocessing.sequence.pad_sequences(watches_grouped['song_id']), ], watches_grouped['past_predicted'].values, callbacks=[checkpoint], steps_per_epoch=1, epochs=100, verbose=1) print("Model Retrained")
37.939759
119
0.695459
1,216
0.193077
0
0
531
0.084312
0
0
1,458
0.231502
762736c3af383f7a5e7c51cd44492ea53e1dcb42
818
py
Python
examples/example_plugin/example_plugin/tables.py
susanhooks/nautobot
bc3ef5958f0d5decb0be763342c790f26ff1e20e
[ "Apache-2.0" ]
null
null
null
examples/example_plugin/example_plugin/tables.py
susanhooks/nautobot
bc3ef5958f0d5decb0be763342c790f26ff1e20e
[ "Apache-2.0" ]
null
null
null
examples/example_plugin/example_plugin/tables.py
susanhooks/nautobot
bc3ef5958f0d5decb0be763342c790f26ff1e20e
[ "Apache-2.0" ]
null
null
null
import django_tables2 as tables from nautobot.utilities.tables import ( BaseTable, ButtonsColumn, ToggleColumn, ) from example_plugin.models import AnotherExampleModel, ExampleModel class ExampleModelTable(BaseTable): """Table for list view of `ExampleModel` objects.""" pk = ToggleColumn() name = tables.LinkColumn() actions = ButtonsColumn(ExampleModel) class Meta(BaseTable.Meta): model = ExampleModel fields = ["pk", "name", "number"] class AnotherExampleModelTable(BaseTable): """Table for list view of `AnotherExampleModel` objects.""" pk = ToggleColumn() name = tables.LinkColumn() actions = ButtonsColumn(AnotherExampleModel) class Meta(BaseTable.Meta): model = AnotherExampleModel fields = ["pk", "name", "number"]
24.058824
67
0.691932
616
0.753056
0
0
0
0
0
0
147
0.179707
762af3d64f2b43064fe67ca5b5b25e3229f256a7
555
py
Python
pyfacebook/utils/reformat_response.py
Socian-Ltd/python-facebook-1
e9a4f626b37541103c9534a29342ef6033c09c06
[ "Apache-2.0" ]
null
null
null
pyfacebook/utils/reformat_response.py
Socian-Ltd/python-facebook-1
e9a4f626b37541103c9534a29342ef6033c09c06
[ "Apache-2.0" ]
null
null
null
pyfacebook/utils/reformat_response.py
Socian-Ltd/python-facebook-1
e9a4f626b37541103c9534a29342ef6033c09c06
[ "Apache-2.0" ]
1
2021-08-25T05:34:14.000Z
2021-08-25T05:34:14.000Z
import json from typing import Optional, Union, Dict def replace_from_keyword_in_json( data, # type: Optional[Dict] ): # type: (...) -> Dict """ Rename the 'from' field coming from the Graph API. As 'from' is a Python keyword, we cannot use this as an attribute with 'attrs' package. So renaming the 'from' field to 'object_creator' """ json_str = json.dumps(data) replaced_json_str = json_str.replace('"from":', '"object_creator":') replaced_json_obj = json.loads(replaced_json_str) return replaced_json_obj
32.647059
110
0.688288
0
0
0
0
0
0
0
0
278
0.500901
762b4b39b1252f1872ea955f30e2d93f46a6eb06
1,224
py
Python
tests/test_ctparse.py
bharathi-srini/ctparse
f3b7ef1045fbbc566dc838c66d4a47de160cb38f
[ "MIT" ]
null
null
null
tests/test_ctparse.py
bharathi-srini/ctparse
f3b7ef1045fbbc566dc838c66d4a47de160cb38f
[ "MIT" ]
null
null
null
tests/test_ctparse.py
bharathi-srini/ctparse
f3b7ef1045fbbc566dc838c66d4a47de160cb38f
[ "MIT" ]
null
null
null
from unittest import TestCase from datetime import datetime from ctparse.ctparse import ctparse, _match_rule from ctparse.types import Time class TestCTParse(TestCase): def test_ctparse(self): txt = '12.12.2020' res = ctparse(txt) self.assertEqual(res.resolution, Time(year=2020, month=12, day=12)) self.assertIsNotNone(str(res)) self.assertIsNotNone(repr(res)) # non sense gives no result self.assertIsNone(ctparse('gargelbabel')) txt = '12.12.' res = ctparse(txt, ts=datetime(2020, 12, 1)) self.assertEqual(res.resolution, Time(year=2020, month=12, day=12)) res = ctparse(txt, ts=datetime(2020, 12, 1), debug=True) self.assertEqual(next(res).resolution, Time(year=2020, month=12, day=12)) def test_ctparse_timeout(self): # timeout in ctparse: should rather mock the logger and see # whether the timeout was hit, but cannot get it mocked txt = 'tomorrow 8 yesterday Sep 9 9 12 2023 1923' ctparse(txt, timeout=0.0001) def test_match_rule(self): self.assertEqual(list(_match_rule([], ['not empty'])), []) self.assertEqual(list(_match_rule(['not empty'], [])), [])
38.25
81
0.655229
1,081
0.88317
0
0
0
0
0
0
239
0.195261
762ecaaa1aa4b6aacfa3ef56be0e34fa5e92bb98
5,841
py
Python
src/data/preprocessing/rescale_videos.py
HochulHwang/gc_test
c42e9ec3392bc02eef5e3943ec3bf79456e91bf9
[ "MIT" ]
null
null
null
src/data/preprocessing/rescale_videos.py
HochulHwang/gc_test
c42e9ec3392bc02eef5e3943ec3bf79456e91bf9
[ "MIT" ]
null
null
null
src/data/preprocessing/rescale_videos.py
HochulHwang/gc_test
c42e9ec3392bc02eef5e3943ec3bf79456e91bf9
[ "MIT" ]
null
null
null
import argparse import os import subprocess import time import sys import ipdb import pickle from utils.meter import * def main(args): # Parameters from the args dir, h, w, fps, common_suffix = args.dir, args.height, args.width, args.fps, args.common_suffix # avi dir dir_split = dir.split('/') avi_dir = dir_split[-1] root_dir = '/'.join(dir_split[:-1]) new_avi_dir = "{}_{}x{}_{}".format(avi_dir, w, h, fps) new_dir = os.path.join(root_dir, new_avi_dir) os.makedirs(new_dir, exist_ok=True) # load the existing dict if exist dict_video_length_fn = os.path.join(new_dir, 'dict_id_length.pickle') if os.path.isfile(dict_video_length_fn): with open(dict_video_length_fn, 'rb') as file: dict_video_length = pickle.load(file) else: dict_video_length = {} # Get the super_video filenames list_video_fn = get_all_videos(dir, common_suffix) print("{} videos to uncompressed in total".format(len(list_video_fn))) # Loop over the super_video and extract op_time = AverageMeter() start = time.time() list_error_fn = [] for i, video_fn in enumerate(list_video_fn): try: # Rescale rescale_video(video_fn, w, h, fps, dir, new_dir, common_suffix, dict_video_length, ffmpeg=args.ffmpeg, crf=args.crf) # Log duration = time.time() - start op_time.update(duration, 1) print("{}/{} : {time.val:.3f} ({time.avg:.3f}) sec/super_video".format(i + 1, len(list_video_fn), time=op_time)) sys.stdout.flush() start = time.time() except: print("Impossible to rescale_videos super_video for {}".format(video_fn)) list_error_fn.append(video_fn) print("\nDone") print("\nImpossible to extract frames for {} videos: \n {}".format(len(list_error_fn), list_error_fn)) # Save the dict id -> length with open(dict_video_length_fn, 'wb') as file: pickle.dump(dict_video_length, file, protocol=pickle.HIGHEST_PROTOCOL) print("\nDict Video_id -> Length saved here ---> {}".format(file)) def get_duration(file): """Get the duration of a super_video using ffprobe. 
-> https://stackoverflow.com/questions/31024968/using-ffmpeg-to-obtain-super_video-durations-in-python""" cmd = 'ffprobe -i {} -show_entries format=duration -v quiet -of csv="p=0"'.format(file) output = subprocess.check_output( cmd, shell=True, # Let this run in the shell stderr=subprocess.STDOUT ) # return round(float(output)) # ugly, but rounds your seconds up or down return float(output) def rescale_video(video_fn, w, h, fps, dir, new_dir, common_suffix, dict_video_length, ffmpeg, crf=17): """ Rescale a super_video according to its new width, height an fps """ # Output video_name video_id = video_fn.replace(dir, '').replace(common_suffix, '') video_fn_rescaled = video_fn.replace(dir, new_dir) video_fn_rescaled = video_fn_rescaled.replace(common_suffix, common_suffix.lower()) # Create the dir video_dir_to_create = '/'.join(video_fn_rescaled.split('/')[:-1]) os.makedirs(video_dir_to_create, exist_ok=True) # Check if the file already exists if os.path.isfile(video_fn_rescaled): print("{} already exists".format(video_fn_rescaled)) else: subprocess.call( '{ffmpeg} -i {video_input} -vf scale={w}:{h} -crf {crf} -r {fps} -y {video_output} -loglevel panic'.format( ffmpeg=ffmpeg, video_input=video_fn, h=h, w=w, fps=fps, video_output=video_fn_rescaled, crf=crf ), shell=True) # Get the duration of the new super_video (in sec) duration_sec = get_duration(video_fn_rescaled) duration_frames = int(duration_sec * fps) # update the dict id -> length dict_video_length[video_id] = duration_frames return video_fn_rescaled def get_all_videos(dir, extension='mp4'): """ Return a list of the super_video filename from a directory and its subdirectories """ list_video_fn = [] for dirpath, dirnames, filenames in os.walk(dir): for filename in [f for f in filenames if f.endswith(extension)]: fn = os.path.join(dirpath, filename) list_video_fn.append(fn) return list_video_fn if __name__ == '__main__': parser = argparse.ArgumentParser(description='Preprocess dataset') 
parser.add_argument('--dir', metavar='DIR', # default='/Users/fabien/Datasets/NTU-RGB-D/avi', default='/home/hochul/Desktop/mini_syn_data/avi', help='path to avi dir') parser.add_argument('--width', default=256, type=int, metavar='W', help='Width') parser.add_argument('--height', default=256, type=int, metavar='H', help='Height') parser.add_argument('--fps', default=20, type=int, metavar='FPS', help='Frames per second for the extraction, -1 means that we take the fps from the super_video') parser.add_argument('--common-suffix', metavar='E', default='.avi', help='Common end of each super_video file') parser.add_argument('--crf', default=17, type=int, metavar='CRF', help='CRF for ffmpeg command') parser.add_argument('--ffmpeg', metavar='FF', default='ffmpeg', help='ffmpeg verison to use') args = parser.parse_args() main(args)
38.176471
161
0.610854
0
0
0
0
0
0
0
0
1,680
0.287622
57ff0de84d2c52561ef644349243f41fda37acf1
251
py
Python
sample_linear.py
Oleg-Krivosheev/Sample-Gamma-small-alpha
05988f8532b471305e31d8b5d0b3e027fb5d0b80
[ "MIT" ]
null
null
null
sample_linear.py
Oleg-Krivosheev/Sample-Gamma-small-alpha
05988f8532b471305e31d8b5d0b3e027fb5d0b80
[ "MIT" ]
null
null
null
sample_linear.py
Oleg-Krivosheev/Sample-Gamma-small-alpha
05988f8532b471305e31d8b5d0b3e027fb5d0b80
[ "MIT" ]
null
null
null
#!/usr/bin/env python import math import random import matplotlib.pyplot as plt # sample from linear distribution and plot it bins = [0.1 * i for i in range(12)] plt.hist([(1.0 - math.sqrt(random.random())) for k in range(10000)], bins) plt.show()
20.916667
74
0.705179
0
0
0
0
0
0
0
0
66
0.262948
52018879c474772597c77f6c11266040f80f9146
43,183
py
Python
tests/extension/thread_/mutex_try_lock/test_thread_mutex_try_lock.py
akmaru/veriloggen
74f998139e8cf613f7703fa4cffd571bbf069bbc
[ "Apache-2.0" ]
null
null
null
tests/extension/thread_/mutex_try_lock/test_thread_mutex_try_lock.py
akmaru/veriloggen
74f998139e8cf613f7703fa4cffd571bbf069bbc
[ "Apache-2.0" ]
null
null
null
tests/extension/thread_/mutex_try_lock/test_thread_mutex_try_lock.py
akmaru/veriloggen
74f998139e8cf613f7703fa4cffd571bbf069bbc
[ "Apache-2.0" ]
null
null
null
from __future__ import absolute_import from __future__ import print_function import veriloggen import thread_mutex_try_lock expected_verilog = """ module test; reg CLK; reg RST; blinkled uut ( .CLK(CLK), .RST(RST) ); initial begin $dumpfile("uut.vcd"); $dumpvars(0, uut); end initial begin CLK = 0; forever begin #5 CLK = !CLK; end end initial begin RST = 0; #100; RST = 1; #100; RST = 0; #10000; $finish; end endmodule module blinkled ( input CLK, input RST ); reg _mymutex_lock_reg; reg [32-1:0] _mymutex_lock_id; reg [8-1:0] _th_myfunc_start; reg [32-1:0] th_blink; localparam th_blink_init = 0; reg signed [32-1:0] _th_blink_tid_0; reg [32-1:0] th_myfunc_0; localparam th_myfunc_0_init = 0; reg [32-1:0] th_myfunc_1; localparam th_myfunc_1_init = 0; reg [32-1:0] th_myfunc_2; localparam th_myfunc_2_init = 0; reg [32-1:0] th_myfunc_3; localparam th_myfunc_3_init = 0; reg [32-1:0] th_myfunc_4; localparam th_myfunc_4_init = 0; reg [32-1:0] th_myfunc_5; localparam th_myfunc_5_init = 0; reg [32-1:0] th_myfunc_6; localparam th_myfunc_6_init = 0; reg [32-1:0] th_myfunc_7; localparam th_myfunc_7_init = 0; reg _th_myfunc_0_called; reg signed [32-1:0] _th_myfunc_0_tid_1; reg signed [32-1:0] _th_myfunc_0_tid_2; reg _tmp_0; reg signed [32-1:0] _th_myfunc_0_lock_3; reg signed [32-1:0] _th_myfunc_0_waitcount_4; reg _tmp_1; reg signed [32-1:0] _th_myfunc_0_i_5; reg _th_myfunc_1_called; reg signed [32-1:0] _th_myfunc_1_tid_6; reg signed [32-1:0] _th_myfunc_1_tid_7; reg _tmp_2; reg signed [32-1:0] _th_myfunc_1_lock_8; reg signed [32-1:0] _th_myfunc_1_waitcount_9; reg _tmp_3; reg signed [32-1:0] _th_myfunc_1_i_10; reg _th_myfunc_2_called; reg signed [32-1:0] _th_myfunc_2_tid_11; reg signed [32-1:0] _th_myfunc_2_tid_12; reg _tmp_4; reg signed [32-1:0] _th_myfunc_2_lock_13; reg signed [32-1:0] _th_myfunc_2_waitcount_14; reg _tmp_5; reg signed [32-1:0] _th_myfunc_2_i_15; reg _th_myfunc_3_called; reg signed [32-1:0] _th_myfunc_3_tid_16; reg signed [32-1:0] _th_myfunc_3_tid_17; reg _tmp_6; reg 
signed [32-1:0] _th_myfunc_3_lock_18; reg signed [32-1:0] _th_myfunc_3_waitcount_19; reg _tmp_7; reg signed [32-1:0] _th_myfunc_3_i_20; reg _th_myfunc_4_called; reg signed [32-1:0] _th_myfunc_4_tid_21; reg signed [32-1:0] _th_myfunc_4_tid_22; reg _tmp_8; reg signed [32-1:0] _th_myfunc_4_lock_23; reg signed [32-1:0] _th_myfunc_4_waitcount_24; reg _tmp_9; reg signed [32-1:0] _th_myfunc_4_i_25; reg _th_myfunc_5_called; reg signed [32-1:0] _th_myfunc_5_tid_26; reg signed [32-1:0] _th_myfunc_5_tid_27; reg _tmp_10; reg signed [32-1:0] _th_myfunc_5_lock_28; reg signed [32-1:0] _th_myfunc_5_waitcount_29; reg _tmp_11; reg signed [32-1:0] _th_myfunc_5_i_30; reg _th_myfunc_6_called; reg signed [32-1:0] _th_myfunc_6_tid_31; reg signed [32-1:0] _th_myfunc_6_tid_32; reg _tmp_12; reg signed [32-1:0] _th_myfunc_6_lock_33; reg signed [32-1:0] _th_myfunc_6_waitcount_34; reg _tmp_13; reg signed [32-1:0] _th_myfunc_6_i_35; reg _th_myfunc_7_called; reg signed [32-1:0] _th_myfunc_7_tid_36; reg signed [32-1:0] _th_myfunc_7_tid_37; reg _tmp_14; reg signed [32-1:0] _th_myfunc_7_lock_38; reg signed [32-1:0] _th_myfunc_7_waitcount_39; reg _tmp_15; reg signed [32-1:0] _th_myfunc_7_i_40; always @(posedge CLK) begin if(RST) begin _mymutex_lock_reg <= 0; _mymutex_lock_id <= 0; end else begin if((th_myfunc_0 == 3) && !_mymutex_lock_reg) begin _mymutex_lock_reg <= 1; _mymutex_lock_id <= 0; end if((th_myfunc_0 == 10) && !_mymutex_lock_reg) begin _mymutex_lock_reg <= 1; _mymutex_lock_id <= 0; end if((th_myfunc_0 == 19) && (_mymutex_lock_id == 0)) begin _mymutex_lock_reg <= 0; end if((th_myfunc_1 == 3) && !_mymutex_lock_reg) begin _mymutex_lock_reg <= 1; _mymutex_lock_id <= 1; end if((th_myfunc_1 == 10) && !_mymutex_lock_reg) begin _mymutex_lock_reg <= 1; _mymutex_lock_id <= 1; end if((th_myfunc_1 == 19) && (_mymutex_lock_id == 1)) begin _mymutex_lock_reg <= 0; end if((th_myfunc_2 == 3) && !_mymutex_lock_reg) begin _mymutex_lock_reg <= 1; _mymutex_lock_id <= 2; end if((th_myfunc_2 == 10) && 
!_mymutex_lock_reg) begin _mymutex_lock_reg <= 1; _mymutex_lock_id <= 2; end if((th_myfunc_2 == 19) && (_mymutex_lock_id == 2)) begin _mymutex_lock_reg <= 0; end if((th_myfunc_3 == 3) && !_mymutex_lock_reg) begin _mymutex_lock_reg <= 1; _mymutex_lock_id <= 3; end if((th_myfunc_3 == 10) && !_mymutex_lock_reg) begin _mymutex_lock_reg <= 1; _mymutex_lock_id <= 3; end if((th_myfunc_3 == 19) && (_mymutex_lock_id == 3)) begin _mymutex_lock_reg <= 0; end if((th_myfunc_4 == 3) && !_mymutex_lock_reg) begin _mymutex_lock_reg <= 1; _mymutex_lock_id <= 4; end if((th_myfunc_4 == 10) && !_mymutex_lock_reg) begin _mymutex_lock_reg <= 1; _mymutex_lock_id <= 4; end if((th_myfunc_4 == 19) && (_mymutex_lock_id == 4)) begin _mymutex_lock_reg <= 0; end if((th_myfunc_5 == 3) && !_mymutex_lock_reg) begin _mymutex_lock_reg <= 1; _mymutex_lock_id <= 5; end if((th_myfunc_5 == 10) && !_mymutex_lock_reg) begin _mymutex_lock_reg <= 1; _mymutex_lock_id <= 5; end if((th_myfunc_5 == 19) && (_mymutex_lock_id == 5)) begin _mymutex_lock_reg <= 0; end if((th_myfunc_6 == 3) && !_mymutex_lock_reg) begin _mymutex_lock_reg <= 1; _mymutex_lock_id <= 6; end if((th_myfunc_6 == 10) && !_mymutex_lock_reg) begin _mymutex_lock_reg <= 1; _mymutex_lock_id <= 6; end if((th_myfunc_6 == 19) && (_mymutex_lock_id == 6)) begin _mymutex_lock_reg <= 0; end if((th_myfunc_7 == 3) && !_mymutex_lock_reg) begin _mymutex_lock_reg <= 1; _mymutex_lock_id <= 7; end if((th_myfunc_7 == 10) && !_mymutex_lock_reg) begin _mymutex_lock_reg <= 1; _mymutex_lock_id <= 7; end if((th_myfunc_7 == 19) && (_mymutex_lock_id == 7)) begin _mymutex_lock_reg <= 0; end end end localparam th_blink_1 = 1; localparam th_blink_2 = 2; localparam th_blink_3 = 3; localparam th_blink_4 = 4; localparam th_blink_5 = 5; localparam th_blink_6 = 6; localparam th_blink_7 = 7; localparam th_blink_8 = 8; localparam th_blink_9 = 9; localparam th_blink_10 = 10; localparam th_blink_11 = 11; always @(posedge CLK) begin if(RST) begin th_blink <= th_blink_init; 
_th_blink_tid_0 <= 0; _th_myfunc_start[_th_blink_tid_0] <= (0 >> _th_blink_tid_0) & 1'd1; end else begin case(th_blink) th_blink_init: begin th_blink <= th_blink_1; end th_blink_1: begin _th_blink_tid_0 <= 0; th_blink <= th_blink_2; end th_blink_2: begin if(_th_blink_tid_0 < 8) begin th_blink <= th_blink_3; end else begin th_blink <= th_blink_7; end end th_blink_3: begin _th_myfunc_start[_th_blink_tid_0] <= 1; th_blink <= th_blink_4; end th_blink_4: begin th_blink <= th_blink_5; th_blink <= th_blink_5; th_blink <= th_blink_5; th_blink <= th_blink_5; th_blink <= th_blink_5; th_blink <= th_blink_5; th_blink <= th_blink_5; th_blink <= th_blink_5; end th_blink_5: begin _th_myfunc_start[_th_blink_tid_0] <= 0; th_blink <= th_blink_6; end th_blink_6: begin _th_blink_tid_0 <= _th_blink_tid_0 + 1; th_blink <= th_blink_2; end th_blink_7: begin _th_blink_tid_0 <= 0; th_blink <= th_blink_8; end th_blink_8: begin if(_th_blink_tid_0 < 8) begin th_blink <= th_blink_9; end else begin th_blink <= th_blink_11; end end th_blink_9: begin if((_th_blink_tid_0 == 0)? th_myfunc_0 == 21 : (_th_blink_tid_0 == 1)? th_myfunc_1 == 21 : (_th_blink_tid_0 == 2)? th_myfunc_2 == 21 : (_th_blink_tid_0 == 3)? th_myfunc_3 == 21 : (_th_blink_tid_0 == 4)? th_myfunc_4 == 21 : (_th_blink_tid_0 == 5)? th_myfunc_5 == 21 : (_th_blink_tid_0 == 6)? th_myfunc_6 == 21 : (_th_blink_tid_0 == 7)? 
th_myfunc_7 == 21 : 0) begin th_blink <= th_blink_10; end end th_blink_10: begin _th_blink_tid_0 <= _th_blink_tid_0 + 1; th_blink <= th_blink_8; end endcase end end localparam th_myfunc_0_1 = 1; localparam th_myfunc_0_2 = 2; localparam th_myfunc_0_3 = 3; localparam th_myfunc_0_4 = 4; localparam th_myfunc_0_5 = 5; localparam th_myfunc_0_6 = 6; localparam th_myfunc_0_7 = 7; localparam th_myfunc_0_8 = 8; localparam th_myfunc_0_9 = 9; localparam th_myfunc_0_10 = 10; localparam th_myfunc_0_11 = 11; localparam th_myfunc_0_12 = 12; localparam th_myfunc_0_13 = 13; localparam th_myfunc_0_14 = 14; localparam th_myfunc_0_15 = 15; localparam th_myfunc_0_16 = 16; localparam th_myfunc_0_17 = 17; localparam th_myfunc_0_18 = 18; localparam th_myfunc_0_19 = 19; localparam th_myfunc_0_20 = 20; localparam th_myfunc_0_21 = 21; always @(posedge CLK) begin if(RST) begin th_myfunc_0 <= th_myfunc_0_init; _th_myfunc_0_called <= 0; _th_myfunc_0_tid_1 <= 0; _th_myfunc_0_tid_2 <= 0; _tmp_0 <= 0; _th_myfunc_0_lock_3 <= 0; _th_myfunc_0_waitcount_4 <= 0; _tmp_1 <= 0; _th_myfunc_0_i_5 <= 0; end else begin case(th_myfunc_0) th_myfunc_0_init: begin if(_th_myfunc_start[0] && (th_blink == 4)) begin _th_myfunc_0_called <= 1; end if(_th_myfunc_start[0] && (th_blink == 4)) begin _th_myfunc_0_tid_1 <= _th_blink_tid_0; end if((th_blink == 4) && _th_myfunc_start[0]) begin th_myfunc_0 <= th_myfunc_0_1; end end th_myfunc_0_1: begin _th_myfunc_0_tid_2 <= _th_myfunc_0_tid_1; th_myfunc_0 <= th_myfunc_0_2; end th_myfunc_0_2: begin $display("-- Thread %d TryLock", _th_myfunc_0_tid_2); th_myfunc_0 <= th_myfunc_0_3; end th_myfunc_0_3: begin th_myfunc_0 <= th_myfunc_0_4; end th_myfunc_0_4: begin _tmp_0 <= _mymutex_lock_reg & (_mymutex_lock_id == 0); th_myfunc_0 <= th_myfunc_0_5; end th_myfunc_0_5: begin _th_myfunc_0_lock_3 <= _tmp_0; th_myfunc_0 <= th_myfunc_0_6; end th_myfunc_0_6: begin _th_myfunc_0_waitcount_4 <= 0; th_myfunc_0 <= th_myfunc_0_7; end th_myfunc_0_7: begin if(!_th_myfunc_0_lock_3) begin th_myfunc_0 
<= th_myfunc_0_8; end else begin th_myfunc_0 <= th_myfunc_0_14; end end th_myfunc_0_8: begin $display("-- Thread %d TryLock", _th_myfunc_0_tid_2); th_myfunc_0 <= th_myfunc_0_9; end th_myfunc_0_9: begin _th_myfunc_0_waitcount_4 <= _th_myfunc_0_waitcount_4 + 1; th_myfunc_0 <= th_myfunc_0_10; end th_myfunc_0_10: begin th_myfunc_0 <= th_myfunc_0_11; end th_myfunc_0_11: begin _tmp_1 <= _mymutex_lock_reg & (_mymutex_lock_id == 0); th_myfunc_0 <= th_myfunc_0_12; end th_myfunc_0_12: begin _th_myfunc_0_lock_3 <= _tmp_1; th_myfunc_0 <= th_myfunc_0_13; end th_myfunc_0_13: begin th_myfunc_0 <= th_myfunc_0_7; end th_myfunc_0_14: begin $display("Thread %d Lock: waitcount=%d", _th_myfunc_0_tid_2, _th_myfunc_0_waitcount_4); th_myfunc_0 <= th_myfunc_0_15; end th_myfunc_0_15: begin _th_myfunc_0_i_5 <= 0; th_myfunc_0 <= th_myfunc_0_16; end th_myfunc_0_16: begin if(_th_myfunc_0_i_5 < 20) begin th_myfunc_0 <= th_myfunc_0_17; end else begin th_myfunc_0 <= th_myfunc_0_18; end end th_myfunc_0_17: begin _th_myfunc_0_i_5 <= _th_myfunc_0_i_5 + 1; th_myfunc_0 <= th_myfunc_0_16; end th_myfunc_0_18: begin $display("Thread %d Hello", _th_myfunc_0_tid_2); th_myfunc_0 <= th_myfunc_0_19; end th_myfunc_0_19: begin th_myfunc_0 <= th_myfunc_0_20; end th_myfunc_0_20: begin $display("Thread %d Unlock", _th_myfunc_0_tid_2); th_myfunc_0 <= th_myfunc_0_21; end endcase end end localparam th_myfunc_1_1 = 1; localparam th_myfunc_1_2 = 2; localparam th_myfunc_1_3 = 3; localparam th_myfunc_1_4 = 4; localparam th_myfunc_1_5 = 5; localparam th_myfunc_1_6 = 6; localparam th_myfunc_1_7 = 7; localparam th_myfunc_1_8 = 8; localparam th_myfunc_1_9 = 9; localparam th_myfunc_1_10 = 10; localparam th_myfunc_1_11 = 11; localparam th_myfunc_1_12 = 12; localparam th_myfunc_1_13 = 13; localparam th_myfunc_1_14 = 14; localparam th_myfunc_1_15 = 15; localparam th_myfunc_1_16 = 16; localparam th_myfunc_1_17 = 17; localparam th_myfunc_1_18 = 18; localparam th_myfunc_1_19 = 19; localparam th_myfunc_1_20 = 20; localparam 
th_myfunc_1_21 = 21; always @(posedge CLK) begin if(RST) begin th_myfunc_1 <= th_myfunc_1_init; _th_myfunc_1_called <= 0; _th_myfunc_1_tid_6 <= 0; _th_myfunc_1_tid_7 <= 0; _tmp_2 <= 0; _th_myfunc_1_lock_8 <= 0; _th_myfunc_1_waitcount_9 <= 0; _tmp_3 <= 0; _th_myfunc_1_i_10 <= 0; end else begin case(th_myfunc_1) th_myfunc_1_init: begin if(_th_myfunc_start[1] && (th_blink == 4)) begin _th_myfunc_1_called <= 1; end if(_th_myfunc_start[1] && (th_blink == 4)) begin _th_myfunc_1_tid_6 <= _th_blink_tid_0; end if((th_blink == 4) && _th_myfunc_start[1]) begin th_myfunc_1 <= th_myfunc_1_1; end end th_myfunc_1_1: begin _th_myfunc_1_tid_7 <= _th_myfunc_1_tid_6; th_myfunc_1 <= th_myfunc_1_2; end th_myfunc_1_2: begin $display("-- Thread %d TryLock", _th_myfunc_1_tid_7); th_myfunc_1 <= th_myfunc_1_3; end th_myfunc_1_3: begin th_myfunc_1 <= th_myfunc_1_4; end th_myfunc_1_4: begin _tmp_2 <= _mymutex_lock_reg & (_mymutex_lock_id == 1); th_myfunc_1 <= th_myfunc_1_5; end th_myfunc_1_5: begin _th_myfunc_1_lock_8 <= _tmp_2; th_myfunc_1 <= th_myfunc_1_6; end th_myfunc_1_6: begin _th_myfunc_1_waitcount_9 <= 0; th_myfunc_1 <= th_myfunc_1_7; end th_myfunc_1_7: begin if(!_th_myfunc_1_lock_8) begin th_myfunc_1 <= th_myfunc_1_8; end else begin th_myfunc_1 <= th_myfunc_1_14; end end th_myfunc_1_8: begin $display("-- Thread %d TryLock", _th_myfunc_1_tid_7); th_myfunc_1 <= th_myfunc_1_9; end th_myfunc_1_9: begin _th_myfunc_1_waitcount_9 <= _th_myfunc_1_waitcount_9 + 1; th_myfunc_1 <= th_myfunc_1_10; end th_myfunc_1_10: begin th_myfunc_1 <= th_myfunc_1_11; end th_myfunc_1_11: begin _tmp_3 <= _mymutex_lock_reg & (_mymutex_lock_id == 1); th_myfunc_1 <= th_myfunc_1_12; end th_myfunc_1_12: begin _th_myfunc_1_lock_8 <= _tmp_3; th_myfunc_1 <= th_myfunc_1_13; end th_myfunc_1_13: begin th_myfunc_1 <= th_myfunc_1_7; end th_myfunc_1_14: begin $display("Thread %d Lock: waitcount=%d", _th_myfunc_1_tid_7, _th_myfunc_1_waitcount_9); th_myfunc_1 <= th_myfunc_1_15; end th_myfunc_1_15: begin _th_myfunc_1_i_10 <= 0; 
th_myfunc_1 <= th_myfunc_1_16; end th_myfunc_1_16: begin if(_th_myfunc_1_i_10 < 20) begin th_myfunc_1 <= th_myfunc_1_17; end else begin th_myfunc_1 <= th_myfunc_1_18; end end th_myfunc_1_17: begin _th_myfunc_1_i_10 <= _th_myfunc_1_i_10 + 1; th_myfunc_1 <= th_myfunc_1_16; end th_myfunc_1_18: begin $display("Thread %d Hello", _th_myfunc_1_tid_7); th_myfunc_1 <= th_myfunc_1_19; end th_myfunc_1_19: begin th_myfunc_1 <= th_myfunc_1_20; end th_myfunc_1_20: begin $display("Thread %d Unlock", _th_myfunc_1_tid_7); th_myfunc_1 <= th_myfunc_1_21; end endcase end end localparam th_myfunc_2_1 = 1; localparam th_myfunc_2_2 = 2; localparam th_myfunc_2_3 = 3; localparam th_myfunc_2_4 = 4; localparam th_myfunc_2_5 = 5; localparam th_myfunc_2_6 = 6; localparam th_myfunc_2_7 = 7; localparam th_myfunc_2_8 = 8; localparam th_myfunc_2_9 = 9; localparam th_myfunc_2_10 = 10; localparam th_myfunc_2_11 = 11; localparam th_myfunc_2_12 = 12; localparam th_myfunc_2_13 = 13; localparam th_myfunc_2_14 = 14; localparam th_myfunc_2_15 = 15; localparam th_myfunc_2_16 = 16; localparam th_myfunc_2_17 = 17; localparam th_myfunc_2_18 = 18; localparam th_myfunc_2_19 = 19; localparam th_myfunc_2_20 = 20; localparam th_myfunc_2_21 = 21; always @(posedge CLK) begin if(RST) begin th_myfunc_2 <= th_myfunc_2_init; _th_myfunc_2_called <= 0; _th_myfunc_2_tid_11 <= 0; _th_myfunc_2_tid_12 <= 0; _tmp_4 <= 0; _th_myfunc_2_lock_13 <= 0; _th_myfunc_2_waitcount_14 <= 0; _tmp_5 <= 0; _th_myfunc_2_i_15 <= 0; end else begin case(th_myfunc_2) th_myfunc_2_init: begin if(_th_myfunc_start[2] && (th_blink == 4)) begin _th_myfunc_2_called <= 1; end if(_th_myfunc_start[2] && (th_blink == 4)) begin _th_myfunc_2_tid_11 <= _th_blink_tid_0; end if((th_blink == 4) && _th_myfunc_start[2]) begin th_myfunc_2 <= th_myfunc_2_1; end end th_myfunc_2_1: begin _th_myfunc_2_tid_12 <= _th_myfunc_2_tid_11; th_myfunc_2 <= th_myfunc_2_2; end th_myfunc_2_2: begin $display("-- Thread %d TryLock", _th_myfunc_2_tid_12); th_myfunc_2 <= th_myfunc_2_3; 
end th_myfunc_2_3: begin th_myfunc_2 <= th_myfunc_2_4; end th_myfunc_2_4: begin _tmp_4 <= _mymutex_lock_reg & (_mymutex_lock_id == 2); th_myfunc_2 <= th_myfunc_2_5; end th_myfunc_2_5: begin _th_myfunc_2_lock_13 <= _tmp_4; th_myfunc_2 <= th_myfunc_2_6; end th_myfunc_2_6: begin _th_myfunc_2_waitcount_14 <= 0; th_myfunc_2 <= th_myfunc_2_7; end th_myfunc_2_7: begin if(!_th_myfunc_2_lock_13) begin th_myfunc_2 <= th_myfunc_2_8; end else begin th_myfunc_2 <= th_myfunc_2_14; end end th_myfunc_2_8: begin $display("-- Thread %d TryLock", _th_myfunc_2_tid_12); th_myfunc_2 <= th_myfunc_2_9; end th_myfunc_2_9: begin _th_myfunc_2_waitcount_14 <= _th_myfunc_2_waitcount_14 + 1; th_myfunc_2 <= th_myfunc_2_10; end th_myfunc_2_10: begin th_myfunc_2 <= th_myfunc_2_11; end th_myfunc_2_11: begin _tmp_5 <= _mymutex_lock_reg & (_mymutex_lock_id == 2); th_myfunc_2 <= th_myfunc_2_12; end th_myfunc_2_12: begin _th_myfunc_2_lock_13 <= _tmp_5; th_myfunc_2 <= th_myfunc_2_13; end th_myfunc_2_13: begin th_myfunc_2 <= th_myfunc_2_7; end th_myfunc_2_14: begin $display("Thread %d Lock: waitcount=%d", _th_myfunc_2_tid_12, _th_myfunc_2_waitcount_14); th_myfunc_2 <= th_myfunc_2_15; end th_myfunc_2_15: begin _th_myfunc_2_i_15 <= 0; th_myfunc_2 <= th_myfunc_2_16; end th_myfunc_2_16: begin if(_th_myfunc_2_i_15 < 20) begin th_myfunc_2 <= th_myfunc_2_17; end else begin th_myfunc_2 <= th_myfunc_2_18; end end th_myfunc_2_17: begin _th_myfunc_2_i_15 <= _th_myfunc_2_i_15 + 1; th_myfunc_2 <= th_myfunc_2_16; end th_myfunc_2_18: begin $display("Thread %d Hello", _th_myfunc_2_tid_12); th_myfunc_2 <= th_myfunc_2_19; end th_myfunc_2_19: begin th_myfunc_2 <= th_myfunc_2_20; end th_myfunc_2_20: begin $display("Thread %d Unlock", _th_myfunc_2_tid_12); th_myfunc_2 <= th_myfunc_2_21; end endcase end end localparam th_myfunc_3_1 = 1; localparam th_myfunc_3_2 = 2; localparam th_myfunc_3_3 = 3; localparam th_myfunc_3_4 = 4; localparam th_myfunc_3_5 = 5; localparam th_myfunc_3_6 = 6; localparam th_myfunc_3_7 = 7; localparam 
th_myfunc_3_8 = 8; localparam th_myfunc_3_9 = 9; localparam th_myfunc_3_10 = 10; localparam th_myfunc_3_11 = 11; localparam th_myfunc_3_12 = 12; localparam th_myfunc_3_13 = 13; localparam th_myfunc_3_14 = 14; localparam th_myfunc_3_15 = 15; localparam th_myfunc_3_16 = 16; localparam th_myfunc_3_17 = 17; localparam th_myfunc_3_18 = 18; localparam th_myfunc_3_19 = 19; localparam th_myfunc_3_20 = 20; localparam th_myfunc_3_21 = 21; always @(posedge CLK) begin if(RST) begin th_myfunc_3 <= th_myfunc_3_init; _th_myfunc_3_called <= 0; _th_myfunc_3_tid_16 <= 0; _th_myfunc_3_tid_17 <= 0; _tmp_6 <= 0; _th_myfunc_3_lock_18 <= 0; _th_myfunc_3_waitcount_19 <= 0; _tmp_7 <= 0; _th_myfunc_3_i_20 <= 0; end else begin case(th_myfunc_3) th_myfunc_3_init: begin if(_th_myfunc_start[3] && (th_blink == 4)) begin _th_myfunc_3_called <= 1; end if(_th_myfunc_start[3] && (th_blink == 4)) begin _th_myfunc_3_tid_16 <= _th_blink_tid_0; end if((th_blink == 4) && _th_myfunc_start[3]) begin th_myfunc_3 <= th_myfunc_3_1; end end th_myfunc_3_1: begin _th_myfunc_3_tid_17 <= _th_myfunc_3_tid_16; th_myfunc_3 <= th_myfunc_3_2; end th_myfunc_3_2: begin $display("-- Thread %d TryLock", _th_myfunc_3_tid_17); th_myfunc_3 <= th_myfunc_3_3; end th_myfunc_3_3: begin th_myfunc_3 <= th_myfunc_3_4; end th_myfunc_3_4: begin _tmp_6 <= _mymutex_lock_reg & (_mymutex_lock_id == 3); th_myfunc_3 <= th_myfunc_3_5; end th_myfunc_3_5: begin _th_myfunc_3_lock_18 <= _tmp_6; th_myfunc_3 <= th_myfunc_3_6; end th_myfunc_3_6: begin _th_myfunc_3_waitcount_19 <= 0; th_myfunc_3 <= th_myfunc_3_7; end th_myfunc_3_7: begin if(!_th_myfunc_3_lock_18) begin th_myfunc_3 <= th_myfunc_3_8; end else begin th_myfunc_3 <= th_myfunc_3_14; end end th_myfunc_3_8: begin $display("-- Thread %d TryLock", _th_myfunc_3_tid_17); th_myfunc_3 <= th_myfunc_3_9; end th_myfunc_3_9: begin _th_myfunc_3_waitcount_19 <= _th_myfunc_3_waitcount_19 + 1; th_myfunc_3 <= th_myfunc_3_10; end th_myfunc_3_10: begin th_myfunc_3 <= th_myfunc_3_11; end th_myfunc_3_11: 
begin _tmp_7 <= _mymutex_lock_reg & (_mymutex_lock_id == 3); th_myfunc_3 <= th_myfunc_3_12; end th_myfunc_3_12: begin _th_myfunc_3_lock_18 <= _tmp_7; th_myfunc_3 <= th_myfunc_3_13; end th_myfunc_3_13: begin th_myfunc_3 <= th_myfunc_3_7; end th_myfunc_3_14: begin $display("Thread %d Lock: waitcount=%d", _th_myfunc_3_tid_17, _th_myfunc_3_waitcount_19); th_myfunc_3 <= th_myfunc_3_15; end th_myfunc_3_15: begin _th_myfunc_3_i_20 <= 0; th_myfunc_3 <= th_myfunc_3_16; end th_myfunc_3_16: begin if(_th_myfunc_3_i_20 < 20) begin th_myfunc_3 <= th_myfunc_3_17; end else begin th_myfunc_3 <= th_myfunc_3_18; end end th_myfunc_3_17: begin _th_myfunc_3_i_20 <= _th_myfunc_3_i_20 + 1; th_myfunc_3 <= th_myfunc_3_16; end th_myfunc_3_18: begin $display("Thread %d Hello", _th_myfunc_3_tid_17); th_myfunc_3 <= th_myfunc_3_19; end th_myfunc_3_19: begin th_myfunc_3 <= th_myfunc_3_20; end th_myfunc_3_20: begin $display("Thread %d Unlock", _th_myfunc_3_tid_17); th_myfunc_3 <= th_myfunc_3_21; end endcase end end localparam th_myfunc_4_1 = 1; localparam th_myfunc_4_2 = 2; localparam th_myfunc_4_3 = 3; localparam th_myfunc_4_4 = 4; localparam th_myfunc_4_5 = 5; localparam th_myfunc_4_6 = 6; localparam th_myfunc_4_7 = 7; localparam th_myfunc_4_8 = 8; localparam th_myfunc_4_9 = 9; localparam th_myfunc_4_10 = 10; localparam th_myfunc_4_11 = 11; localparam th_myfunc_4_12 = 12; localparam th_myfunc_4_13 = 13; localparam th_myfunc_4_14 = 14; localparam th_myfunc_4_15 = 15; localparam th_myfunc_4_16 = 16; localparam th_myfunc_4_17 = 17; localparam th_myfunc_4_18 = 18; localparam th_myfunc_4_19 = 19; localparam th_myfunc_4_20 = 20; localparam th_myfunc_4_21 = 21; always @(posedge CLK) begin if(RST) begin th_myfunc_4 <= th_myfunc_4_init; _th_myfunc_4_called <= 0; _th_myfunc_4_tid_21 <= 0; _th_myfunc_4_tid_22 <= 0; _tmp_8 <= 0; _th_myfunc_4_lock_23 <= 0; _th_myfunc_4_waitcount_24 <= 0; _tmp_9 <= 0; _th_myfunc_4_i_25 <= 0; end else begin case(th_myfunc_4) th_myfunc_4_init: begin if(_th_myfunc_start[4] && 
(th_blink == 4)) begin _th_myfunc_4_called <= 1; end if(_th_myfunc_start[4] && (th_blink == 4)) begin _th_myfunc_4_tid_21 <= _th_blink_tid_0; end if((th_blink == 4) && _th_myfunc_start[4]) begin th_myfunc_4 <= th_myfunc_4_1; end end th_myfunc_4_1: begin _th_myfunc_4_tid_22 <= _th_myfunc_4_tid_21; th_myfunc_4 <= th_myfunc_4_2; end th_myfunc_4_2: begin $display("-- Thread %d TryLock", _th_myfunc_4_tid_22); th_myfunc_4 <= th_myfunc_4_3; end th_myfunc_4_3: begin th_myfunc_4 <= th_myfunc_4_4; end th_myfunc_4_4: begin _tmp_8 <= _mymutex_lock_reg & (_mymutex_lock_id == 4); th_myfunc_4 <= th_myfunc_4_5; end th_myfunc_4_5: begin _th_myfunc_4_lock_23 <= _tmp_8; th_myfunc_4 <= th_myfunc_4_6; end th_myfunc_4_6: begin _th_myfunc_4_waitcount_24 <= 0; th_myfunc_4 <= th_myfunc_4_7; end th_myfunc_4_7: begin if(!_th_myfunc_4_lock_23) begin th_myfunc_4 <= th_myfunc_4_8; end else begin th_myfunc_4 <= th_myfunc_4_14; end end th_myfunc_4_8: begin $display("-- Thread %d TryLock", _th_myfunc_4_tid_22); th_myfunc_4 <= th_myfunc_4_9; end th_myfunc_4_9: begin _th_myfunc_4_waitcount_24 <= _th_myfunc_4_waitcount_24 + 1; th_myfunc_4 <= th_myfunc_4_10; end th_myfunc_4_10: begin th_myfunc_4 <= th_myfunc_4_11; end th_myfunc_4_11: begin _tmp_9 <= _mymutex_lock_reg & (_mymutex_lock_id == 4); th_myfunc_4 <= th_myfunc_4_12; end th_myfunc_4_12: begin _th_myfunc_4_lock_23 <= _tmp_9; th_myfunc_4 <= th_myfunc_4_13; end th_myfunc_4_13: begin th_myfunc_4 <= th_myfunc_4_7; end th_myfunc_4_14: begin $display("Thread %d Lock: waitcount=%d", _th_myfunc_4_tid_22, _th_myfunc_4_waitcount_24); th_myfunc_4 <= th_myfunc_4_15; end th_myfunc_4_15: begin _th_myfunc_4_i_25 <= 0; th_myfunc_4 <= th_myfunc_4_16; end th_myfunc_4_16: begin if(_th_myfunc_4_i_25 < 20) begin th_myfunc_4 <= th_myfunc_4_17; end else begin th_myfunc_4 <= th_myfunc_4_18; end end th_myfunc_4_17: begin _th_myfunc_4_i_25 <= _th_myfunc_4_i_25 + 1; th_myfunc_4 <= th_myfunc_4_16; end th_myfunc_4_18: begin $display("Thread %d Hello", _th_myfunc_4_tid_22); 
th_myfunc_4 <= th_myfunc_4_19; end th_myfunc_4_19: begin th_myfunc_4 <= th_myfunc_4_20; end th_myfunc_4_20: begin $display("Thread %d Unlock", _th_myfunc_4_tid_22); th_myfunc_4 <= th_myfunc_4_21; end endcase end end localparam th_myfunc_5_1 = 1; localparam th_myfunc_5_2 = 2; localparam th_myfunc_5_3 = 3; localparam th_myfunc_5_4 = 4; localparam th_myfunc_5_5 = 5; localparam th_myfunc_5_6 = 6; localparam th_myfunc_5_7 = 7; localparam th_myfunc_5_8 = 8; localparam th_myfunc_5_9 = 9; localparam th_myfunc_5_10 = 10; localparam th_myfunc_5_11 = 11; localparam th_myfunc_5_12 = 12; localparam th_myfunc_5_13 = 13; localparam th_myfunc_5_14 = 14; localparam th_myfunc_5_15 = 15; localparam th_myfunc_5_16 = 16; localparam th_myfunc_5_17 = 17; localparam th_myfunc_5_18 = 18; localparam th_myfunc_5_19 = 19; localparam th_myfunc_5_20 = 20; localparam th_myfunc_5_21 = 21; always @(posedge CLK) begin if(RST) begin th_myfunc_5 <= th_myfunc_5_init; _th_myfunc_5_called <= 0; _th_myfunc_5_tid_26 <= 0; _th_myfunc_5_tid_27 <= 0; _tmp_10 <= 0; _th_myfunc_5_lock_28 <= 0; _th_myfunc_5_waitcount_29 <= 0; _tmp_11 <= 0; _th_myfunc_5_i_30 <= 0; end else begin case(th_myfunc_5) th_myfunc_5_init: begin if(_th_myfunc_start[5] && (th_blink == 4)) begin _th_myfunc_5_called <= 1; end if(_th_myfunc_start[5] && (th_blink == 4)) begin _th_myfunc_5_tid_26 <= _th_blink_tid_0; end if((th_blink == 4) && _th_myfunc_start[5]) begin th_myfunc_5 <= th_myfunc_5_1; end end th_myfunc_5_1: begin _th_myfunc_5_tid_27 <= _th_myfunc_5_tid_26; th_myfunc_5 <= th_myfunc_5_2; end th_myfunc_5_2: begin $display("-- Thread %d TryLock", _th_myfunc_5_tid_27); th_myfunc_5 <= th_myfunc_5_3; end th_myfunc_5_3: begin th_myfunc_5 <= th_myfunc_5_4; end th_myfunc_5_4: begin _tmp_10 <= _mymutex_lock_reg & (_mymutex_lock_id == 5); th_myfunc_5 <= th_myfunc_5_5; end th_myfunc_5_5: begin _th_myfunc_5_lock_28 <= _tmp_10; th_myfunc_5 <= th_myfunc_5_6; end th_myfunc_5_6: begin _th_myfunc_5_waitcount_29 <= 0; th_myfunc_5 <= th_myfunc_5_7; end 
th_myfunc_5_7: begin if(!_th_myfunc_5_lock_28) begin th_myfunc_5 <= th_myfunc_5_8; end else begin th_myfunc_5 <= th_myfunc_5_14; end end th_myfunc_5_8: begin $display("-- Thread %d TryLock", _th_myfunc_5_tid_27); th_myfunc_5 <= th_myfunc_5_9; end th_myfunc_5_9: begin _th_myfunc_5_waitcount_29 <= _th_myfunc_5_waitcount_29 + 1; th_myfunc_5 <= th_myfunc_5_10; end th_myfunc_5_10: begin th_myfunc_5 <= th_myfunc_5_11; end th_myfunc_5_11: begin _tmp_11 <= _mymutex_lock_reg & (_mymutex_lock_id == 5); th_myfunc_5 <= th_myfunc_5_12; end th_myfunc_5_12: begin _th_myfunc_5_lock_28 <= _tmp_11; th_myfunc_5 <= th_myfunc_5_13; end th_myfunc_5_13: begin th_myfunc_5 <= th_myfunc_5_7; end th_myfunc_5_14: begin $display("Thread %d Lock: waitcount=%d", _th_myfunc_5_tid_27, _th_myfunc_5_waitcount_29); th_myfunc_5 <= th_myfunc_5_15; end th_myfunc_5_15: begin _th_myfunc_5_i_30 <= 0; th_myfunc_5 <= th_myfunc_5_16; end th_myfunc_5_16: begin if(_th_myfunc_5_i_30 < 20) begin th_myfunc_5 <= th_myfunc_5_17; end else begin th_myfunc_5 <= th_myfunc_5_18; end end th_myfunc_5_17: begin _th_myfunc_5_i_30 <= _th_myfunc_5_i_30 + 1; th_myfunc_5 <= th_myfunc_5_16; end th_myfunc_5_18: begin $display("Thread %d Hello", _th_myfunc_5_tid_27); th_myfunc_5 <= th_myfunc_5_19; end th_myfunc_5_19: begin th_myfunc_5 <= th_myfunc_5_20; end th_myfunc_5_20: begin $display("Thread %d Unlock", _th_myfunc_5_tid_27); th_myfunc_5 <= th_myfunc_5_21; end endcase end end localparam th_myfunc_6_1 = 1; localparam th_myfunc_6_2 = 2; localparam th_myfunc_6_3 = 3; localparam th_myfunc_6_4 = 4; localparam th_myfunc_6_5 = 5; localparam th_myfunc_6_6 = 6; localparam th_myfunc_6_7 = 7; localparam th_myfunc_6_8 = 8; localparam th_myfunc_6_9 = 9; localparam th_myfunc_6_10 = 10; localparam th_myfunc_6_11 = 11; localparam th_myfunc_6_12 = 12; localparam th_myfunc_6_13 = 13; localparam th_myfunc_6_14 = 14; localparam th_myfunc_6_15 = 15; localparam th_myfunc_6_16 = 16; localparam th_myfunc_6_17 = 17; localparam th_myfunc_6_18 = 18; 
localparam th_myfunc_6_19 = 19; localparam th_myfunc_6_20 = 20; localparam th_myfunc_6_21 = 21; always @(posedge CLK) begin if(RST) begin th_myfunc_6 <= th_myfunc_6_init; _th_myfunc_6_called <= 0; _th_myfunc_6_tid_31 <= 0; _th_myfunc_6_tid_32 <= 0; _tmp_12 <= 0; _th_myfunc_6_lock_33 <= 0; _th_myfunc_6_waitcount_34 <= 0; _tmp_13 <= 0; _th_myfunc_6_i_35 <= 0; end else begin case(th_myfunc_6) th_myfunc_6_init: begin if(_th_myfunc_start[6] && (th_blink == 4)) begin _th_myfunc_6_called <= 1; end if(_th_myfunc_start[6] && (th_blink == 4)) begin _th_myfunc_6_tid_31 <= _th_blink_tid_0; end if((th_blink == 4) && _th_myfunc_start[6]) begin th_myfunc_6 <= th_myfunc_6_1; end end th_myfunc_6_1: begin _th_myfunc_6_tid_32 <= _th_myfunc_6_tid_31; th_myfunc_6 <= th_myfunc_6_2; end th_myfunc_6_2: begin $display("-- Thread %d TryLock", _th_myfunc_6_tid_32); th_myfunc_6 <= th_myfunc_6_3; end th_myfunc_6_3: begin th_myfunc_6 <= th_myfunc_6_4; end th_myfunc_6_4: begin _tmp_12 <= _mymutex_lock_reg & (_mymutex_lock_id == 6); th_myfunc_6 <= th_myfunc_6_5; end th_myfunc_6_5: begin _th_myfunc_6_lock_33 <= _tmp_12; th_myfunc_6 <= th_myfunc_6_6; end th_myfunc_6_6: begin _th_myfunc_6_waitcount_34 <= 0; th_myfunc_6 <= th_myfunc_6_7; end th_myfunc_6_7: begin if(!_th_myfunc_6_lock_33) begin th_myfunc_6 <= th_myfunc_6_8; end else begin th_myfunc_6 <= th_myfunc_6_14; end end th_myfunc_6_8: begin $display("-- Thread %d TryLock", _th_myfunc_6_tid_32); th_myfunc_6 <= th_myfunc_6_9; end th_myfunc_6_9: begin _th_myfunc_6_waitcount_34 <= _th_myfunc_6_waitcount_34 + 1; th_myfunc_6 <= th_myfunc_6_10; end th_myfunc_6_10: begin th_myfunc_6 <= th_myfunc_6_11; end th_myfunc_6_11: begin _tmp_13 <= _mymutex_lock_reg & (_mymutex_lock_id == 6); th_myfunc_6 <= th_myfunc_6_12; end th_myfunc_6_12: begin _th_myfunc_6_lock_33 <= _tmp_13; th_myfunc_6 <= th_myfunc_6_13; end th_myfunc_6_13: begin th_myfunc_6 <= th_myfunc_6_7; end th_myfunc_6_14: begin $display("Thread %d Lock: waitcount=%d", _th_myfunc_6_tid_32, 
_th_myfunc_6_waitcount_34); th_myfunc_6 <= th_myfunc_6_15; end th_myfunc_6_15: begin _th_myfunc_6_i_35 <= 0; th_myfunc_6 <= th_myfunc_6_16; end th_myfunc_6_16: begin if(_th_myfunc_6_i_35 < 20) begin th_myfunc_6 <= th_myfunc_6_17; end else begin th_myfunc_6 <= th_myfunc_6_18; end end th_myfunc_6_17: begin _th_myfunc_6_i_35 <= _th_myfunc_6_i_35 + 1; th_myfunc_6 <= th_myfunc_6_16; end th_myfunc_6_18: begin $display("Thread %d Hello", _th_myfunc_6_tid_32); th_myfunc_6 <= th_myfunc_6_19; end th_myfunc_6_19: begin th_myfunc_6 <= th_myfunc_6_20; end th_myfunc_6_20: begin $display("Thread %d Unlock", _th_myfunc_6_tid_32); th_myfunc_6 <= th_myfunc_6_21; end endcase end end localparam th_myfunc_7_1 = 1; localparam th_myfunc_7_2 = 2; localparam th_myfunc_7_3 = 3; localparam th_myfunc_7_4 = 4; localparam th_myfunc_7_5 = 5; localparam th_myfunc_7_6 = 6; localparam th_myfunc_7_7 = 7; localparam th_myfunc_7_8 = 8; localparam th_myfunc_7_9 = 9; localparam th_myfunc_7_10 = 10; localparam th_myfunc_7_11 = 11; localparam th_myfunc_7_12 = 12; localparam th_myfunc_7_13 = 13; localparam th_myfunc_7_14 = 14; localparam th_myfunc_7_15 = 15; localparam th_myfunc_7_16 = 16; localparam th_myfunc_7_17 = 17; localparam th_myfunc_7_18 = 18; localparam th_myfunc_7_19 = 19; localparam th_myfunc_7_20 = 20; localparam th_myfunc_7_21 = 21; always @(posedge CLK) begin if(RST) begin th_myfunc_7 <= th_myfunc_7_init; _th_myfunc_7_called <= 0; _th_myfunc_7_tid_36 <= 0; _th_myfunc_7_tid_37 <= 0; _tmp_14 <= 0; _th_myfunc_7_lock_38 <= 0; _th_myfunc_7_waitcount_39 <= 0; _tmp_15 <= 0; _th_myfunc_7_i_40 <= 0; end else begin case(th_myfunc_7) th_myfunc_7_init: begin if(_th_myfunc_start[7] && (th_blink == 4)) begin _th_myfunc_7_called <= 1; end if(_th_myfunc_start[7] && (th_blink == 4)) begin _th_myfunc_7_tid_36 <= _th_blink_tid_0; end if((th_blink == 4) && _th_myfunc_start[7]) begin th_myfunc_7 <= th_myfunc_7_1; end end th_myfunc_7_1: begin _th_myfunc_7_tid_37 <= _th_myfunc_7_tid_36; th_myfunc_7 <= 
th_myfunc_7_2; end th_myfunc_7_2: begin $display("-- Thread %d TryLock", _th_myfunc_7_tid_37); th_myfunc_7 <= th_myfunc_7_3; end th_myfunc_7_3: begin th_myfunc_7 <= th_myfunc_7_4; end th_myfunc_7_4: begin _tmp_14 <= _mymutex_lock_reg & (_mymutex_lock_id == 7); th_myfunc_7 <= th_myfunc_7_5; end th_myfunc_7_5: begin _th_myfunc_7_lock_38 <= _tmp_14; th_myfunc_7 <= th_myfunc_7_6; end th_myfunc_7_6: begin _th_myfunc_7_waitcount_39 <= 0; th_myfunc_7 <= th_myfunc_7_7; end th_myfunc_7_7: begin if(!_th_myfunc_7_lock_38) begin th_myfunc_7 <= th_myfunc_7_8; end else begin th_myfunc_7 <= th_myfunc_7_14; end end th_myfunc_7_8: begin $display("-- Thread %d TryLock", _th_myfunc_7_tid_37); th_myfunc_7 <= th_myfunc_7_9; end th_myfunc_7_9: begin _th_myfunc_7_waitcount_39 <= _th_myfunc_7_waitcount_39 + 1; th_myfunc_7 <= th_myfunc_7_10; end th_myfunc_7_10: begin th_myfunc_7 <= th_myfunc_7_11; end th_myfunc_7_11: begin _tmp_15 <= _mymutex_lock_reg & (_mymutex_lock_id == 7); th_myfunc_7 <= th_myfunc_7_12; end th_myfunc_7_12: begin _th_myfunc_7_lock_38 <= _tmp_15; th_myfunc_7 <= th_myfunc_7_13; end th_myfunc_7_13: begin th_myfunc_7 <= th_myfunc_7_7; end th_myfunc_7_14: begin $display("Thread %d Lock: waitcount=%d", _th_myfunc_7_tid_37, _th_myfunc_7_waitcount_39); th_myfunc_7 <= th_myfunc_7_15; end th_myfunc_7_15: begin _th_myfunc_7_i_40 <= 0; th_myfunc_7 <= th_myfunc_7_16; end th_myfunc_7_16: begin if(_th_myfunc_7_i_40 < 20) begin th_myfunc_7 <= th_myfunc_7_17; end else begin th_myfunc_7 <= th_myfunc_7_18; end end th_myfunc_7_17: begin _th_myfunc_7_i_40 <= _th_myfunc_7_i_40 + 1; th_myfunc_7 <= th_myfunc_7_16; end th_myfunc_7_18: begin $display("Thread %d Hello", _th_myfunc_7_tid_37); th_myfunc_7 <= th_myfunc_7_19; end th_myfunc_7_19: begin th_myfunc_7 <= th_myfunc_7_20; end th_myfunc_7_20: begin $display("Thread %d Unlock", _th_myfunc_7_tid_37); th_myfunc_7 <= th_myfunc_7_21; end endcase end end endmodule """ def test(): veriloggen.reset() test_module = thread_mutex_try_lock.mkTest() 
code = test_module.to_verilog() from pyverilog.vparser.parser import VerilogParser from pyverilog.ast_code_generator.codegen import ASTCodeGenerator parser = VerilogParser() expected_ast = parser.parse(expected_verilog) codegen = ASTCodeGenerator() expected_code = codegen.visit(expected_ast) assert(expected_code == code)
30.889127
99
0.611931
0
0
0
0
0
0
0
0
42,595
0.986384
5203375f63e49c0f7197865189dc9b2c34c83189
926
py
Python
CardiaT/idea/models.py
nikitasingh01/CardiaT
5e9af8d24b75e248776a102075bcd327bf7d04c9
[ "CC0-1.0" ]
null
null
null
CardiaT/idea/models.py
nikitasingh01/CardiaT
5e9af8d24b75e248776a102075bcd327bf7d04c9
[ "CC0-1.0" ]
null
null
null
CardiaT/idea/models.py
nikitasingh01/CardiaT
5e9af8d24b75e248776a102075bcd327bf7d04c9
[ "CC0-1.0" ]
null
null
null
from django.db import models from django.utils import timezone from django.contrib.auth.models import User class Idea(models.Model): title = models.CharField(max_length=100) content = models.TextField() date_posted = models.DateTimeField(default=timezone.now) author = models.ForeignKey(User, on_delete=models.CASCADE) tag_name= models.CharField(max_length=20,default='general') def __str__(self): return self.title class Comments(models.Model): description = models.CharField(max_length=100) pub_date = models.DateTimeField(default=timezone.now) author = models.ForeignKey(User, on_delete=models.CASCADE) idea=models.ForeignKey(Idea, on_delete=models.CASCADE, related_name='comments') # class Tags(models.Model): # tag_name= models.CharField(max_length=20) # idea=models.ForeignKey(Idea, on_delete=models.CASCADE, related_name='tags') # Create your models here.
33.071429
83
0.75378
626
0.676026
0
0
0
0
0
0
200
0.215983
5204c058c4d33af2eac0352bcb292f208d16b5bb
416
py
Python
sample_problems/problems_with_solution31.py
adi01trip01/adi_workspace
f493b3ba84645eec3a57607243760a826880d1a3
[ "MIT" ]
null
null
null
sample_problems/problems_with_solution31.py
adi01trip01/adi_workspace
f493b3ba84645eec3a57607243760a826880d1a3
[ "MIT" ]
null
null
null
sample_problems/problems_with_solution31.py
adi01trip01/adi_workspace
f493b3ba84645eec3a57607243760a826880d1a3
[ "MIT" ]
null
null
null
# Write a Python program to compute the greatest common divisor (GCD) of two positive integers. n1 = int(input("Enter the first number: ")) n2 = int(input("Enter the second number: ")) a1 = [] a2 = [] a3 = [] for i in range(1, n1 + 1): if n1 % i == 0: a1.append(i) for i in range(1, n2 + 1): if n2 % i == 0: a2.append(i) for i in a1: if i in a2: a3.append(i) print(max(a3))
18.086957
95
0.564904
0
0
0
0
0
0
0
0
148
0.355769
5205f2274c544946c12588cc59616bb5d26c16a2
2,110
py
Python
Biological_Questions/Cell_Cycle_Duration/SuperOutliers_Plot_LinTrees.py
The-Kristina/CellComp
29ec7690e0d9adb1a6214937ca41fd1dadce18c6
[ "CNRI-Python", "RSA-MD", "Xnet", "Net-SNMP", "X11" ]
7
2019-05-13T10:07:44.000Z
2022-03-01T16:20:48.000Z
Biological_Questions/Cell_Cycle_Duration/SuperOutliers_Plot_LinTrees.py
The-Kristina/CellComp
29ec7690e0d9adb1a6214937ca41fd1dadce18c6
[ "CNRI-Python", "RSA-MD", "Xnet", "Net-SNMP", "X11" ]
null
null
null
Biological_Questions/Cell_Cycle_Duration/SuperOutliers_Plot_LinTrees.py
The-Kristina/CellComp
29ec7690e0d9adb1a6214937ca41fd1dadce18c6
[ "CNRI-Python", "RSA-MD", "Xnet", "Net-SNMP", "X11" ]
3
2020-04-23T18:13:20.000Z
2020-11-11T18:46:48.000Z
import time start_time = time.process_time() import sys sys.path.append("../") from Biological_Questions.Cell_Cycle_Duration.Shortlist_Outliers import ShortlistOutliers from Movie_Analysis_Pipeline.Merging_Movie_Datasets.Find_Family_Class import FindFamily from Cell_IDs_Analysis.Plotter_Lineage_Trees import PlotLineageTree file = "/Volumes/lowegrp/Data/Kristina/MDCK_WT_Pure/cellIDdetails_merged.txt" # Identify the stacked histogram outliers: left_outliers = ShortlistOutliers(left_or_right="left") print () print (len(left_outliers), left_outliers) left_outliers_cellID_info = [] left_outliers_parent_info = [] left_outliers_rootID_info = [] for outlier in left_outliers: outlier_info = FindFamily(cell_ID=outlier, filtered_file=file).FindItself() left_outliers_cellID_info.append(outlier_info) parent_info = FindFamily(cell_ID=outlier, filtered_file=file).FindParent() left_outliers_parent_info.append(parent_info) rootID_info = FindFamily(cell_ID=outlier, filtered_file=file).FindRoot() left_outliers_rootID_info.append(rootID_info) print (len(left_outliers_cellID_info), left_outliers_cellID_info) print (len(left_outliers_parent_info), left_outliers_parent_info) print (len(left_outliers_rootID_info), left_outliers_rootID_info) # Identify the outliers of those outliers & plot their trees: counter = 0 for outlier_str, outlier, parent, root in zip\ (left_outliers, left_outliers_cellID_info, left_outliers_parent_info, left_outliers_rootID_info): if parent[1] != "NaN" and outlier[1] != "NaN": if parent[1] - outlier[1] >= 0: counter += 1 movie = outlier_str.split("-") cell_ID = movie[0] pos = movie[1] date = movie[2] xml_file = "/Volumes/lowegrp/Data/Kristina/MDCK_WT_Pure/{}/{}/tracks/tracks_type1.xml".format(date, pos) PlotLineageTree(root_ID=root[0], cell_ID=cell_ID, xml_file=xml_file, show=True) print ("{} outliers were identified & their lineage trees plotted in {} mins." .format(counter, round((time.process_time() - start_time) / 60, 2)))
42.2
116
0.754976
0
0
0
0
0
0
0
0
343
0.162559
52060668ae2c9acef042ae7790ea3da70b6ba176
13,283
py
Python
keras_contrib/utils/kito.py
ZFTurbo/keras-contrib
7eca212ab29a620463514223b9efd2198cd910d8
[ "MIT" ]
null
null
null
keras_contrib/utils/kito.py
ZFTurbo/keras-contrib
7eca212ab29a620463514223b9efd2198cd910d8
[ "MIT" ]
null
null
null
keras_contrib/utils/kito.py
ZFTurbo/keras-contrib
7eca212ab29a620463514223b9efd2198cd910d8
[ "MIT" ]
null
null
null
""" Reduce neural net structure (Conv + BN -> Conv) Also works: DepthwiseConv2D + BN -> DepthwiseConv2D SeparableConv2D + BN -> SeparableConv2D This code takes on input trained Keras model and optimize layer structure and weights in such a way that model became much faster (~30%), but works identically to initial model. It can be extremely useful in case you need to process large amount of images with trained model. Reduce operation was tested on all Keras models zoo. See comparison table and full description by link: https://github.com/ZFTurbo/Keras-inference-time-optimizer Author: Roman Solovyev (ZFTurbo) """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np def get_keras_sub_version(): from keras import __version__ type = int(__version__.split('.')[1]) return type def get_input_layers_ids(model, layer, verbose=False): res = dict() for i, l in enumerate(model.layers): layer_id = str(id(l)) res[layer_id] = i inbound_layers = [] layer_id = str(id(layer)) for i, node in enumerate(layer._inbound_nodes): node_key = layer.name + '_ib-' + str(i) if get_keras_sub_version() == 1: network_nodes = model._container_nodes else: network_nodes = model._network_nodes if node_key in network_nodes: for inbound_layer in node.inbound_layers: inbound_layer_id = str(id(inbound_layer)) inbound_layers.append(res[inbound_layer_id]) return inbound_layers def get_output_layers_ids(model, layer, verbose=False): res = dict() for i, l in enumerate(model.layers): layer_id = str(id(l)) res[layer_id] = i outbound_layers = [] layer_id = str(id(layer)) for i, node in enumerate(layer._outbound_nodes): node_key = layer.name + '_ib-' + str(i) if get_keras_sub_version() == 1: network_nodes = model._container_nodes else: network_nodes = model._network_nodes if node_key in network_nodes: outbound_layer_id = str(id(node.outbound_layer)) if outbound_layer_id in res: outbound_layers.append(res[outbound_layer_id]) else: 
print('Warning, some problem with outbound node on layer {}!'.format(layer.name)) return outbound_layers def get_copy_of_layer(layer, verbose=False): from keras.layers.core import Activation from keras import layers config = layer.get_config() # Non-standard relu6 layer (from MobileNet) if layer.__class__.__name__ == 'Activation': if config['activation'] == 'relu6': if get_keras_sub_version() == 1: from keras.applications.mobilenet import relu6 else: from keras_applications.mobilenet import relu6 layer_copy = Activation(relu6, name=layer.name) return layer_copy layer_copy = layers.deserialize({'class_name': layer.__class__.__name__, 'config': config}) layer_copy.name = layer.name return layer_copy def get_layers_without_output(model, verbose=False): output_tensor = [] output_names = [] for level_id in range(len(model.layers)): layer = model.layers[level_id] output_layers = get_output_layers_ids(model, layer, verbose) if len(output_layers) == 0: try: if type(layer.output) is list: output_tensor += layer.output else: output_tensor.append(layer.output) output_names.append(layer.name) except: # Ugly need to check for correctness for node in layer._inbound_nodes: for i in range(len(node.inbound_layers)): outbound_layer = node.inbound_layers[i].name outbound_tensor_index = node.tensor_indices[i] output_tensor.append(node.output_tensors[outbound_tensor_index]) output_names.append(outbound_layer) if verbose: print('Outputs [{}]: {}'.format(len(output_tensor), output_names)) return output_tensor, output_names def optimize_conv2d_batchnorm_block(m, initial_model, input_layers, conv, bn, verbose=False): from keras import layers from keras.models import Model conv_layer_type = conv.__class__.__name__ conv_config = conv.get_config() conv_config['use_bias'] = True bn_config = bn.get_config() if conv_config['activation'] != 'linear': print('Only linear activation supported for conv + bn optimization!') exit() # Copy Conv2D layer layer_copy = layers.deserialize({'class_name': 
conv.__class__.__name__, 'config': conv_config}) # We use batch norm name here to find it later layer_copy.name = bn.name # Create new model to initialize layer. We need to store other output tensors as well output_tensor, output_names = get_layers_without_output(m, verbose) input_layer_name = initial_model.layers[input_layers[0]].name prev_layer = m.get_layer(name=input_layer_name) x = layer_copy(prev_layer.output) output_tensor_to_use = [x] for i in range(len(output_names)): if output_names[i] != input_layer_name: output_tensor_to_use.append(output_tensor[i]) if len(output_tensor_to_use) == 1: output_tensor_to_use = output_tensor_to_use[0] tmp_model = Model(inputs=m.input, outputs=output_tensor_to_use) if conv.get_config()['use_bias']: (conv_weights, conv_bias) = conv.get_weights() else: (conv_weights,) = conv.get_weights() if bn_config['scale']: gamma, beta, run_mean, run_std = bn.get_weights() else: gamma = 1.0 beta, run_mean, run_std = bn.get_weights() eps = bn_config['epsilon'] A = gamma / np.sqrt(run_std + eps) if conv.get_config()['use_bias']: B = beta + (gamma * (conv_bias - run_mean) / np.sqrt(run_std + eps)) else: B = beta - ((gamma * run_mean) / np.sqrt(run_std + eps)) if conv_layer_type == 'Conv2D': for i in range(conv_weights.shape[-1]): conv_weights[:, :, :, i] *= A[i] elif conv_layer_type == 'DepthwiseConv2D': for i in range(conv_weights.shape[-2]): conv_weights[:, :, i, :] *= A[i] tmp_model.get_layer(layer_copy.name).set_weights((conv_weights, B)) return tmp_model def optimize_separableconv2d_batchnorm_block(m, initial_model, input_layers, conv, bn, verbose=False): from keras import layers from keras.models import Model conv_config = conv.get_config() conv_config['use_bias'] = True bn_config = bn.get_config() if conv_config['activation'] != 'linear': print('Only linear activation supported for conv + bn optimization!') exit() layer_copy = layers.deserialize({'class_name': conv.__class__.__name__, 'config': conv_config}) # We use batch norm name 
here to find it later layer_copy.name = bn.name # Create new model to initialize layer. We need to store other output tensors as well output_tensor, output_names = get_layers_without_output(m, verbose) input_layer_name = initial_model.layers[input_layers[0]].name prev_layer = m.get_layer(name=input_layer_name) x = layer_copy(prev_layer.output) output_tensor_to_use = [x] for i in range(len(output_names)): if output_names[i] != input_layer_name: output_tensor_to_use.append(output_tensor[i]) if len(output_tensor_to_use) == 1: output_tensor_to_use = output_tensor_to_use[0] tmp_model = Model(inputs=m.input, outputs=output_tensor_to_use) if conv.get_config()['use_bias']: (conv_weights_3, conv_weights_1, conv_bias) = conv.get_weights() else: (conv_weights_3, conv_weights_1) = conv.get_weights() if bn_config['scale']: gamma, beta, run_mean, run_std = bn.get_weights() else: gamma = 1.0 beta, run_mean, run_std = bn.get_weights() eps = bn_config['epsilon'] A = gamma / np.sqrt(run_std + eps) if conv.get_config()['use_bias']: B = beta + (gamma * (conv_bias - run_mean) / np.sqrt(run_std + eps)) else: B = beta - ((gamma * run_mean) / np.sqrt(run_std + eps)) for i in range(conv_weights_1.shape[-1]): conv_weights_1[:, :, :, i] *= A[i] # print(conv_weights_3.shape, conv_weights_1.shape, A.shape) tmp_model.get_layer(layer_copy.name).set_weights((conv_weights_3, conv_weights_1, B)) return tmp_model def reduce_keras_model(model, verbose=False): from keras.models import Model from keras.models import clone_model x = [] input = [] skip_layers = [] keras_sub_version = get_keras_sub_version() if verbose: print('Keras sub version: {}'.format(keras_sub_version)) # Find all inputs for level_id in range(len(model.layers)): layer = model.layers[level_id] layer_type = layer.__class__.__name__ if layer_type == 'InputLayer': inp1 = get_copy_of_layer(layer, verbose) x.append(inp1) input.append(inp1.output) tmp_model = Model(inputs=input, outputs=input) for level_id in range(len(model.layers)): 
layer = model.layers[level_id] layer_type = layer.__class__.__name__ # Skip input layers if layer_type == 'InputLayer': continue input_layers = get_input_layers_ids(model, layer, verbose) output_layers = get_output_layers_ids(model, layer, verbose) if verbose: print('Go for {}: {} ({}). Input layers: {} Output layers: {}'.format(level_id, layer_type, layer.name, input_layers, output_layers)) if level_id in skip_layers: if verbose: print('Skip layer because it was removed during optimization!') continue # Special cases for reducing if len(output_layers) == 1: next_layer = model.layers[output_layers[0]] next_layer_type = next_layer.__class__.__name__ if layer_type in ['Conv2D', 'DepthwiseConv2D'] and next_layer_type == 'BatchNormalization': tmp_model = optimize_conv2d_batchnorm_block(tmp_model, model, input_layers, layer, next_layer, verbose) x = tmp_model.layers[-1].output skip_layers.append(output_layers[0]) continue if layer_type in ['SeparableConv2D'] and next_layer_type == 'BatchNormalization': tmp_model = optimize_separableconv2d_batchnorm_block(tmp_model, model, input_layers, layer, next_layer, verbose) x = tmp_model.layers[-1].output skip_layers.append(output_layers[0]) continue if layer_type == 'Model': new_layer = clone_model(layer) new_layer.set_weights(layer.get_weights()) else: new_layer = get_copy_of_layer(layer, verbose) prev_layer = [] for i in range(len(set(input_layers))): search_layer = tmp_model.get_layer(name=model.layers[input_layers[i]].name) try: tens = search_layer.output prev_layer.append(tens) except: # Ugly need to check for correctness for node in search_layer._inbound_nodes: for i in range(len(node.inbound_layers)): outbound_tensor_index = node.tensor_indices[i] prev_layer.append(node.output_tensors[outbound_tensor_index]) if len(prev_layer) == 1: prev_layer = prev_layer[0] output_tensor, output_names = get_layers_without_output(tmp_model, verbose) if layer_type == 'Model': for f in prev_layer: x = new_layer(f) if f in output_tensor: 
output_tensor.remove(f) output_tensor.append(x) else: x = new_layer(prev_layer) if type(prev_layer) is list: for f in prev_layer: if f in output_tensor: output_tensor.remove(f) else: if prev_layer in output_tensor: output_tensor.remove(prev_layer) if type(x) is list: output_tensor += x else: output_tensor.append(x) tmp_model = Model(inputs=input, outputs=output_tensor) tmp_model.get_layer(name=layer.name).set_weights(layer.get_weights()) output_tensor, output_names = get_layers_without_output(tmp_model, verbose) if verbose: print('Output names: {}'.format(output_names)) model = Model(inputs=input, outputs=output_tensor) return model
38.390173
146
0.623654
0
0
0
0
0
0
0
0
1,896
0.142739
5208c24361058b62ce6e108305cf7e8045d2eb9a
2,031
py
Python
alipay/aop/api/domain/TopicItemVo.py
snowxmas/alipay-sdk-python-all
96870ced60facd96c5bce18d19371720cbda3317
[ "Apache-2.0" ]
213
2018-08-27T16:49:32.000Z
2021-12-29T04:34:12.000Z
alipay/aop/api/domain/TopicItemVo.py
snowxmas/alipay-sdk-python-all
96870ced60facd96c5bce18d19371720cbda3317
[ "Apache-2.0" ]
29
2018-09-29T06:43:00.000Z
2021-09-02T03:27:32.000Z
alipay/aop/api/domain/TopicItemVo.py
snowxmas/alipay-sdk-python-all
96870ced60facd96c5bce18d19371720cbda3317
[ "Apache-2.0" ]
59
2018-08-27T16:59:26.000Z
2022-03-25T10:08:15.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.constant.ParamConstants import * class TopicItemVo(object): def __init__(self): self._desc = None self._id = None self._status = None self._title = None @property def desc(self): return self._desc @desc.setter def desc(self, value): self._desc = value @property def id(self): return self._id @id.setter def id(self, value): self._id = value @property def status(self): return self._status @status.setter def status(self, value): self._status = value @property def title(self): return self._title @title.setter def title(self, value): self._title = value def to_alipay_dict(self): params = dict() if self.desc: if hasattr(self.desc, 'to_alipay_dict'): params['desc'] = self.desc.to_alipay_dict() else: params['desc'] = self.desc if self.id: if hasattr(self.id, 'to_alipay_dict'): params['id'] = self.id.to_alipay_dict() else: params['id'] = self.id if self.status: if hasattr(self.status, 'to_alipay_dict'): params['status'] = self.status.to_alipay_dict() else: params['status'] = self.status if self.title: if hasattr(self.title, 'to_alipay_dict'): params['title'] = self.title.to_alipay_dict() else: params['title'] = self.title return params @staticmethod def from_alipay_dict(d): if not d: return None o = TopicItemVo() if 'desc' in d: o.desc = d['desc'] if 'id' in d: o.id = d['id'] if 'status' in d: o.status = d['status'] if 'title' in d: o.title = d['title'] return o
23.616279
63
0.514525
1,914
0.942393
0
0
839
0.413097
0
0
208
0.102413
52095086bb446dc1b08b9d315803c1b73ef88590
541
py
Python
project/api/views/bank_list.py
Rafiatu/cinch
05f3927363a9f75598611e3f152b90464a588de2
[ "MIT" ]
null
null
null
project/api/views/bank_list.py
Rafiatu/cinch
05f3927363a9f75598611e3f152b90464a588de2
[ "MIT" ]
null
null
null
project/api/views/bank_list.py
Rafiatu/cinch
05f3927363a9f75598611e3f152b90464a588de2
[ "MIT" ]
null
null
null
from rest_framework import status from rest_framework.viewsets import ViewSet from rest_framework.decorators import action from rest_framework.permissions import AllowAny, IsAuthenticated from api.lib.response import Response from payment.get_bank_list import BankList class BankListViewSet(ViewSet): @action(methods=['get'], detail=False, permission_classes=[IsAuthenticated], url_path='*') def get_bank_list(self, request): banks = BankList.call() return Response({'banks':banks.value}, status=status.HTTP_200_OK)
41.615385
94
0.791128
270
0.499076
0
0
234
0.432532
0
0
15
0.027726
5209b7eaee6151a55b670766ca8043416e38ee6c
290
py
Python
Pipeline/4_R0_ARRP/Executable/r0_arrp_make.py
johnlspouge/R0_Unstratified_Case_Data
696b4f45265904de04213bb4bd21390684ad00a6
[ "Unlicense" ]
null
null
null
Pipeline/4_R0_ARRP/Executable/r0_arrp_make.py
johnlspouge/R0_Unstratified_Case_Data
696b4f45265904de04213bb4bd21390684ad00a6
[ "Unlicense" ]
null
null
null
Pipeline/4_R0_ARRP/Executable/r0_arrp_make.py
johnlspouge/R0_Unstratified_Case_Data
696b4f45265904de04213bb4bd21390684ad00a6
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python from os import system log = f'r0_arrp.log' C = ' -c ../../1_Prem_Matrices_to_df/Data/UNSDMethodology.csv' E = ' -e ../../3_Prem_Matrices_to_PF_Eigenvalue/Output/pf_eigenvalue.csv' S = ' -s ../../2_ARRP/Output/slope.csv' system( f'r0_arrp.py {C} {E} {S} > {log}' )
24.166667
73
0.662069
0
0
0
0
0
0
0
0
230
0.793103
520aeb88ed49552d97150bdb774d3492dbf483cc
2,816
py
Python
cloudkitty-9.0.0/cloudkitty/rating/pyscripts/db/api.py
scottwedge/OpenStack-Stein
7077d1f602031dace92916f14e36b124f474de15
[ "Apache-2.0" ]
null
null
null
cloudkitty-9.0.0/cloudkitty/rating/pyscripts/db/api.py
scottwedge/OpenStack-Stein
7077d1f602031dace92916f14e36b124f474de15
[ "Apache-2.0" ]
5
2019-08-14T06:46:03.000Z
2021-12-13T20:01:25.000Z
cloudkitty-9.0.0/cloudkitty/rating/pyscripts/db/api.py
scottwedge/OpenStack-Stein
7077d1f602031dace92916f14e36b124f474de15
[ "Apache-2.0" ]
2
2020-03-15T01:24:15.000Z
2020-07-22T20:34:26.000Z
# -*- coding: utf-8 -*- # Copyright 2015 Objectif Libre # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Stéphane Albert # import abc from oslo_config import cfg from oslo_db import api as db_api import six _BACKEND_MAPPING = { 'sqlalchemy': 'cloudkitty.rating.pyscripts.db.sqlalchemy.api'} IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True) def get_instance(): """Return a DB API instance.""" return IMPL class NoSuchScript(Exception): """Raised when the script doesn't exist.""" def __init__(self, name=None, uuid=None): super(NoSuchScript, self).__init__( "No such script: %s (UUID: %s)" % (name, uuid)) self.name = name self.uuid = uuid class ScriptAlreadyExists(Exception): """Raised when the script already exists.""" def __init__(self, name, uuid): super(ScriptAlreadyExists, self).__init__( "Script %s already exists (UUID: %s)" % (name, uuid)) self.name = name self.uuid = uuid @six.add_metaclass(abc.ABCMeta) class PyScripts(object): """Base class for pyscripts configuration.""" @abc.abstractmethod def get_migration(self): """Return a migrate manager. """ @abc.abstractmethod def get_script(self, name=None, uuid=None): """Return a script object. :param name: Filter on a script name. :param uuid: The uuid of the script to get. """ @abc.abstractmethod def list_scripts(self): """Return a UUID list of every scripts available. """ @abc.abstractmethod def create_script(self, name, data): """Create a new script. 
:param name: Name of the script to create. :param data: Content of the python script. """ @abc.abstractmethod def update_script(self, uuid, **kwargs): """Update a script. :param uuid UUID of the script to modify. :param data: Script data. """ @abc.abstractmethod def delete_script(self, name=None, uuid=None): """Delete a list. :param name: Name of the script to delete. :param uuid: UUID of the script to delete. """
27.339806
78
0.631392
1,715
0.608804
0
0
1,178
0.418175
0
0
1,561
0.554136
520b330c590fe9003700958a5e2e871eb51d0cd7
3,739
py
Python
scripts/src/communication/power_consumption.py
GLO3013-E4/COViRondelle2021
f8d23903d0a906e93a7698a555d90ebecdf83969
[ "MIT" ]
null
null
null
scripts/src/communication/power_consumption.py
GLO3013-E4/COViRondelle2021
f8d23903d0a906e93a7698a555d90ebecdf83969
[ "MIT" ]
null
null
null
scripts/src/communication/power_consumption.py
GLO3013-E4/COViRondelle2021
f8d23903d0a906e93a7698a555d90ebecdf83969
[ "MIT" ]
null
null
null
import time from ina219 import INA219 SHUNT_OHMS = 0.1 MAX_EXPECTED_AMPS = 0.2 def read(): ina1 = INA219(SHUNT_OHMS, MAX_EXPECTED_AMPS, address=0x40) ina1.configure(ina1.RANGE_16V, ina1.GAIN_AUTO) # print("ici") # print("INA1 ==============") # print("Bus Voltage : %.3f V" % ina1.voltage()) # print("Bus Current : %.3f mA" % ina1.current()) # print("Supply Voltage : %.3f V" % ina1.supply_voltage()) # print("Shunt voltage : %.3f mV" % ina1.shunt_voltage()) print("Power : %.3f mW" % ina1.power()) # print("") print("") print("") print("") # ina2 = INA219(SHUNT_OHMS, MAX_EXPECTED_AMPS, address=0x45) # ina2.configure(ina2.RANGE_16V, ina2.GAIN_AUTO) # print("ici") # print("INA2 ==============") # print("Bus Voltage : %.3f V" % ina2.voltage()) # print("Bus Current : %.3f mA" % ina2.current()) # print("Supply Voltage : %.3f V" % ina2.supply_voltage()) # print("Shunt voltage : %.3f mV" % ina2.shunt_voltage()) # print("Power : %.3f mW" % ina2.power()) def timedTest(numberOfReads): ina1 = INA219(SHUNT_OHMS, MAX_EXPECTED_AMPS, address=0x40) ina1.configure(ina1.RANGE_16V, ina1.GAIN_AUTO) # ina2 = INA219(SHUNT_OHMS, MAX_EXPECTED_AMPS, address=0x45) # ina2.configure(ina2.RANGE_16V, ina2.GAIN_AUTO) begin = time.time() averagePower1 = 0 averagePower2 = 0 averageCurrent1 = 0 averageVoltage1 = 0 averageCurrent2 = 0 averageVoltage2 = 0 for i in range(0, numberOfReads): averageCurrent1 += ina1.current() averageCurrent2 += ina1.current() averageVoltage1 += ina1.voltage() averageVoltage2 += ina1.voltage() averagePower1 += ina1.power() averagePower2 += ina1.power() averageCurrent1 = averageCurrent1 / numberOfReads averageCurrent2 = averageCurrent2 / numberOfReads averageVoltage1 = averageVoltage1 / numberOfReads averageVoltage2 = averageVoltage2 / numberOfReads averagePower1 = averagePower1 / numberOfReads averagePower2 = averagePower2 / numberOfReads end = time.time() print("Average Current #1 ======= ") print(str(averageCurrent1) + "mA") print("") print("Average Voltage #1 ======= ") 
print(str(averageVoltage1) + "V") print("") print("Average Power #1 ======= ") print(str(averagePower1) + "mW") print("") print("Average Current #2 ======= ") print(str(averageCurrent2) + "mA") print("") print("Average Voltage #2 ======= ") print(str(averageVoltage2) + "V") print("") print("Average Power #2 ======= ") print(str(averagePower2) + "mW") print("") print("Time used =======") print(end - begin) def average(): ina1 = INA219(SHUNT_OHMS, MAX_EXPECTED_AMPS, address=0x40) ina1.configure(ina1.RANGE_16V, ina1.GAIN_AUTO) ina2 = INA219(SHUNT_OHMS, MAX_EXPECTED_AMPS, address=0x45) ina2.configure(ina2.RANGE_16V, ina2.GAIN_AUTO) averageCurrent = 0 averageTension = 0 averagePower = 0 begin = time.time() for i in range(0, 90): averageCurrent += ina1.current() averageTension += ina2.voltage() averagePower += ina1.power() averageCurrent = averageCurrent / 90 averageTension = averageTension / 90 averagePower = averagePower / 90 end = time.time() print("") print("Average Current ======= ") print(str(averageCurrent) + "mA") print("Average Tension ======= ") print(str(averageTension) + "V") print("Average Power ======= ") print(str(averagePower) + "mW") print("") print("Time used =======") print(end - begin) return 3 if __name__ == "__main__": timedTest(100)
28.984496
64
0.612196
0
0
0
0
0
0
0
0
1,178
0.315058
520bda3f1eb7c2a6497554e137fb84ac3cc5ae35
3,646
py
Python
pyrobolearn/robots/littledog.py
Pandinosaurus/pyrobolearn
9cd7c060723fda7d2779fa255ac998c2c82b8436
[ "Apache-2.0" ]
2
2021-01-21T21:08:30.000Z
2022-03-29T16:45:49.000Z
pyrobolearn/robots/littledog.py
Pandinosaurus/pyrobolearn
9cd7c060723fda7d2779fa255ac998c2c82b8436
[ "Apache-2.0" ]
null
null
null
pyrobolearn/robots/littledog.py
Pandinosaurus/pyrobolearn
9cd7c060723fda7d2779fa255ac998c2c82b8436
[ "Apache-2.0" ]
1
2020-09-29T21:25:39.000Z
2020-09-29T21:25:39.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- """Provide the Little Dog robotic platform. """ import os import numpy as np from pyrobolearn.robots.legged_robot import QuadrupedRobot __author__ = "Brian Delhaisse" __copyright__ = "Copyright 2018, PyRoboLearn" __license__ = "GNU GPLv3" __version__ = "1.0.0" __maintainer__ = "Brian Delhaisse" __email__ = "briandelhaisse@gmail.com" __status__ = "Development" class LittleDog(QuadrupedRobot): r"""Little Dog References: - [1] "The LittleDog Robot", Murphy et al., 2010 https://journals.sagepub.com/doi/abs/10.1177/0278364910387457?journalCode=ijra - [2] https://github.com/RobotLocomotion/LittleDog """ def __init__(self, simulator, position=(0, 0, 0.2), orientation=(0, 0, 0, 1), fixed_base=False, scale=1., urdf=os.path.dirname(__file__) + '/urdfs/littledog/littleDog.urdf'): """ Initialize the LittleDog robots. Args: simulator (Simulator): simulator instance. position (np.array[float[3]]): Cartesian world position. orientation (np.array[float[4]]): Cartesian world orientation expressed as a quaternion [x,y,z,w]. fixed_base (bool): if True, the robot base will be fixed in the world. scale (float): scaling factor that is used to scale the robot. urdf (str): path to the urdf. Do not change it unless you know what you are doing. 
""" # check parameters if position is None: position = (0., 0., 0.2) if len(position) == 2: # assume x, y are given position = tuple(position) + (0.2,) if orientation is None: orientation = (0, 0, 0, 1) if fixed_base is None: fixed_base = False super(LittleDog, self).__init__(simulator, urdf, position, orientation, fixed_base, scale) self.name = 'littledog' self.legs = [[self.get_link_ids(link) for link in links if link in self.link_names] for links in [['front_left_hip', 'front_left_upper_leg', 'front_left_lower_leg'], ['front_right_hip', 'front_right_upper_leg', 'front_right_lower_leg'], ['back_left_hip', 'back_left_upper_leg', 'back_left_lower_leg'], ['back_right_hip', 'back_right_upper_leg', 'back_right_lower_leg']]] self.feet = [self.get_link_ids(link) for link in ['front_left_lower_leg', 'front_right_lower_leg', 'back_left_lower_leg', 'back_right_lower_leg'] if link in self.link_names] self.kp = 24. * np.ones(12) self.kd = np.array([0.5, 0.5, 0.16, 0.5, 0.5, 0.16, 0.5, 0.5, 0.16, 0.5, 0.5, 0.16]) self.set_joint_positions([-0.6, -0.6, 0.6, 0.6], self.feet) self.joint_nominal_config = np.array([0., 0., -0.6, 0., 0., -0.6, 0., 0., 0.6, 0., 0., 0.6]) # Tests if __name__ == "__main__": from itertools import count from pyrobolearn.simulators import Bullet from pyrobolearn.worlds import BasicWorld # Create simulator sim = Bullet() # create world world = BasicWorld(sim) # create robot robot = LittleDog(sim) # print information about the robot robot.print_info() # Position control using sliders # robot.add_joint_slider() # run simulator for _ in count(): # robot.update_joint_slider() # robot.compute_and_draw_com_position() # robot.compute_and_draw_projected_com_position() world.step(sleep_dt=1./240)
36.46
110
0.611629
2,577
0.706802
0
0
0
0
0
0
1,717
0.470927
520c123c3cc2054e4fcd79af635f3285abb8eea2
3,818
py
Python
Scripts/simulation/business/advertising_manager.py
velocist/TS4CheatsInfo
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
[ "Apache-2.0" ]
null
null
null
Scripts/simulation/business/advertising_manager.py
velocist/TS4CheatsInfo
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
[ "Apache-2.0" ]
null
null
null
Scripts/simulation/business/advertising_manager.py
velocist/TS4CheatsInfo
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
[ "Apache-2.0" ]
null
null
null
# uncompyle6 version 3.7.4 # Python bytecode 3.7 (3394) # Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)] # Embedded file name: T:\InGame\Gameplay\Scripts\Server\business\advertising_manager.py # Compiled at: 2017-04-27 01:01:18 # Size of source mod 2**32: 6129 bytes from protocolbuffers import Business_pb2, DistributorOps_pb2 from business.business_enums import BusinessAdvertisingType from distributor.ops import GenericProtocolBufferOp from distributor.system import Distributor import services, sims4 logger = sims4.log.Logger('Business', default_owner='jdimailig') class HasAdvertisingManagerMixin: def __init__(self, *args, **kwargs): (super().__init__)(*args, **kwargs) self._advertising_manager = AdvertisingManager.create_from_business_manager(self) def get_advertising_multiplier(self): return self._advertising_manager.get_advertising_multiplier() def set_advertising_type(self, advertising_type): self._advertising_manager.set_advertising_type(advertising_type) def get_advertising_type_for_gsi(self): return str(self._advertising_manager._advertising_type) def get_current_advertising_cost(self): return self._advertising_manager.get_current_advertising_cost() class AdvertisingManager: @classmethod def create_from_business_manager(cls, business_manager): return AdvertisingManager(business_manager, business_manager.tuning_data.advertising_configuration) def __init__(self, business_manager, advertising_configuration): self._business_manager = business_manager self._configuration = advertising_configuration self._advertising_type = advertising_configuration.default_advertising_type self._advertising_update_time = None self._advertising_cost = 0 def clear_state(self): self._advertising_cost = 0 self._advertising_update_time = None def open_business(self): self.set_advertising_type(self._advertising_type) def get_current_advertising_cost(self): return self._advertising_cost + 
self._get_advertising_cost_since_last_update() def get_advertising_cost_per_hour(self): return self._configuration.get_advertising_cost_per_hour(self._advertising_type) def set_advertising_type(self, advertising_type): self._advertising_cost += self._get_advertising_cost_since_last_update() self._advertising_update_time = services.time_service().sim_now if advertising_type == BusinessAdvertisingType.INVALID: logger.error('Attempting to set an INVALID advertising type to {}. This will be ignored.', advertising_type) else: self._advertising_type = advertising_type self._send_advertisement_update_message() def get_advertising_multiplier(self): return self._configuration.get_customer_count_multiplier(self._advertising_type) def _get_advertising_cost_since_last_update(self): now = services.time_service().sim_now running_cost = 0 if self._advertising_update_time is None: self._advertising_update_time = now running_cost = 0 else: hours_in_ad_type = (now - self._advertising_update_time).in_hours() running_cost = hours_in_ad_type * self.get_advertising_cost_per_hour() return running_cost def _send_advertisement_update_message(self): msg = Business_pb2.BusinessAdvertisementUpdate() msg.zone_id = self._business_manager.business_zone_id msg.advertisement_chosen = self._advertising_type op = GenericProtocolBufferOp(DistributorOps_pb2.Operation.BUSINESS_ADVERTISEMENT_DATA_UPDATE, msg) Distributor.instance().add_op_with_no_owner(op)
43.885057
120
0.76087
3,184
0.833944
0
0
181
0.047407
0
0
417
0.109219
520d7cb39f32defc0f55c008264ebcf3b5b045f7
8,603
py
Python
app.py
ansokolov/28days
98c3bd093dd716aa38a67ad03b7457c5b83220bc
[ "MIT" ]
null
null
null
app.py
ansokolov/28days
98c3bd093dd716aa38a67ad03b7457c5b83220bc
[ "MIT" ]
null
null
null
app.py
ansokolov/28days
98c3bd093dd716aa38a67ad03b7457c5b83220bc
[ "MIT" ]
null
null
null
import os import datetime import csv import json i = 0 def initial_menu(): user_action = input("1. Log In \n" + "2. Create new user \n" + "3. Create with CSV \n" + "4. Update with CSV \n") if user_action == "1": user_name = input("What is your name? \n") file_name = user_name + ".json" if os.path.isfile(file_name): secondary_menu(file_name) else: user_choice = input("There is no such a user. Would you like to create new user? \n") if user_choice == "Yes": initial_menu() else: exit() if user_action == "2": user_name = input("What is your name? \n") file_name = user_name + ".json" user_profile = open(file_name,"w") current_date = datetime.datetime.now() filled_profile = { "name": "", "created_on": "", "words": []} filled_profile["name"] = user_name filled_profile["created_on"] = current_date.strftime("%Y-%m-%d") json.dump(filled_profile, user_profile) user_profile.close() print("The profile was create successfully. You can log in now.") if user_action == "3": user_name = input("What is your name? \n") file_name = user_name + ".json" csv_file = open(user_name + ".csv","r") csv_data = csv.reader(csv_file, delimiter=";") user_profile = open(file_name,"w") filled_profile = { "name": "", "created_on": "", "words": []} filled_profile["name"] = user_name current_date = datetime.datetime.now() filled_profile["created_on"] = current_date.strftime("%Y-%m-%d") words = filled_profile["words"] word_profile = {"word": "", "sentence": "", "translation": "", "learnt_on": "", "last_repeat": "", "repetitions": ""} for row in csv_data: word_profile["word"] = row[0] word_profile["sentence"] = row[1] word_profile["translation"] = row[2] word_profile["learnt_on"] = row[3] word_profile["last_repeat"] = row[4] word_profile["repetitions"] = row[5] words.append(word_profile) json.dump(filled_profile, user_profile) user_profile.close() print("The profile was create successfully. You can log in now.") if user_action == "4": user_name = input("What is your name? 
\n") file_name = user_name + ".json" if os.path.isfile(file_name): user_file = open(file_name,"r") user_profile = user_file.read() user_data = json.loads(user_profile) user_file.close() csv_file = open(user_name + ".csv","r") csv_data = csv.reader(csv_file, delimiter=";") user_file = open(file_name,"w") words = user_data["words"] word_profile = {"word": "", "sentence": "", "translation": "", "learnt_on": "", "last_repeat": "", "repetitions": ""} for row in csv_data: word_profile["word"] = row[0] word_profile["sentence"] = row[1] word_profile["translation"] = row[2] word_profile["learnt_on"] = row[3] word_profile["last_repeat"] = row[4] word_profile["repetitions"] = row[5] words.append(word_profile) json.dump(user_data, user_file) user_file.close() print("The profile was updated successfully.") else: user_choice = input("There is no such a user. Would you like to create new user? \n") if user_choice == "Yes": initial_menu() else: exit() def secondary_menu(file_name): user_choice = input("1. Repeat Words \n" + "2. Add new words \n") if user_choice == "1": repeat_words(file_name) elif user_choice == "2": new_word(file_name) else: print("There is no such option.") secondary_menu(file_name) def words_menu(user_profile): user_choice = input("1. Add new word \n" + "2. Back \n") if user_choice == "1": new_word(user_profile) elif user_choice == "2": secondary_menu(user_profile) else: print("There is no such option.") words_menu(user_profile) def new_word(file_name): user_profile = open(file_name,"r") data = user_profile.read() user_profile.close() serialized_data = json.loads(data) word_profile = {"word": "", "sentence": "", "translation": "", "learnt_on": "", "last_repeat": "", "repetitions": ""} word = input("What is the word you learnt? \n") sentence = input("In which sentence did you see that word? \n") translation = input("What does the word mean? 
\n") words = serialized_data["words"] word_profile["word"] = word word_profile["sentence"] = sentence word_profile["translation"] = translation current_date = datetime.datetime.now() word_profile["learnt_on"] = current_date.strftime("%Y-%m-%d") word_profile["repetitions"] = 0 words.append(word_profile) user_profile = open(file_name,"w+") json.dump(serialized_data, user_profile) user_profile.close() print("The word has been successfully saved.") user_choice = input("1. Add new word \n" + "2. Back \n") if user_choice == "1": new_word(file_name) elif user_choice == "2": secondary_menu(user_profile) else: print("There is no such option.") secondary_menu(user_profile) def repeat_words(file_name): user_profile = open(file_name,"r") data = user_profile.read() user_profile.close() serialized_data = json.loads(data) words = serialized_data["words"] i = 0 while i < len(words): word = words[i] count = 0 if int(word["repetitions"]) == 0: last_repeat = word["learnt_on"] calculated_date = calculate_date_difference(1) if calculated_date == last_repeat: word, i, count = show_sentence(word, i, count) else: i += 1 elif int(word["repetitions"]) < 7 and word["repetitions"] > 0: last_repeat = word["last_repeat"] calculated_date = calculate_date_difference(1) if calculated_date == last_repeat: word, i, count = show_sentence(word, i, count) else: i += 1 elif int(word["repetitions"]) == 7: last_repeat = word["last_repeat"] calculated_date = calculate_date_difference(7) if calculated_date == last_repeat: word, i, count = show_sentence(word, i, count) else: i += 1 elif int(word["repetitions"]) == 8: last_repeat = word["last_repeat"] calculated_date = calculate_date_difference(14) if calculated_date == last_repeat: word, i, count = show_sentence(word, i, count) else: i += 1 else: i += 1 if count == 0: print("There is no words for today. 
Come tomorrow.") else: print("That's all the words for today.") user_profile = open(file_name,"w+") json.dump(serialized_data, user_profile) user_profile.close() def calculate_date_difference(difference): current_date = datetime.datetime.now() calculated_date = current_date - datetime.timedelta(days=difference) calculated_date = calculated_date.strftime("%Y-%m-%d") return calculated_date def show_sentence(word, i, count): print(word["sentence"]) word["repetitions"] = int(word["repetitions"]) + 1 current_date = datetime.datetime.now() word["last_repeat"] = current_date.strftime("%Y-%m-%d") count += 1 i += 1 return word, i, count print("Welcome to 28 days words learning app. \n") initial_menu()
34.550201
97
0.535511
0
0
0
0
0
0
0
0
1,781
0.207021
520f3837a1ca983b092354bd55ad5a67b59b1069
8,598
py
Python
tests/integration_tests/test_shell_plate_rbe2.py
tchin-divergent/tacs
34743b370da4ab6ea16d24de7c574c3fec9d333a
[ "Apache-2.0" ]
null
null
null
tests/integration_tests/test_shell_plate_rbe2.py
tchin-divergent/tacs
34743b370da4ab6ea16d24de7c574c3fec9d333a
[ "Apache-2.0" ]
null
null
null
tests/integration_tests/test_shell_plate_rbe2.py
tchin-divergent/tacs
34743b370da4ab6ea16d24de7c574c3fec9d333a
[ "Apache-2.0" ]
null
null
null
import numpy as np from mpi4py import MPI from tacs import TACS, elements, constitutive, functions from static_analysis_base_test import StaticTestCase ''' Create a two separate cantilevered plates connected by an RBE3 element. Apply a load at the RBE2 center node and test KSFailure, StructuralMass, and Compliance functions and sensitivities ----------- ----------- | |\ /| | | | \ / | | | Plate 1 |__\/__| Plate 2 | | | /\ | | | | / \ | | | |/ \| | ------------ ----------- ''' FUNC_REFS = np.array([1.2600980396870352, 51400.0, 3767896.1409673616, 2.912191091671254]) # Length of plate in x/y direction Lx = 10.0 Ly = 10.0 # Number of elements in x/y direction for each plate nx = 4 ny = 4 # applied force at center node applied_force = np.array([1e8, 0.0, 1.0e6, 0.0, 0.0, 1e8]) # KS function weight ksweight = 10.0 class ProblemTest(StaticTestCase.StaticTest): N_PROCS = 2 # this is how many MPI processes to use for this TestCase. def setup_assembler(self, comm, dtype): """ Setup mesh and tacs assembler for problem we will be testing. 
""" # Overwrite default check values if dtype == complex: self.rtol = 1e-5 self.atol = 1e-8 self.dh = 1e-50 else: self.rtol = 1e-1 self.atol = 1e-4 self.dh = 1e-5 # Create the stiffness object props = constitutive.MaterialProperties(rho=2570.0, E=70e9, nu=0.3, ys=350e6) stiff = constitutive.IsoShellConstitutive(props, t=0.1, tNum=0) # Set up the element transform function transform = elements.ShellNaturalTransform() shell = elements.Quad4Shell(transform, stiff) num_rbe_nodes = 0 # Allocate the TACSCreator object vars_per_node = shell.getVarsPerNode() creator = TACS.Creator(comm, vars_per_node) if comm.rank == 0: num_elems = nx * ny num_nodes = (nx + 1) * (ny + 1) # discretize (left) plate x = np.linspace(0, Lx, nx + 1, dtype) y = np.linspace(0, Ly, ny + 1, dtype) left_xyz = np.zeros([nx + 1, ny + 1, 3], dtype) left_xyz[:, :, 0], left_xyz[:, :, 1] = np.meshgrid(x, y, indexing='ij') left_node_ids = np.arange(num_nodes, dtype=np.intc).reshape(nx + 1, ny + 1) # Define right plate by copying left plate and shifting 2 m right_xyz = left_xyz.copy() right_xyz[:, :, 0] += 2.0 * Lx right_node_ids = left_node_ids + num_nodes # Double the node/element count num_nodes *= 2 num_elems *= 2 # Set connectivity for each plate element conn = [] for i in range(nx): for j in range(ny): conn.extend([left_node_ids[i, j], left_node_ids[i + 1, j], left_node_ids[i, j + 1], left_node_ids[i + 1, j + 1]]) conn.extend([right_node_ids[i, j], right_node_ids[i + 1, j], right_node_ids[i, j + 1], right_node_ids[i + 1, j + 1]]) # Append connectivity for rbe element center_node_id = num_nodes center_node_xyz = np.array([1.5 * Lx, 0.5 * Ly, 0.0], dtype=dtype) num_nodes += 1 # Add center node as indep rbe node rbe_conn = [center_node_id] dep_nodes = [] dummy_nodes = [] # Append all dependent nodes and a dummy node for each dep node added for j in range(ny + 1): # Add nodes on right edge of left plate as dep RBE nodes dep_nodes.append(left_node_ids[-1, j]) dummy_node_id = num_nodes 
dummy_nodes.append(dummy_node_id) # Add nodes on left edge of right plate as indep RBE nodes dep_nodes.append(right_node_ids[0, j]) dummy_node_id = num_nodes + 1 dummy_nodes.append(dummy_node_id) # Increment node count for new dummy nodes num_nodes += 2 rbe_conn.extend(dep_nodes) rbe_conn.extend(dummy_nodes) dummy_node_xyz = np.zeros([len(dep_nodes), 3], dtype=dtype) # Add rbe to global connectivity num_rbe_nodes = len(rbe_conn) conn.extend(rbe_conn) num_elems += 1 # Set element info for plates conn = np.array(conn, dtype=np.intc) ptr = np.arange(0, 4 * num_elems + 1, 4, dtype=np.intc) comp_ids = np.zeros(num_elems, dtype=np.intc) # Correct last entries for RBE ptr[-1] = ptr[-2] + num_rbe_nodes comp_ids[-1] = 1 creator.setGlobalConnectivity(num_nodes, ptr, conn, comp_ids) # Set up the boundary conditions (fixed at left hand edge) bcnodes = np.append(left_node_ids[0, :], right_node_ids[-1, :]) creator.setBoundaryConditions(bcnodes) # Set the node locations xyz = np.append(left_xyz.flatten(), right_xyz.flatten()) xyz = np.append(xyz.flatten(), center_node_xyz) xyz = np.append(xyz.flatten(), dummy_node_xyz) creator.setNodes(xyz.flatten()) # Set up rbe object num_rbe_nodes = comm.bcast(num_rbe_nodes, root=0) # Which dependent dofs are connected dep_dofs = np.array([1, 1, 1, 1, 1, 1], np.intc) # Set the artificial stiffness to be low to pass the sensitivity tests # This will affect the accuracy of the element behavior rbe = elements.RBE2(num_rbe_nodes, dep_dofs, C1=1e2, C2=1e-1) # Set the elements for each (only two) component element_list = [shell, rbe] creator.setElements(element_list) # Create the tacs assembler object assembler = creator.createTACS() return assembler def setup_tacs_vecs(self, assembler, force_vec, dv_pert_vec, ans_pert_vec, xpts_pert_vec): """ Setup user-defined vectors for analysis and fd/cs sensitivity verification """ local_num_nodes = assembler.getNumOwnedNodes() vars_per_node = assembler.getVarsPerNode() # The nodes have been distributed across 
processors now # Let's find which nodes this processor owns xpts0 = assembler.createNodeVec() assembler.getNodes(xpts0) xpts0_array = xpts0.getArray() # Split node vector into numpy arrays for easier parsing of vectors local_xyz = xpts0_array.reshape(local_num_nodes, 3) local_x, local_y, local_z = local_xyz[:, 0], local_xyz[:, 1], local_xyz[:, 2] # Create force vector f_array = force_vec.getArray().reshape(local_num_nodes, vars_per_node) # Apply distributed forces at tip of beam # Apply Qxx f_array[np.logical_and(local_x == 1.5 * Lx, local_y == 0.5 * Ly), :] = applied_force # Create temporary dv vec for doing fd/cs dv_pert_array = dv_pert_vec.getArray() dv_pert_array[:] = 1.0 # Create temporary state variable vec for doing fd/cs ans_pert_array = ans_pert_vec.getArray() # Define perturbation array that uniformly moves all nodes on right edge of left plate to the upward ans_pert_array = ans_pert_array.reshape(local_num_nodes, vars_per_node) ans_pert_array[local_x == Lx, 1] = 1.0 # Define perturbation array that uniformly moves all nodes on right edge of plate to the right xpts_pert_array = xpts_pert_vec.getArray() xpts_pert_array = xpts_pert_array.reshape(local_num_nodes, 3) # Define perturbation array that uniformly moves all nodes on right edge of left plate to the right xpts_pert_array[local_x == Lx, 0] = 1.0 return def setup_funcs(self, assembler): """ Create a list of functions to be tested and their reference values for the problem """ func_list = [functions.KSFailure(assembler, ksWeight=ksweight), functions.StructuralMass(assembler), functions.Compliance(assembler), functions.KSDisplacement(assembler, ksWeight=ksweight, direction=[1.0, 1.0, 1.0])] return func_list, FUNC_REFS
39.62212
108
0.581996
7,652
0.889974
0
0
0
0
0
0
2,579
0.299953
5214b68870ece3123bc4f5d2a4c1da7549fb6608
31,649
py
Python
src/conntact/assembly_algorithm_blocks.py
swri-robotics/ConnTact
3b31a6107403acebefc91616455f6d73c4181db1
[ "Apache-2.0" ]
9
2021-10-08T18:57:05.000Z
2022-03-29T06:07:53.000Z
src/conntact/assembly_algorithm_blocks.py
swri-robotics/ConnTact
3b31a6107403acebefc91616455f6d73c4181db1
[ "Apache-2.0" ]
4
2021-10-12T15:35:59.000Z
2022-01-12T21:36:40.000Z
src/conntact/assembly_algorithm_blocks.py
swri-robotics/ConnTact
3b31a6107403acebefc91616455f6d73c4181db1
[ "Apache-2.0" ]
1
2021-12-28T13:11:01.000Z
2021-12-28T13:11:01.000Z
# Copyright 2021 Southwest Research Institute # Licensed under the Apache License, Version 2.0 #UR IP Address is now 175.31.1.137 #Computer has to be 175.31.1.150 # Imports for ros # from _typeshed import StrPath from builtins import staticmethod from operator import truediv from pickle import STRING, TRUE import string from colorama.initialise import reset_all from numpy.core.numeric import allclose import rospy import sys import numpy as np import matplotlib.pyplot as plt from rospkg import RosPack from geometry_msgs.msg import WrenchStamped, Wrench, TransformStamped, PoseStamped, Pose, Point, Quaternion, Vector3, Transform from rospy.core import configure_logging import tf.transformations as trfm from colorama import Fore, Back, Style, init # from sensor_msgs.msg import JointState # from assembly_ros.srv import ExecuteStart, ExecuteRestart, ExecuteStop from controller_manager_msgs.srv import SwitchController, LoadController, ListControllers from tf2_geometry_msgs.tf2_geometry_msgs import do_transform_pose import tf2_ros import tf2_py # import tf2 import tf2_geometry_msgs from threading import Lock from conntact.assembly_tools import AssemblyTools from transitions import Machine """State names For loop purposes, the state name *must* be identical to "state_"+(loop method name) i.e. in state "state_finding_surface" the method "finding_surface" will be run repeatedly; if it does not exist, an error will occur. 
""" IDLE_STATE = 'state_idle' CHECK_FEEDBACK_STATE = 'state_check_load_cell_feedback' APPROACH_STATE = 'state_finding_surface' FIND_HOLE_STATE = 'state_finding_hole' INSERTING_PEG_STATE = 'state_inserting_along_axis' COMPLETION_STATE = 'state_completed_insertion' EXIT_STATE = 'state_exit' SAFETY_RETRACT_STATE = 'state_safety_retraction' #Trigger names CHECK_FEEDBACK_TRIGGER = 'check loadcell feedback' APPROACH_SURFACE_TRIGGER = 'start approach' FIND_HOLE_TRIGGER = 'surface found' INSERT_PEG_TRIGGER = 'hole found' ASSEMBLY_COMPLETED_TRIGGER = 'assembly completed' SAFETY_RETRACTION_TRIGGER = 'retract to safety' RESTART_TEST_TRIGGER = 'restart test' STEP_COMPLETE_TRIGGER = 'next step' RUN_LOOP_TRIGGER = 'run looped code' class AlgorithmBlocks(AssemblyTools): def __init__(self, ROS_rate, start_time): #Configuration variables, to be moved to a yaml file later: self.speed_static = [1/1000,1/1000,1/1000] #Speed at which the system considers itself stopped. Rel. to target hole. force_dangerous = [55,55,65] #Force value which kills the program. Rel. to gripper. force_transverse_dangerous = np.array([30,30,30]) #Force value transverse to the line from the TCP to the force sensor which kills the program. Rel. to gripper. force_warning = [40,40,50] #Force value which pauses the program. Rel. to gripper. force_transverse_warning = np.array([20,20,20]) #torque value transverse to the line from the TCP to the force sensor which kills the program. Rel. to gripper. self.max_force_error = [4, 4, 4] #Allowable error force with no actual loads on the gripper. self.cap_check_forces = force_dangerous, force_transverse_dangerous, force_warning, force_transverse_warning self._bias_wrench = self.create_wrench([0,0,0], [0,0,0]).wrench #Calculated to remove the steady-state error from wrench readings. #List the official states here. Takes strings, but the tokens created above so typos are less likely from repeated typing of strings (unchecked by interpreter). 
states = [ IDLE_STATE, CHECK_FEEDBACK_STATE, APPROACH_STATE, FIND_HOLE_STATE, INSERTING_PEG_STATE, COMPLETION_STATE, SAFETY_RETRACT_STATE ] #Define the valid transitions from/to each state. Here's where you define the functionality of the state machine. The system executes the first transition in this list which matches BOTH the trigger AND the CURRENT state. transitions = [ {'trigger':CHECK_FEEDBACK_TRIGGER , 'source':IDLE_STATE , 'dest':CHECK_FEEDBACK_STATE }, {'trigger':APPROACH_SURFACE_TRIGGER , 'source':CHECK_FEEDBACK_STATE, 'dest':APPROACH_STATE }, {'trigger':FIND_HOLE_TRIGGER , 'source':APPROACH_STATE , 'dest':FIND_HOLE_STATE }, {'trigger':INSERT_PEG_TRIGGER , 'source':FIND_HOLE_STATE , 'dest':INSERTING_PEG_STATE }, {'trigger':ASSEMBLY_COMPLETED_TRIGGER, 'source':INSERTING_PEG_STATE , 'dest':COMPLETION_STATE }, {'trigger':SAFETY_RETRACTION_TRIGGER , 'source':'*' , 'dest':SAFETY_RETRACT_STATE, 'unless':'is_already_retracting' }, {'trigger':RESTART_TEST_TRIGGER , 'source':SAFETY_RETRACT_STATE, 'dest':CHECK_FEEDBACK_STATE }, {'trigger':RUN_LOOP_TRIGGER , 'source':'*', 'dest':None, 'after': 'run_loop'} ] self.steps:dict = { APPROACH_STATE: (findSurface, []) } self.previousState = None #Store a reference to the previous state here. # self.surface_height = 0.0 Machine.__init__(self, states=states, transitions=transitions, initial=IDLE_STATE) AssemblyTools.__init__(self, ROS_rate, start_time) # Set up Colorama for colorful terminal outputs on all platforms init(autoreset=True) # temporary selector for this algorithm's TCP; easily switch from tip to corner-centrered search self.tcp_selected = 'tip' #Store a reference to the AssemblyStep class in use by the current State if it exists: self.step:AssemblyStep = None def post_action(self, trigger_name): """Defines the next trigger which the state machine should execute. 
""" return [trigger_name, True] def is_already_retracting(self): return self.is_state_safety_retraction() def on_enter_state_check_load_cell_feedback(self): # self.select_tool('corner') self.reset_on_state_enter() self.select_tool(self.tcp_selected) self._log_state_transition() def on_enter_state_finding_surface(self): # self.select_tool('corner') self.reset_on_state_enter() self.select_tool(self.tcp_selected) self._log_state_transition() def on_enter_state_finding_hole(self): # self.select_tool('corner') self.reset_on_state_enter() self.select_tool(self.tcp_selected) self._log_state_transition() def on_enter_state_inserting_along_axis(self): self.reset_on_state_enter() self._log_state_transition() def on_enter_state_completed_insertion(self): self.reset_on_state_enter() self._log_state_transition() def on_enter_state_retracting_to_safety(self): self.reset_on_state_enter() self._log_state_transition() def _log_state_transition(self): rospy.loginfo(Fore.BLACK + Back.WHITE +"State transition to " + str(self.state) + " at time = " + str(rospy.get_rostime()) + Style.RESET_ALL ) def reset_on_state_enter(self): self.completion_confidence = 0 def update_commands(self): # rospy.logerr_throttle(2, "Preparing to publish pose: " + str(self.pose_vec) + " and wrench: " + str(self.wrench_vec)) # rospy.logerr_throttle(2, "Preparing to publish pose: " + str(self.pose_vec)) # rospy.logerr_throttle(2, "Current pose: " + str(self.current_pose.transform) ) self.publish_pose(self.pose_vec) self.publish_wrench(self.wrench_vec) def run_loop(self): """Runs the method with name matching the state name. Superceded by AssemblyStep class type if one exists. """ state_name=str(self.state) if("state_") in state_name: if(state_name in self.steps): #This step has been realized as a Step class if(not self.step): #Set step to an instance of the referred class and pass in the parameters. 
self.step = self.steps[state_name][0](self, *self.steps[state_name][1]) rospy.loginfo( Fore.GREEN + "Created step object " + str(type(self.step)) + Style.RESET_ALL ) else: self.step.execute() if (self.step.checkCompletion()): self.next_trigger, self.switch_state = self.step.onExit() else: # This step has been realized as a looping method. method_name = "self."+state_name[state_name.find('state_')+6:]+'()' try: rospy.loginfo_throttle(2, Fore.WHITE + "In state "+state_name + Style.RESET_ALL + ", now executing " + method_name) exec(method_name) except (NameError, AttributeError): rospy.logerr_throttle(2, "State name " + method_name + " does not match 'state_'+(state loop method name) in algorithm!") pass except: print("Unexpected error when trying to locate state loop name:", sys.exc_info()[0]) raise else: rospy.logerr("Invalid state name! Terminating.") quit() def algorithm_execute(self): """Main execution loop. A True exit state will cause the buffered Trigger "self.next_trigger" to be run, changing the state. If using a state realized as an AssemblyStep class, we delete the old step here. Also executes the once-per-cycle non-step commands needed for continuous safe operation. """ self.completion_confidence = 0 self.next_trigger, self.switch_state = self.post_action(CHECK_FEEDBACK_TRIGGER) rospy.loginfo(Fore.BLACK + Back.GREEN + "Beginning search algorithm. "+Style.RESET_ALL) while not rospy.is_shutdown() and self.state != EXIT_STATE: # Main program loop. # Refresh values, run process trigger (either loop-back to perform state actions or transition to new state), then output controller commands and wait for next loop time. self.all_states_calc() self.checkForceCap() if(self.switch_state): #If the command to change states has come in: self.switch_state = False if(self.step): #If a Step class has been defined for the current State, we delete it to be tidy. 
del self.step self.step = None else: # If we're not switching states, we use RUN_LOOP_TRIGGER to execute this state's loop code. self.next_trigger = RUN_LOOP_TRIGGER # Execute the trigger chosen self.trigger(self.next_trigger) # Publish robot motion commands only once per loop, right at the end of the loop: self.update_commands() self._rate.sleep() def arbitrary_axis_comply(self, direction_vector = [0,0,1], desired_orientation = [0, 1, 0, 0]): """Generates a command pose vector which causes the robot to hold a certain orientation and comply in one dimension while staying on track in the others. :param desiredTaskSpacePosition: (array-like) vector indicating hole position in robot frame :param direction_vector: (array-like list of bools) vector of bools or 0/1 values to indicate which axes comply and which try to stay the same as those of the target hole position. :param desired_orientation: (list of floats) quaternion parameters for orientation; currently disabled because changes in orientation are dangerous and unpredictable. Use TCPs instead. """ #initially set the new command position to be the current physical (disturbed) position #This allows movement allowed by outside/command forces to move the robot at a steady rate. pose_position = self.current_pose.transform.translation if(not direction_vector[0]): pose_position.x = self.target_hole_pose.pose.position.x if(not direction_vector[1]): pose_position.y = self.target_hole_pose.pose.position.y if(not direction_vector[2]): pose_position.z = self.target_hole_pose.pose.position.z pose_orientation = [0, 1, 0, 0] # pose_orientation = desired_orientation #Let this be handled by the TCP system, it reduces dangerous wiggling. return [[pose_position.x, pose_position.y, pose_position.z], pose_orientation] def check_load_cell_feedback(self): # self.switch_state = False #Take an average of static sensor reading to check that it's stable. 
if (self.curr_time_numpy > 1.5): self._bias_wrench = self._average_wrench_gripper rospy.loginfo("Measured bias wrench. Force: " + str(self.as_array(self._bias_wrench.force)) +" and Torque: " + str(self.as_array(self._bias_wrench.torque))) # acceptable = self.vectorRegionCompare_symmetrical(self.as_array(self._bias_wrench.force), np.ndarray(self.max_force_error)) acceptable = True if(acceptable): rospy.logerr("Starting linear search.") self.next_trigger, self.switch_state = self.post_action(APPROACH_SURFACE_TRIGGER) else: rospy.logerr("Starting wrench is dangerously high. Suspending. Try restarting robot if values seem wrong.") self.next_trigger, self.switch_state = self.post_action(SAFETY_RETRACTION_TRIGGER) def finding_surface(self): #seek in Z direction until we stop moving for about 1 second. # Also requires "seeking_force" to be compensated pretty exactly by a static surface. #Take an average of static sensor reading to check that it's stable. seeking_force = [0,0,-7] self.wrench_vec = self.get_command_wrench(seeking_force) self.pose_vec = self.linear_search_position([0,0,0]) #doesn't orbit, just drops straight downward rospy.logwarn_throttle(1, "Running finding_surface method according to the old traditions") # if(not self.force_cap_check(*self.cap_check_forces)): # self.next_trigger, self.switch_state = self.post_action(SAFETY_RETRACTION_TRIGGER) # rospy.logerr("Force/torque unsafe; pausing application.") # # elif( self.vectorRegionCompare_symmetrical(self.average_speed, self.speed_static) # # and self.vectorRegionCompare(self.as_array(self._average_wrench_world.force), [10,10,seeking_force*-1.5], [-10,-10,seeking_force*-.75])): # el if(self.checkIfStatic(np.array(self.speed_static)) and self.checkIfColliding(np.array(seeking_force))): self.completion_confidence = self.completion_confidence + 1/self._rate_selected rospy.logerr_throttle(1, "Monitoring for flat surface, confidence = " + str(self.completion_confidence)) 
#if((rospy.Time.now()-marked_time).to_sec() > .50): #if we've satisfied this condition for 1 second if(self.completion_confidence > .90): #Stopped moving vertically and in contact with something that counters push force rospy.logerr("Flat surface detected! Moving to spiral search!") #Measure flat surface height: self.surface_height = self.current_pose.transform.translation.z self.next_trigger, self.switch_state = self.post_action(FIND_HOLE_TRIGGER) self.completion_confidence = 0.01 else: self.completion_confidence = np.max( np.array([self.completion_confidence * 95/self._rate_selected, .001])) def finding_hole(self): #Spiral until we descend 1/3 the specified hole depth (provisional fraction) #This triggers the hole position estimate to be updated to limit crazy #forces and oscillations. Also reduces spiral size. seeking_force = -7.0 self.wrench_vec = self.get_command_wrench([0,0,seeking_force]) self.pose_vec = self.spiral_search_motion(self._spiral_params["frequency"], self._spiral_params["min_amplitude"], self._spiral_params["max_cycles"]) if(not self.force_cap_check(*self.cap_check_forces)): self.next_trigger, self.switch_state = self.post_action(SAFETY_RETRACTION_TRIGGER) rospy.logerr("Force/torque unsafe; pausing application.") elif( self.current_pose.transform.translation.z <= self.surface_height - .0004): #If we've descended at least 5mm below the flat surface detected, consider it a hole. self.completion_confidence = self.completion_confidence + 1/self._rate_selected rospy.logerr_throttle(1, "Monitoring for hole location, confidence = " + str(self.completion_confidence)) if(self.completion_confidence > .90): #Descended from surface detection point. Updating hole location estimate. self.x_pos_offset = self.current_pose.transform.translation.x self.y_pos_offset = self.current_pose.transform.translation.y self._amp_limit_cp = 2 * np.pi * 4 #limits to 3 spirals outward before returning to center. 
#TODO - Make these runtime changes pass as parameters to the "spiral_search_basic_compliance_control" function rospy.logerr_throttle(1.0, "Hole found, peg inserting...") self.next_trigger, self.switch_state = self.post_action(INSERT_PEG_TRIGGER) else: self.completion_confidence = np.max( np.array([self.completion_confidence * 95/self._rate_selected, .01])) if(self.current_pose.transform.translation.z >= self.surface_height - self.hole_depth): rospy.loginfo_throttle(1, Fore.YELLOW + "Height is still " + str(self.current_pose.transform.translation.z) + " whereas we should drop down to " + str(self.surface_height - self.hole_depth) + Style.RESET_ALL ) def inserting_along_axis(self): #Continue spiraling downward. Outward normal force is used to verify that the peg can't move #horizontally. We keep going until vertical speed is very near to zero. seeking_force = -5.0 self.wrench_vec = self.get_command_wrench([0,0,seeking_force]) self.pose_vec = self.full_compliance_position() if(not self.force_cap_check(*self.cap_check_forces)): self.next_trigger, self.switch_state = self.post_action(SAFETY_RETRACTION_TRIGGER) rospy.logerr("Force/torque unsafe; pausing application.") elif( self.vectorRegionCompare_symmetrical(self.average_speed, self.speed_static) #and not self.vectorRegionCompare(self.as_array(self._average_wrench_world.force), [6,6,80], [-6,-6,-80]) and self.vectorRegionCompare(self.as_array(self._average_wrench_world.force), [1.5,1.5,seeking_force*-1.5], [-1.5,-1.5,seeking_force*-.75]) and self.current_pose.transform.translation.z <= self.surface_height - self.hole_depth): self.completion_confidence = self.completion_confidence + 1/self._rate_selected rospy.logerr_throttle(1, "Monitoring for peg insertion, confidence = " + str(self.completion_confidence)) #if((rospy.Time.now()-marked_time).to_sec() > .50): #if we've satisfied this condition for 1 second if(self.completion_confidence > .90): #Stopped moving vertically and in contact with something that counters push 
force self.next_trigger, self.switch_state = self.post_action(ASSEMBLY_COMPLETED_TRIGGER) else: # self.completion_confidence = np.max( np.array([self.completion_confidence * 95/self._rate_selected, .01])) if(self.current_pose.transform.translation.z >= self.surface_height - self.hole_depth): rospy.loginfo_throttle(1, Fore.YELLOW + "Height is still " + str(self.current_pose.transform.translation.z) + " whereas we should drop down to " + str(self.surface_height - self.hole_depth) + Style.RESET_ALL) def completed_insertion(self): #Inserted properly. rospy.loginfo_throttle(1, Fore.RED + "Hole found, peg inserted! Done!" +Style.RESET_ALL) if(self.current_pose.transform.translation.z > self.restart_height+.02): #High enough, won't pull itself upward. seeking_force = 2.5 rospy.loginfo_once(Back.GREEN + Fore.WHITE + Style.BRIGHT + "Completed Task!" + Style.RESET_ALL) quit() else: #pull upward gently to move out of trouble hopefully. seeking_force = 20 self.force_cap_check(*self.cap_check_forces) self.wrench_vec = self.get_command_wrench([0,0,seeking_force]) self.pose_vec = self.full_compliance_position() def safety_retraction(self): #Safety passivation; chill and pull out. Actually restarts itself if everything's chill enough. if(self.current_pose.transform.translation.z > self.restart_height+.05): #High enough, won't pull itself upward. seeking_force = 3.5 else: #pull upward gently to move out of trouble. seeking_force = 10 self.wrench_vec = self.get_command_wrench([0,0,seeking_force]) self.pose_vec = self.full_compliance_position() rospy.loginfo_throttle(1, Fore.RED + "Task suspended for safety. 
Freewheeling until low forces and height reset above " + str(self.restart_height) + ': ' + str(self.current_pose.transform.translation.z) + Style.RESET_ALL) if( self.vectorRegionCompare_symmetrical(self.average_speed, [2,2,2]) and self.vectorRegionCompare_symmetrical(self.as_array(self._average_wrench_gripper.force), self.max_force_error) and self.current_pose.transform.translation.z > self.restart_height): self.completion_confidence = self.completion_confidence + 1/self._rate_selected rospy.loginfo_throttle(1, Fore.RED + "Static. Restarting confidence: " + str( np.round(self.completion_confidence, 2) ) + " out of 1." +Style.RESET_ALL) #if((rospy.Time.now()-marked_time).to_sec() > .50): #if we've satisfied this condition for 1 second if(self.completion_confidence > 1): #Restart Search rospy.loginfo_throttle(1.0, "Restarting test!") self.next_trigger, self.switch_state = self.post_action(RESTART_TEST_TRIGGER) else: self.completion_confidence = np.max( np.array([self.completion_confidence * 90/self._rate_selected, .01])) if(self.current_pose.transform.translation.z > self.restart_height): rospy.loginfo_throttle(1, Fore.RED + "That's high enough! Let robot stop and come to zero force." 
+Style.RESET_ALL) #All state callbacks need to calculate this in a while loop def all_states_calc(self): #All once-per-loop functions self.current_pose = self.get_current_pos() self.curr_time = rospy.get_rostime() - self._start_time self.curr_time_numpy = np.double(self.curr_time.to_sec()) marked_state = 1; #returns to this state after a soft restart in state 99 # self.wrench_vec = self.get_command_wrench([0,0,-2]) # self.pose_vec = self.full_compliance_position() self.update_avg_speed() self.update_average_wrench() # self._update_plots() # rospy.loginfo_throttle(1, Fore.BLUE + "Average wrench in newtons is force \n" + str(self._average_wrench_world.force)+ # " and torque \n" + str(self._average_wrench_world.torque) + Style.RESET_ALL) # rospy.loginfo_throttle(1, Fore.CYAN + "\nAverage speed in mm/second is \n" + str(1000*self.average_speed) +Style.RESET_ALL) self.publish_plotted_values() def checkForceCap(self): if(not self.force_cap_check(*self.cap_check_forces)): self.next_trigger, self.switch_state = self.post_action(SAFETY_RETRACTION_TRIGGER) rospy.logerr("Force/torque unsafe; pausing application.") class AssemblyStep: ''' The default AssemblyStep provides a helpful structure to impliment discrete tasks in AssemblyBlocks. The default functionality below moves the TCP in a specified direction and ends when a rigid obstacle halts its motion. In general, the pattern goes thus: ::init:: runs when the Step is created, normally right before the first loop of its associated Step. The parameters entered in the AlgorithmBlocks.steps dictionary will be sent to the init function. ::execute:: runs each time the AlgorithmBlocks instance runs its loop. The continuous behavior of the robot should be defined here. ::checkCompletion:: runs each loop cycle, being triggered by the AlgorithmBlocks loop like 'execute' is. It checks the exitConditions method (below) to evaluate conditions, and gains/loses completion_confidence. 
The confidence behavior makes decision-making much more consistent, largely eliminating trouble from sensor noise, transient forces, and other disruptions. It returns a Boolean value; True should indicate that the exit conditions for the Step have been satisfied consistently and reliably. This triggers AlgorithmBlocks to run the Exit method. See below. ::exitConditions:: is the boolean "check" which checkCompletion uses to build/lose confidence that it is finished. Gives an instantaneous evaluation of conditions. Prone to noise due to sensor/control/transient messiness. ::onExit:: is run by the AlgorithmBlocks execution loop right before this Step object is Deleted. It should output the switch_state boolean (normally True since the step is done) and the trigger for the next step (normally STEP_COMPLETE_TRIGGER which simply moves us to the next Step in sequence, as dictated by the AlgorithmBlocks state machine). Any other end-of-step actions, like saving information to the AlgorithmBlocks object for later steps, can be done here. ''' def __init__(self, algorithmBlocks:(AlgorithmBlocks)) -> None: #set up the parameters for this step self.completion_confidence = 0.0 self.seeking_force = [0,0,0] self.comply_axes = [1,1,1] # self.desiredOrientation = trfm.quaternion_from_euler(0,0,-90) self.desiredOrientation = trfm.quaternion_from_euler(0,0,0) self.done = False #Set up exit condition sensitivity self.exitPeriod = .5 #Seconds to stay within bounds self.exitThreshold = .99 #Percentage of time for the last period self.holdStartTime = 0; #Pass in a reference to the AlgorithmBlocks parent class; this reduces data copying in memory self.assembly = algorithmBlocks def execute(self): '''Executed once per loop while this State is active. By default, just runs UpdateCommands to keep the compliance motion profile running. ''' self.updateCommands() def updateCommands(self): '''Updates the commanded position and wrench. These are published in the AlgorithmBlocks main loop. 
''' #Command wrench self.assembly.wrench_vec = self.assembly.get_command_wrench(self.seeking_force) #Command pose self.assembly.pose_vec = self.assembly.arbitrary_axis_comply(self.comply_axes) def checkCompletion(self): """Check if the step is complete. Default behavior is to check the exit conditions and gain/lose confidence between 0 and 1. ExitConditions returning True adds a step toward 1; False steps down toward 0. Once confidence is above exitThreshold, a timer begins for duration exitPeriod. """ if(self.exitConditions()): if(self.completion_confidence < 1): self.completion_confidence += 1/(self.assembly._rate_selected) if(self.completion_confidence > self.exitThreshold): if(self.holdStartTime == 0): #Start counting down to completion as long as we don't drop below threshold again: self.holdStartTime = rospy.get_time() rospy.logerr("Countdown beginning at time " + str(self.holdStartTime)) elif(self.holdStartTime < rospy.get_time() - self.exitPeriod ): #it's been long enough, exit loop rospy.logerr("Countdown ending at time " + str(rospy.get_time())) return True else: # Confidence has dropped below the threshold, cancel the countdown. self.holdStartTime = 0 else: #Exit conditions not true if(self.completion_confidence>0.0): self.completion_confidence -= 1/(self.assembly._rate_selected) return False def exitConditions(self)->bool: return self.noForce() def static(self)->bool: return self.assembly.checkIfStatic(np.array(self.assembly.speed_static)) def collision(self)->bool: return self.assembly.checkIfColliding(np.array(self.seeking_force)) def noForce(self)->bool: '''Checks the current forces against an expected force of zero, helpfully telling us if the robot is in free motion :return: (bool) whether the force is fairly close to zero. ''' return self.assembly.checkIfColliding(np.zeros(3)) def onExit(self): """Executed once, when the change-state trigger is registered. 
""" return STEP_COMPLETE_TRIGGER, True class findSurface(AssemblyStep): def __init__(self, algorithmBlocks:(AlgorithmBlocks)) -> None: AssemblyStep.__init__(self, algorithmBlocks) self.comply_axes = [0,0,1] self.seeking_force = [0,0,-7] def exitConditions(self)->bool: return self.static() and self.collision() def onExit(self): """Executed once, when the change-state trigger is registered. """ #Measure flat surface height and report it to AssemblyBlocks: self.assembly.surface_height = self.assembly.current_pose.transform.translation.z return super().onExit() # class findHole(AssemblyStep): # def __init__(self, algorithmBlocks:(AlgorithmBlocks)) -> None: # AssemblyStep.__init__(self, algorithmBlocks) # self.comply_axes = [0,0,1] # self.seeking_force = [0,0,-7] # def exitConditions(self)->bool: # return self.droppedDown(.0004) # def droppedDown(self, distance): # return (self.assembly.current_pose.z < self.assembly.surface_height - distance) # def onExit(self): # """Executed once, when the change-state trigger is registered. # """ # #Measure flat surface height and report it to AssemblyBlocks: # self.assembly.target_hole_pose.x = self.assembly.current_pose.transform.translation.x # self.assembly.target_hole_pose.y = self.assembly.current_pose.transform.translation.y # return super().onExit() # self.pose_vec = self.spiral_search_motion(self._spiral_params["frequency"], # self._spiral_params["min_amplitude"], self._spiral_params["max_cycles"])
53.642373
570
0.678378
28,333
0.895226
0
0
0
0
0
0
13,747
0.434358
52161f02cf931a0b27e36147101430c4098e9222
989
py
Python
2017/misc/ncme/server.py
HackGT/ctf-problems
a9608eb431af887d764f9ff2d8919e418a051c3d
[ "MIT" ]
2
2019-01-28T03:37:32.000Z
2020-06-09T15:40:14.000Z
2017/misc/ncme/server.py
HackGT/ctf-problems
a9608eb431af887d764f9ff2d8919e418a051c3d
[ "MIT" ]
null
null
null
2017/misc/ncme/server.py
HackGT/ctf-problems
a9608eb431af887d764f9ff2d8919e418a051c3d
[ "MIT" ]
null
null
null
''' Simple socket server using threads ''' import socket import sys from thread import * def clientthread(conn): conn.send('Nice job! The flag is hackgt{here_kitty_kitty}\n') conn.close() HOST = '' # Symbolic name, meaning all available interfaces PORT = 9000 # Arbitrary non-privileged port s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) print('Socket created') #Bind socket to local host and port try: s.bind((HOST, PORT)) except socket.error as msg: print('Bind failed. Error Code : ' + str(msg[0]) + ' Message ' + msg[1]) sys.exit() print('Socket bind complete') #Start listening on socket s.listen(10) print('Socket now listening') #now keep talking with the client try: while 1: #wait to accept a connection - blocking call conn, addr = s.accept() print('Connected with ' + addr[0] + ':' + str(addr[1])) start_new_thread(clientthread, (conn,)) except KeyboardInterrupt: s.close()
24.121951
76
0.659252
0
0
0
0
0
0
0
0
435
0.439838
5216612398027c08af99299f71aba5216ea6eb3e
563
bzl
Python
tools/build_defs/buck_helpers.bzl
YifanShenSZ/pytorch
b4232f7cbe407909f9d95b91304c73fdc4c66a50
[ "Intel" ]
null
null
null
tools/build_defs/buck_helpers.bzl
YifanShenSZ/pytorch
b4232f7cbe407909f9d95b91304c73fdc4c66a50
[ "Intel" ]
null
null
null
tools/build_defs/buck_helpers.bzl
YifanShenSZ/pytorch
b4232f7cbe407909f9d95b91304c73fdc4c66a50
[ "Intel" ]
null
null
null
# Only used for PyTorch open source BUCK build IGNORED_ATTRIBUTE_PREFIX = [ "apple", "fbobjc", "windows", "fbandroid", "macosx", ] IGNORED_ATTRIBUTES = [ "feature", "platforms", ] def filter_attributes(kwgs): keys = list(kwgs.keys()) # drop unncessary attributes for key in keys: if key in IGNORED_ATTRIBUTES: kwgs.pop(key) else: for invalid_prefix in IGNORED_ATTRIBUTE_PREFIX: if key.startswith(invalid_prefix): kwgs.pop(key) return kwgs
20.107143
59
0.595027
0
0
0
0
0
0
0
0
137
0.243339
52169f9fb1131b55d0003765e76973d7f4ef98f8
1,901
py
Python
Chapter8/datacurator.py
PacktPublishing/Testing-Time-Machines
fa85c5cb5252bf3dc83cad5d8ef3d3d425295df1
[ "MIT" ]
1
2022-01-02T16:38:29.000Z
2022-01-02T16:38:29.000Z
Chapter8/datacurator.py
PacktPublishing/Testing-Time-Machines
fa85c5cb5252bf3dc83cad5d8ef3d3d425295df1
[ "MIT" ]
null
null
null
Chapter8/datacurator.py
PacktPublishing/Testing-Time-Machines
fa85c5cb5252bf3dc83cad5d8ef3d3d425295df1
[ "MIT" ]
1
2022-01-02T16:38:31.000Z
2022-01-02T16:38:31.000Z
import datetime import pandas as pd # Retrieve the test cases from the csv into a dictionary dataFile = open('testcases.csv', 'r') dataFrame = pd.read_csv('testcases.csv', index_col=0) dataDictionary = dataFrame.transpose().to_dict() # Create a new dictionary to save the results result = {} # Data optimization -> get the maximum number of steps and of differences oldest = 0 maxSteps = 0 for i in dataDictionary: createdDate = datetime.datetime.fromisoformat(dataDictionary[i]['CreatedDate']) currentDate = datetime.datetime.now() difference = currentDate - createdDate daysOfDifference = difference.days dataDictionary[i]['daysDiff'] = daysOfDifference if daysOfDifference > oldest: oldest = daysOfDifference if dataDictionary[i]['Steps'] > maxSteps: maxSteps = dataDictionary[i]['Steps'] # Save the data in the new dictionary for i in dataDictionary: result[i] = {} passes = dataDictionary[i]['passes'] fails = dataDictionary[i]['fails'] # Data optimization - get the percentage of passes passingTest = (passes / (fails + passes)) * 100 # Data optimization - get the percentage of test age testAge = (dataDictionary[i]['daysDiff'] / oldest) * 100 priorityTest = dataDictionary[i]['Priority'] # Data optimization - get the percentage of number steps numSteps = (dataDictionary[i]['Steps'] / maxSteps) * 100 # Save the new values result[i]['Priority'] = float(priorityTest) result[i]['Age'] = float(testAge) result[i]['Steps'] = float(numSteps) result[i]['Passing'] = float(passingTest) result[i]['Expected'] = float(dataDictionary[i]['LastResult']) # Save to the new Data File dataFrame = pd.DataFrame.from_dict(result, orient='index') dataFile = open('newData.csv', 'w') dataFrame.to_csv(dataFile, sep=',') dataFile.close()
41.326087
83
0.680168
0
0
0
0
0
0
0
0
608
0.319832
5217f39046ec32e0713bd16621436917f0adb823
9,403
py
Python
src/rumour_dnn_evaluator.py
jerrygaoLondon/RPDNN
ace69bda190f74a1a66f8bf06b7d86b4bd5579e7
[ "MIT" ]
17
2020-03-15T12:06:25.000Z
2021-12-16T15:34:50.000Z
src/rumour_dnn_evaluator.py
skykisl/jerrygaoLondon0
6af65e0b1e2c2bbb1fd67b5f9fa26e4f447288ba
[ "MIT" ]
1
2021-08-13T10:22:20.000Z
2021-08-18T23:53:11.000Z
src/rumour_dnn_evaluator.py
skykisl/jerrygaoLondon0
6af65e0b1e2c2bbb1fd67b5f9fa26e4f447288ba
[ "MIT" ]
12
2020-04-01T16:20:06.000Z
2021-11-24T09:52:56.000Z
import os from datetime import timedelta from allennlp.data.iterators import BucketIterator from allennlp.data.token_indexers import ELMoTokenCharactersIndexer from allennlp_rumor_classifier import load_classifier_from_archive, RumorTweetsDataReader from allennlp_rumor_classifier import timestamped_print if __name__ == '__main__': import optparse parser = optparse.OptionParser() parser.add_option('-m', '--model', dest="model", help="model directory to be evaluated", default=None) parser.add_option('-t', '--testset', dest="testset", help="test set csv file path", default=None) parser.add_option('-g', '--n_gpu', dest="n_gpu", help="gpu device(s) to use (-1: no gpu, 0: 1 gpu)", default=-1) parser.add_option('-f', '--feature_setting', dest="feature_setting", help="expriment training setting for 1) source tweet content only (no context); " " 2) context metadata (NF) only (no content); 3) context content only (no source content and no NF) ; " "4) context (CM+CC) only (no source content); 5) Full_Model without CM; 6) Full_Model without CC;", default=-1) parser.add_option('-w', '--temporal_window', dest="event_temporal_window_mins", help="varying time window in minutes, accepting str for a list of number in minutes " "(e.g.,'15, 30, 45, 60, 90, 120, 180, 240, 360, 480, 600, 720, 840, " "960,1080,1200,1440,1680,1920,2160,2400,2640,2880') .", default=None) # retweet is disabled # Our preliminary results shows that retweets metadata are very noisy. # Simply adding retweets into context cause underfitting and poor performance. parser.add_option("--disable_context_type", dest="disable_context_type_option", help="disable social context option: 0: accept all types of context; " "1: disable reply; 2: disable retweet (default); ", default=2) # We only experimented the first two options 0) and 1). 
parser.add_option('-a', '--attention', dest="attention_option", help="select available attention options: " "0) no attention (use final state of LSTM); " "1) hierarchical attention (default); " "2) self_attention_net", default=1) parser.add_option("--max_cxt_size", dest="max_cxt_size_option", help="maximum social context size (default 200)", default=200) options, args = parser.parse_args() rumour_dnn_model_dir = options.model test_set_csv_path = options.testset no_gpu = int(options.n_gpu) feature_setting_option = int(options.feature_setting) event_temporal_filter_hr = options.event_temporal_window_mins disable_context_type_option = int(options.disable_context_type_option) attention_option = int(options.attention_option) max_cxt_size_option = int(options.max_cxt_size_option) print("================= model settings for prediction ========================") print("rumour_dnn_model_dir: ", rumour_dnn_model_dir) print("test_set_csv_path: ", test_set_csv_path) print("no_gpu: ", no_gpu) print("training (feature_setting option) setting (1: source content only; 2: context metadata only; " "3: context content only; 4: context (CC + CM) only); 5: Full_Model without CM; 6: Full_Model without CC; : ", feature_setting_option) print("event_temporal_filter_hr: ", event_temporal_filter_hr) print("disable social context type (0: accept all types of context; 1: disable reply; " "2: disable retweet (default)): ", disable_context_type_option) print("attention_option ( 0) no attention (use final state of LSTM); 1) hierarchical attention (default); ) ", attention_option) print("max_cxt_size: ", max_cxt_size_option) print("============================================================") if feature_setting_option not in [-1, 1, 2, 3, 4, 5, 6]: raise ValueError( "Supported training (feature_setting option) setting (1: source content only; 2: context metadata only; " "3: context content only; 4: context (CC + CM) only); 5: Full_Model without CM; 6: Full_Model without CC. 
" "However, we got [%s]" % feature_setting_option) event_varying_time_window_mins = [] if event_temporal_filter_hr: event_varying_time_window_mins = event_temporal_filter_hr.split(",") print("event varying time window in hours: ", event_varying_time_window_mins) event_varying_timedeltas = [] if len(event_varying_time_window_mins) > 0: for event_varying_time_window_mins_i in event_varying_time_window_mins: event_varying_timedeltas.append(timedelta(minutes=int(event_varying_time_window_mins_i))) print("Experimental context varying window: ") print(event_varying_timedeltas) print("evaluating model [%s] on test set [%s]" % (rumour_dnn_model_dir, test_set_csv_path)) from allennlp_rumor_classifier import config_gpu_use config_gpu_use(no_gpu) model_weight_file = os.path.join(rumour_dnn_model_dir, "weights_best.th") vocab_dir = os.path.join(rumour_dnn_model_dir, "vocabulary") import numpy as np numerical_feature_global_means = np.array( [33482.2189387, 113.50235784, 16110.72212533, 1480.63278539, 1984.97753317, 0.44584125, 13076.6074171, 1144.7288986, 0.01814169, 67.85327272, 4.6219718, 40.2049446, 0.46842004, 11.86826366, 11.45688415, 0.02726267, 2.30499867, 1.51966541, 0.14687646, 0.0030709, 0.88728322, 0.10091743, 0.10361821, 0.07962697, 12.33783996, 2485.25345758, 1., 0.82859045]) numerical_feature_global_std = np.array([96641.67591616, 2193.38510189, 577886.49773141, 7192.66662092, 134946.70443628, 0.22736203, 37326.88297683, 741.21114409, 0.13346375, 632.37911747, 47.66767052, 612.61064067, 0.49900171, 9.09843269, 5.25300108, 0.16284784, 127.00542757, 58.64079579, 0.35398272, 0.05533058, 0.31624628, 0.30121936, 0.3149778, 0.27071482, 6.86573998, 45822.31909476, 0., 0.37686645]) model, rumor_dnn_predictor = load_classifier_from_archive(vocab_dir_path=vocab_dir, model_weight_file=model_weight_file, n_gpu_use=no_gpu, max_cxt_size=max_cxt_size_option, feature_setting=feature_setting_option, global_means=numerical_feature_global_means, 
global_stds=numerical_feature_global_std, attention_option=attention_option) elmo_token_indexer = ELMoTokenCharactersIndexer() rumor_train_set_reader = RumorTweetsDataReader(token_indexers={'elmo': elmo_token_indexer}) test_instances = rumor_train_set_reader.read(test_set_csv_path) from training_util import evaluate data_iterator = BucketIterator(batch_size=128, sorting_keys=[("sentence", "num_tokens")]) data_iterator.index_with(model.vocab) # =============== apply settings to trained model =========== model.feature_setting = feature_setting_option model.set_disable_cxt_type_option(disable_context_type_option) # cannot reset attention mechanism and cannot reset maximum context size in evaluation # model.set_attention_mechanism(attention_option) # model.set_max_cxt_size(max_cxt_size_option) print("maximum cxt size: ", model.max_cxt_size) print("model architecture: ") print(model) print("==============================") all_metrics = [] if len(event_varying_timedeltas) == 0: metrics = evaluate(model, test_instances, data_iterator, no_gpu, "") timestamped_print("Evaluation results :") for key, metric in metrics.items(): print("%s: %s" % (key, metric)) else: for event_varying_timedelta in event_varying_timedeltas: timestamped_print("evaluate model with time window [%s]" % (str(event_varying_timedelta))) model.event_varying_timedelta = event_varying_timedelta metrics = evaluate(model, test_instances, data_iterator, no_gpu, "") timestamped_print("Evaluation results at [%s] :" % str(event_varying_timedelta)) for key, metric in metrics.items(): print("%s: %s" % (key, metric)) all_metrics.append(metrics) print(all_metrics) print("completed")
52.530726
130
0.610443
0
0
0
0
0
0
0
0
2,906
0.30905
5217f9c7a2fa74aac7a50560f523fcdec28bd266
511
py
Python
1528_shuffle_string.py
kannan5/LeetCode
2523f5aed7fd8f087004d711597a966088c12db7
[ "MIT" ]
null
null
null
1528_shuffle_string.py
kannan5/LeetCode
2523f5aed7fd8f087004d711597a966088c12db7
[ "MIT" ]
null
null
null
1528_shuffle_string.py
kannan5/LeetCode
2523f5aed7fd8f087004d711597a966088c12db7
[ "MIT" ]
null
null
null
import math class Solution: def restoreString(self, s, indices): str_list, mov_index = list(s), 0 traverse = len(indices) for x in range(0, traverse): mov_index = indices[x] if mov_index == x: continue str_list[mov_index] = s[x] return "".join(str_list) if __name__ == '__main__': a = Solution() print(a.restoreString("codeleet", [4, 5, 6, 7, 0, 2, 1, 3])) print(a.restoreString("aiohn", [3, 1, 4, 2, 0]))
24.333333
64
0.542074
330
0.645793
0
0
0
0
0
0
29
0.056751
5219024973c615e90068876e93ddceda320d7c3a
2,218
py
Python
python2.7/site-packages/cpyrit/config.py
84KaliPleXon3/sslstrip-hsts-openwrt
f875ded48078a3ed84bffef1e69dcbeaf2e77ae3
[ "MIT" ]
4
2020-10-31T19:52:05.000Z
2021-09-22T11:39:27.000Z
python2.7/site-packages/cpyrit/config.py
84KaliPleXon3/sslstrip-hsts-openwrt
f875ded48078a3ed84bffef1e69dcbeaf2e77ae3
[ "MIT" ]
null
null
null
python2.7/site-packages/cpyrit/config.py
84KaliPleXon3/sslstrip-hsts-openwrt
f875ded48078a3ed84bffef1e69dcbeaf2e77ae3
[ "MIT" ]
2
2020-02-27T08:28:35.000Z
2020-09-13T12:39:26.000Z
# -*- coding: UTF-8 -*- # # Copyright 2008-2011, Lukas Lueg, lukas.lueg@gmail.com # # This file is part of Pyrit. # # Pyrit is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Pyrit is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Pyrit. If not, see <http://www.gnu.org/licenses/>. from __future__ import with_statement import os import sys def default_config(): config = {'default_storage': 'file://', \ 'rpc_server': 'false', \ 'rpc_announce': 'true', \ 'rpc_announce_broadcast': 'false', \ 'rpc_knownclients': '', \ 'workunit_size': '75000', \ 'limit_ncpus': 0} return config def read_configfile(filename): config = default_config() with open(filename, 'rb') as f: for line in f: if line.startswith('#') or '=' not in line: continue option, value = map(str.strip, line.split('=', 1)) if option in config: config[option] = value else: print >> sys.stderr, "WARNING: Unknown option '%s' " \ "in configfile '%s'" % (option, filename) return config def write_configfile(config, filename): with open(filename, 'wb') as f: for option, value in sorted(config.items()): f.write("%s = %s\n" % (option, value)) configpath = os.path.expanduser(os.path.join('~', '.pyrit')) default_configfile = os.path.join(configpath, 'config') if os.path.exists(default_configfile): cfg = read_configfile(default_configfile) else: cfg = default_config() if not os.path.exists(configpath): os.makedirs(configpath) write_configfile(cfg, default_configfile)
32.617647
77
0.618124
0
0
0
0
0
0
0
0
996
0.449053
52192abd9407b91e90fb61d5319cec65580111e5
34
py
Python
exercises/spiral-matrix/spiral_matrix.py
kishankj/python
82042de746128127502e109111e6c4e8ab002af6
[ "MIT" ]
1,177
2017-06-21T20:24:06.000Z
2022-03-29T02:30:55.000Z
exercises/spiral-matrix/spiral_matrix.py
kishankj/python
82042de746128127502e109111e6c4e8ab002af6
[ "MIT" ]
1,890
2017-06-18T20:06:10.000Z
2022-03-31T18:35:51.000Z
exercises/spiral-matrix/spiral_matrix.py
kishankj/python
82042de746128127502e109111e6c4e8ab002af6
[ "MIT" ]
1,095
2017-06-26T23:06:19.000Z
2022-03-29T03:25:38.000Z
def spiral_matrix(size): pass
11.333333
24
0.705882
0
0
0
0
0
0
0
0
0
0
521945c5bd364091ce442a30df0c8df3c6864883
12,982
py
Python
run_project.py
suryamp97/CSE_535_Project2_Boolean_Query_and_Inverted_Index
9986805c61a7f70fceca554a73bbbc7306b47be6
[ "MIT" ]
1
2021-10-14T23:43:34.000Z
2021-10-14T23:43:34.000Z
run_project.py
suryamp97/CSE_535_Project2_Boolean_Query_and_Inverted_Index
9986805c61a7f70fceca554a73bbbc7306b47be6
[ "MIT" ]
null
null
null
run_project.py
suryamp97/CSE_535_Project2_Boolean_Query_and_Inverted_Index
9986805c61a7f70fceca554a73bbbc7306b47be6
[ "MIT" ]
null
null
null
''' @author: Sougata Saha Institute: University at Buffalo ''' from tqdm import tqdm from preprocessor import Preprocessor from indexer import Indexer from collections import OrderedDict from linkedlist import LinkedList import inspect as inspector import sys import argparse import json import time import random import flask from flask import Flask from flask import request import hashlib import copy app = Flask(__name__) class ProjectRunner: def __init__(self): self.preprocessor = Preprocessor() self.indexer = Indexer() def _merge(self, plist1, plist2, skip): """ Implement the merge algorithm to merge 2 postings list at a time. Use appropriate parameters & return types. While merging 2 postings list, preserve the maximum tf-idf value of a document. To be implemented.""" m_l = LinkedList() comparisons = 0 pl1 = plist1 pl2 = plist2 p1 = pl1.start_node p2 = pl2.start_node if not skip: if pl1 is not None and pl2 is not None: while p1 and p2: if p1.value == p2.value: idf_ = max(p1.tf_idf, p2.tf_idf) m_l.insert_at_end(idf_,p1.value) p1 = p1.next p2 = p2.next elif p1.value < p2.value: p1 = p1.next else: p2 = p2.next comparisons += 1 #print("nonskipcomp: ",comparisons) else: if pl1 is not None and pl2 is not None: while p1 and p2: comparisons += 1 if p1.value == p2.value: idf_ = max(p1.tf_idf, p2.tf_idf) m_l.insert_at_end(idf_,p1.value) p1 = p1.next p2 = p2.next elif p1.value < p2.value: if p1.skip and (p1.skip.value <= p2.value): while p1.skip and (p1.skip.value <= p2.value): p1 = p1.skip else: p1 = p1.next elif p2.skip and (p2.skip.value <= p1.value): while p2.skip and (p2.skip.value <= p1.value): p2 = p2.skip else: p2 = p2.next #print("skipcomp: ",comparisons) m_l.add_skip_connections() # if not(skip or toSort): # print("comp",comparisons) # print("p1 len: ",len(plist1.traverse_list()),"p2 len: ",len(plist2.traverse_list())) return m_l, comparisons def _daat_and(self, qlist, skip, toSort): m_l = None q_dict={} query_list = [] for q in qlist: q_dict[q]= 
self.indexer.inverted_index[q].length for k,v in sorted(q_dict.items(), key=lambda item: item[1]): query_list.append(k) n_t = len(query_list) # if not(skip or toSort): # print("ql",qlist) # print("qdict",q_dict) # print("final q list",query_list , n_t) tot_comp = 0 if n_t==1: p_l = self._get_postings(query_list[0]) return p_l else: for i in range(1, n_t): if m_l: m_l, comparisons = self._merge(m_l, self.indexer.inverted_index[query_list[i]], skip) tot_comp += comparisons else: m_l, comparisons = self._merge(self.indexer.inverted_index[query_list[i-1]],self.indexer.inverted_index[query_list[i]], skip) tot_comp += comparisons temp_dict = {} tt = m_l.start_node while tt: temp_dict[tt.value] = tt.tf_idf tt=tt.next if toSort: m_res = [] for k,v in sorted(temp_dict.items(), key=lambda item: item[1], reverse=True): m_res.append(k) return m_res,tot_comp return m_l.traverse_list(), tot_comp def _get_postings(self,term_, toSkip): """ Function to get the postings list of a term from the index. Use appropriate parameters & return types. To be implemented.""" postings_list = [] if term_ in self.indexer.inverted_index: if not toSkip: postings_list=self.indexer.inverted_index[term_].traverse_list() else: postings_list=self.indexer.inverted_index[term_].traverse_skips() return postings_list def _output_formatter(self, op): """ This formats the result in the required format. Do NOT change.""" if op is None or len(op) == 0: return [], 0 op_no_score = [int(i) for i in op] results_cnt = len(op_no_score) return op_no_score, results_cnt def run_indexer(self, corpus): """ This function reads & indexes the corpus. After creating the inverted index, it sorts the index by the terms, add skip pointers, and calculates the tf-idf scores. 
Already implemented, but you can modify the orchestration, as you seem fit.""" with open(corpus, 'r') as fp: for line in tqdm(fp.readlines()): doc_id, document = self.preprocessor.get_doc_id(line) tokenized_document = self.preprocessor.tokenizer(document) self.indexer.generate_inverted_index(doc_id, tokenized_document) self.indexer.sort_terms() print("inverted indices: ") self.indexer.add_skip_connections() self.indexer.calculate_tf_idf() def sanity_checker(self, command): """ DO NOT MODIFY THIS. THIS IS USED BY THE GRADER. """ index = self.indexer.get_index() kw = random.choice(list(index.keys())) return {"index_type": str(type(index)), "indexer_type": str(type(self.indexer)), "post_mem": str(index[kw]), "post_type": str(type(index[kw])), "node_mem": str(index[kw].start_node), "node_type": str(type(index[kw].start_node)), "node_value": str(index[kw].start_node.value), "command_result": eval(command) if "." in command else ""} def run_queries(self, query_list, random_command): """ DO NOT CHANGE THE output_dict definition""" output_dict = {'postingsList': {}, 'postingsListSkip': {}, 'daatAnd': {}, 'daatAndSkip': {}, 'daatAndTfIdf': {}, 'daatAndSkipTfIdf': {}, 'sanity': self.sanity_checker(random_command)} for query in tqdm(query_list): """ Run each query against the index. You should do the following for each query: 1. Pre-process & tokenize the query. 2. For each query token, get the postings list & postings list with skip pointers. 3. Get the DAAT AND query results & number of comparisons with & without skip pointers. 4. Get the DAAT AND query results & number of comparisons with & without skip pointers, along with sorting by tf-idf scores.""" input_term_arr = self.preprocessor.tokenizer(query) # Tokenized query. To be implemented. #print(input_term_arr) for term in input_term_arr: postings, skip_postings = None, None """ Implement logic to populate initialize the above variables. The below code formats your result to the required format. 
To be implemented.""" postings = self._get_postings(term, False) skip_postings = self._get_postings(term, True) output_dict['postingsList'][term] = postings output_dict['postingsListSkip'][term] = skip_postings and_op_no_skip, and_comparisons_no_skip = self._daat_and(input_term_arr, False, False) and_op_skip,and_comparisons_skip = self._daat_and(input_term_arr, True, False) and_op_no_skip_sorted, and_comparisons_no_skip_sorted = self._daat_and(input_term_arr, False, True) and_op_skip_sorted, and_comparisons_skip_sorted = self._daat_and(input_term_arr, True, True) """ Implement logic to populate initialize the above variables. The below code formats your result to the required format. To be implemented.""" and_op_no_score_no_skip, and_results_cnt_no_skip = self._output_formatter(and_op_no_skip) and_op_no_score_skip, and_results_cnt_skip = self._output_formatter(and_op_skip) and_op_no_score_no_skip_sorted, and_results_cnt_no_skip_sorted = self._output_formatter(and_op_no_skip_sorted) and_op_no_score_skip_sorted, and_results_cnt_skip_sorted = self._output_formatter(and_op_skip_sorted) output_dict['daatAnd'][query.strip()] = {} output_dict['daatAnd'][query.strip()]['results'] = and_op_no_score_no_skip output_dict['daatAnd'][query.strip()]['num_docs'] = and_results_cnt_no_skip output_dict['daatAnd'][query.strip()]['num_comparisons'] = and_comparisons_no_skip output_dict['daatAndSkip'][query.strip()] = {} output_dict['daatAndSkip'][query.strip()]['results'] = and_op_no_score_skip output_dict['daatAndSkip'][query.strip()]['num_docs'] = and_results_cnt_skip output_dict['daatAndSkip'][query.strip()]['num_comparisons'] = and_comparisons_skip output_dict['daatAndTfIdf'][query.strip()] = {} output_dict['daatAndTfIdf'][query.strip()]['results'] = and_op_no_score_no_skip_sorted output_dict['daatAndTfIdf'][query.strip()]['num_docs'] = and_results_cnt_no_skip_sorted output_dict['daatAndTfIdf'][query.strip()]['num_comparisons'] = and_comparisons_no_skip_sorted 
output_dict['daatAndSkipTfIdf'][query.strip()] = {} output_dict['daatAndSkipTfIdf'][query.strip()]['results'] = and_op_no_score_skip_sorted output_dict['daatAndSkipTfIdf'][query.strip()]['num_docs'] = and_results_cnt_skip_sorted output_dict['daatAndSkipTfIdf'][query.strip()]['num_comparisons'] = and_comparisons_skip_sorted return output_dict @app.route("/execute_query", methods=['POST']) def execute_query(): """ This function handles the POST request to your endpoint. Do NOT change it.""" start_time = time.time() queries = request.json["queries"] random_command = request.json["random_command"] """ Running the queries against the pre-loaded index. """ output_dict = runner.run_queries(queries, random_command) """ Dumping the results to a JSON file. """ with open(output_location, 'w') as fp: json.dump(output_dict, fp) response = { "Response": output_dict, "time_taken": str(time.time() - start_time), "username_hash": username_hash } return flask.jsonify(response) if __name__ == "__main__": """ Driver code for the project, which defines the global variables. Do NOT change it.""" output_location = "project2_output.json" parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument("--corpus", type=str, help="Corpus File name, with path.") parser.add_argument("--queries", type=str, help="Queries File name, with path.") parser.add_argument("--output_location", type=str, help="Output file name.", default=output_location) parser.add_argument("--username", type=str, help="Your UB username. It's the part of your UB email id before the @buffalo.edu. " "DO NOT pass incorrect value here") argv = parser.parse_args() corpus = argv.corpus queries = argv.queries output_location = argv.output_location username_hash = hashlib.md5(argv.username.encode()).hexdigest() """ Initialize the project runner""" runner = ProjectRunner() """ Index the documents from beforehand. 
When the API endpoint is hit, queries are run against this pre-loaded in memory index. """ runner.run_indexer(corpus) # with open(queries, 'r') as q: # querylist= q.readlines() # output_dict = runner.run_queries(querylist, "[0]") # with open(output_location, 'w') as fp: # json.dump(output_dict, fp) app.run(host="0.0.0.0", port=9999)
40.952681
145
0.58558
10,358
0.797874
0
0
717
0.05523
0
0
3,875
0.29849
521955dfd92712ad98f507c7f4f176ac63438b6c
16,234
py
Python
app/home/views.py
1IllI1/BBS_MIGRATE
b99d3521529ac6b5c039c8d5aa7cb5782ae899a9
[ "MIT" ]
null
null
null
app/home/views.py
1IllI1/BBS_MIGRATE
b99d3521529ac6b5c039c8d5aa7cb5782ae899a9
[ "MIT" ]
null
null
null
app/home/views.py
1IllI1/BBS_MIGRATE
b99d3521529ac6b5c039c8d5aa7cb5782ae899a9
[ "MIT" ]
null
null
null
# coding:utf8 # 调用蓝图 from . import home from flask import render_template, redirect, url_for, flash, session, request,current_app from app.home.forms import RegistForm, LoginForm, UserdetailForm, PwdForm, CommentForm, PostForm from app.models import User, UserLoginLog, Comment, Post,Col from werkzeug.security import generate_password_hash from werkzeug.utils import secure_filename import uuid from app import db from app.home.email import send_mail from functools import wraps import time import os # 定义用户登录判断装饰器 def user_login_req(func): @wraps(func) def decorated_function(*args, **kwargs): # session不存在时请求登录 if "user" not in session: return redirect(url_for("home.user_login", next=request.url)) return func(*args, **kwargs) return decorated_function # html测试路由 @home.route('/usefortest/') def ust(): return render_template('home/USERFORTEST.html') # 首页路由 @home.route('/') def index(): # posts = Post.query.all() current_user_id = 0 current_user_name ="" if "user" in session: current_user_name = session["user"] user = User.query.filter_by(name=current_user_name).first() current_user_id = user.id page_index = request.args.get('page', 1, type=int) query = Post.query.join(User).filter(User.id == Post.user_id).order_by(Post.addtime.desc()) pagination = query.paginate(page_index, per_page=10, error_out=False) posts = pagination.items return render_template('home/index.html', posts=posts, pagination=pagination,current_user_name=current_user_name,current_user_id=current_user_id) #首页删除个人发布的内容 @home.route("/index/del/") @user_login_req def index_del(): #获取当前登录用户id current_user_name = session["user"] user = User.query.filter_by(name=current_user_name).first() current_user_id = user.id index_id = request.args.get("id", '0') post = Post.query.get_or_404(int(index_id)) if post.user_id != current_user_id: flash("删除不合法") return redirect(url_for("home.index")) db.session.delete(post) db.session.commit() flash("删除成功") return redirect(url_for("home.index")) #设置帖子关注 
@home.route("/index/col/") @user_login_req def index_col(): #获取当前登录用户id current_user_name = session["user"] user = User.query.filter_by(name=current_user_name).first() current_user_id = user.id index_id = request.args.get("id", '0') col_check = Col.query.filter_by(id=index_id).count() if col_check == 0: col=Col( post_id=index_id, user_id=current_user_id ) db.session.add(col) db.session.commit() flash("收藏成功","ok") flash("收藏已存在","err") return redirect(url_for("home.index")) #设置评论关注 @home.route("/play/col/") @user_login_req def play_col(): #获取当前登录用户id current_user_name = session["user"] user = User.query.filter_by(name=current_user_name).first() current_user_id = user.id index_id = request.args.get("id", '0') col_check = Col.query.filter_by(id=index_id).count() if col_check == 0: col=Col( comment_id=index_id, user_id=current_user_id ) db.session.add(col) db.session.commit() flash("收藏成功","ok") flash("收藏已存在","err") return redirect(url_for("home.index")) # # from io import BytesIO # from . import verify_code # @home.route('/code') # def get_code(): # image, code = verify_code.get_verify_code() # # 图片以二进制形式写入 # buf = BytesIO() # image.save(buf, 'jpeg') # buf_str = buf.getvalue() # # 把buf_str作为response返回前端,并设置首部字段 # response = verify_code.make_response(buf_str) # response.headers['Content-Type'] = 'image/gif' # # 将验证码字符串储存在session中 # session['image'] = code # return response @home.route('/activate/<token>') def activate(token): #验证token 提取id if User.check_active_token(token): flash("账户已经激活") return redirect(url_for("home.user_login")) else: flash("激活失败") return redirect(url_for("home.index")) # 登录路由 @home.route("/login/", methods=["POST", "GET"]) def user_login(): form = LoginForm() if form.validate_on_submit(): data = form.data user = User.query.filter_by(name=data["name"]).first() print("登录按钮被点击") # if session.get('image').lower() != form.verify_code.data.lower(): # flash('验证码错误') # return render_template('home/user_login.html', form=form) 
print("用户激活状态"+str(user.activate)) if user.activate: if not user.check_pwd(data["pwd"]): flash("用户名或密码错误!") return redirect(url_for("home.user_login")) session["user"] = data["name"] #session["user_id"] = user.id userloginlog = UserLoginLog( user_id=user.id, ip=request.remote_addr, addtime=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) ) db.session.add(userloginlog) db.session.commit() return redirect(request.args.get('next') or url_for("home.index")) else: flash("用户尚未激活,请激活以后再登录") return render_template('home/user_login.html', form=form) # 登出路由 @home.route("/logout/") @user_login_req def logout(): session.pop("user") return redirect(url_for("home.user_login")) # 会员注册 @home.route("/register/", methods=['GET', "POST"]) def register(): form = RegistForm() if form.validate_on_submit(): data = form.data user = User( name=data["name"], email=data["email"], phone=data["phone"], pwd=generate_password_hash(data["pwd"]), uuid=uuid.uuid4().hex, addtime=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) ) print(user) check = User.query.filter_by(name=data["name"]).count() if check == 0: db.session.add(user) db.session.commit() print("用户数据提交到数据库") token = user.generate_active_token() # 发送用户账户激活的邮件 send_mail(user.email, '激活您的账户', 'email/activate', username=user.name, token=token) # 弹出消息 提示用户 flash("注册成功,请点击邮件中的链接完成激活",'ok') return redirect(url_for("home.user_login")) flash("用户名已存在","err") return render_template('home/register.html', form=form) # 修改文件名称 def change_filename(filename): fileinfo = os.path.splitext(filename) # 对名字进行前后缀分离 #注意此处datetime.now() filename = time.strftime("%Y%m%d%H%M%S") + "_" + fileinfo[-1] # 生成新文件名 return filename # 用户中心 @home.route("/user/", methods=["GET", "POST"]) @user_login_req def user(): form = UserdetailForm() user = User.query.filter_by(name=(session["user"])).first() if user.face is not None: form.face.validators = [] if request.method == "GET": form.name.data = user.name form.email.data = user.email form.phone.data = user.phone 
form.info.data = user.info if form.validate_on_submit(): print('button pressed') data = form.data # if data["name"] != user.name and name_count == 1: # flash("用户名已被占用") # return redirect(url_for("home.user")) if request.method == 'POST': if request.files['imageup']: file = request.files['imageup'] print("获取文件成功") filename = secure_filename(str(hash(file.filename)))+str(user.id)+".jpg" print("secure成功"+filename) del_face = user.face file.save(os.path.join(current_app.config['UP_DIR']+os.sep+"users",filename)) print("上传成功" + filename) #os.remove(os.path.join(app.config['UP_DIR'] + os.sep+"users", del_face)) print("删除文件"+del_face+"成功") user.face = filename user.name=data["name"] user.email=data["email"] user.phone=data["phone"] user.info=data["info"] db.session.add(user) db.session.commit() flash("修改成功!") return redirect(url_for("home.user")) flash("失败") return render_template('home/user.html', form=form, user=user) @home.route("/pwd/", methods=["GET", "POST"]) @user_login_req def pwd(): form = PwdForm() if form.validate_on_submit(): data = form.data user = User.query.filter_by(name=session["user"]).first() user.pwd = generate_password_hash(data["new_pwd"]) db.session.add(user) db.session.commit() flash("修改密码成功,请重新登录!", "ok") return redirect(url_for("home.logout")) return render_template('home/pwd.html', form=form) # 会员中心评论列表 评论功能在paly路由中 @home.route("/comments/") @user_login_req def comments(): user_name = session["user"] user = User.query.filter_by(name=user_name).first() page = request.args.get('page', 1, type=int) # query = Comment.query.order_by(Comment.addtime.desc()) query = Comment.query.filter(Comment.user_id == user.id).order_by(Comment.addtime.desc()) pagination = query.paginate(page, per_page=10, error_out=False) comments = pagination.items return render_template('home/comments.html', user=user,user_name=user_name, comments=comments,pagination=pagination) @home.route("/comments/del/") @user_login_req def comment_del(): comment_id = request.args.get("id", 
'') comment = Comment.query.get_or_404(int(comment_id)) db.session.delete(comment) db.session.commit() flash("评论删除成功") return redirect(url_for("home.comments")) @home.route("/postrecords/") @user_login_req def postrecords(): user_name = session["user"] user = User.query.filter_by(name=user_name).first() user_id = user.id user = User.query.filter_by(id=user_id).first() page = request.args.get('page', 1, type=int) # query = Comment.query.order_by(Comment.addtime.desc()) query = Post.query.filter(Post.user_id == user_id).order_by(Post.addtime.desc()) pagination = query.paginate(page, per_page=5, error_out=False) posts = pagination.items return render_template('home/post_records.html', user=user,user_name=user_name, posts=posts, pagination=pagination) @home.route("/postrecords/del/") @user_login_req def post_del(): post_id = request.args.get("id", '') post = Post.query.get_or_404(int(post_id)) comment = Comment.query.filter_by(post_id=post_id).all() db.session.delete(post) db.session.commit() db.session.delete(comment) db.session.commit() flash("主题帖删除成功") return redirect(url_for("home.postrecords")) @home.route("/loginlog/", methods=["POST", "GET"]) @user_login_req def loginlog(): current_user_name = session["user"] user = User.query.filter_by(name=current_user_name).first() user_login_log = UserLoginLog.query.filter_by( user_id=user.id ).order_by( UserLoginLog.addtime.desc() # 此处限制了查寻到的登录日志为前15条 ).limit(15).all() return render_template("home/loginlog.html", user_login_log=user_login_log) @home.route("/col/del/") @user_login_req def col_del(): current_user_name = session["user"] user= User.query.filter_by(name=current_user_name).first() current_user_id = user.id col_id = request.args.get("id", '') col = Col.query.get_or_404(int(col_id)) if col.user_id != current_user_id: flash("收藏删除不合法") return redirect(url_for("home.col")) db.session.delete(col) db.session.commit() flash("收藏删除成功") return redirect(url_for("home.col")) ##会员中心收藏列表 @home.route("/col/") @user_login_req def 
col(): current_user_name = session["user"] user = User.query.filter_by(name=current_user_name).first() user_id = user.id # 获取当前分页页面编号(编号,默认值,类型) page = request.args.get('page', 1, type=int) # 从数据库中查找对应用户的收藏 #query =Col.query.filter_by(user_id =user_id).order_by(Col.addtime.desc()) query = Col.query.join(Post).join(User).filter(Col.user_id==user_id,Col.post_id == Col.post_id).order_by(Col.addtime.desc()) # 对当前贴的评论进行分页(分页号,每页展示的数量,error) pagination = query.paginate(page, per_page=5, error_out=False) # 获得分页后当前页显示的评论 cols = pagination.items # 渲染主题帖展示页面 print(query) return render_template('home/col.html',cols=cols,pagination=pagination) @home.route("/index/") def reindex(): # z此处index重复 return redirect(url_for("home.index")) @home.route('/animation/') def animation(): data = {'sgd.jpg', 'sutstudent.jpg', 'sutsight01.jpg', 'sutsight02.jpg', 'hxxy.jpg'} return render_template('home/animation.html', data=data) @home.route('/search/') def search(): current_user_id = 0 current_user_name = "" if "user" in session: current_user_name = session["user"] user = User.query.filter_by(name=current_user_name).first() current_user_id = user.id # 获取查询的内容 # search=request.args.get("search",'',type=str) search = request.args.get("search", "搜索结果为空") # print("搜索的的内容"+search) # 获取当前分页页面编号(编号,默认值,类型) page = request.args.get('page', 1, type=int) # 从数据库中查找对应当前主题贴的评论 query = Post.query.filter(Post.title.ilike('%' + search + '%')).order_by(Post.addtime.desc()) # 对当前主题帖的评论数量进行统计 post_count = Post.query.filter(Post.title.ilike('%' + search + '%')).count() # 对当前贴的评论进行分页(分页号,每页展示的数量,error) pagination = query.paginate(page, per_page=5, error_out=False) # 获得分页后当前页显示的评论 comments = pagination.items # 渲染主题帖展示页面 return render_template("home/search.html", search=search, count=post_count, current_user_name=current_user_name,pagination=pagination, results=comments,current_user_id=current_user_id) # 主题帖详情页 @home.route('/play/', methods=["GET", "POST"]) def play(): # 从请求参数拿到请求的post_id post_id = 
request.args.get("post_id", "") # 评论表单 form = CommentForm() # 清除表单内容 form.data['content'] = "" # 利用post_id找到要显示的主题贴 post = Post.query.filter(Post.id == post_id).first() # 利用post_id在User表中查找作者姓名 author = User.query.filter(User.id == post.user_id).first() # 从session中取得当前登陆中的用户名 current_user_id = 0 current_user_name = '游客' if "user" in session: current_user_name = session["user"] user = User.query.filter_by(name=current_user_name).first() current_user_id = user.id # 若用户登录则显示评论发布表单 if "user" in session and form.validate_on_submit(): comment = Comment( content=form.data["content"], post_id=int(post_id), user_id=current_user_id, addtime=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) ) db.session.add(comment) db.session.commit() flash("评论提交成功!") # 获取当前分页页面编号(编号,默认值,类型) page = request.args.get('page', 1, type=int) # 从数据库中查找对应当前主题贴的评论 query = Comment.query.join(User).filter(Comment.post_id == post_id).order_by(Comment.addtime.desc()) # 对当前主题帖的评论数量进行统计 comment_count = Comment.query.filter(Comment.post_id == post_id).count() # 对当前贴的评论进行分页(分页号,每页展示的数量,error) pagination = query.paginate(page, per_page=5, error_out=False) # 获得分页后当前页显示的评论 comments = pagination.items # 渲染主题帖展示页面 return render_template("home/play.html", post=post, form=form, comments=comments, pagination=pagination, author=author,current_user_name=current_user_name, count=comment_count,current_user_id=current_user_id) @home.route('/post/', methods=["GET", "POST"]) @user_login_req def post(): form = PostForm() current_user_name = session["user"] user = User.query.filter_by(name=current_user_name).first() current_user_id = user.id if form.validate_on_submit(): data = form.data post = Post( title=data["title"], content=data["content"], user_id=current_user_id, addtime=time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) ) db.session.add(post) db.session.commit() flash("发布主题帖成功") return render_template("home/post_add.html", form=form,current_user_name=current_user_name) #404 @home.errorhandler(404) def 
page_not_found(error): return render_template("home/404.html"),404
33.334702
188
0.644512
0
0
0
0
16,075
0.900964
0
0
5,248
0.294137
521ab16042032bc33c8f25cd62fd25ddff343b15
3,797
py
Python
NASA SPACEAPPS CHALLENGE/Solution/Software part/Astronomical Data and Python Libraries/Astropy/astropy-1.1.2/astropy/units/cgs.py
sahirsharma/Martian
062e9b47849512863c16713811f347ad7e121b56
[ "MIT" ]
null
null
null
NASA SPACEAPPS CHALLENGE/Solution/Software part/Astronomical Data and Python Libraries/Astropy/astropy-1.1.2/astropy/units/cgs.py
sahirsharma/Martian
062e9b47849512863c16713811f347ad7e121b56
[ "MIT" ]
null
null
null
NASA SPACEAPPS CHALLENGE/Solution/Software part/Astronomical Data and Python Libraries/Astropy/astropy-1.1.2/astropy/units/cgs.py
sahirsharma/Martian
062e9b47849512863c16713811f347ad7e121b56
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # Licensed under a 3-clause BSD style license - see LICENSE.rst """ This package defines the CGS units. They are also available in the top-level `astropy.units` namespace. """ from __future__ import absolute_import, division, print_function, unicode_literals from ..utils.compat.fractions import Fraction from . import si from .core import UnitBase, def_unit _ns = globals() def_unit(['cm', 'centimeter'], si.cm, namespace=_ns, prefixes=False) g = si.g s = si.s C = si.C rad = si.rad sr = si.sr cd = si.cd K = si.K deg_C = si.deg_C mol = si.mol ########################################################################## # ACCELERATION def_unit(['Gal', 'gal'], cm / s ** 2, namespace=_ns, prefixes=True, doc="Gal: CGS unit of acceleration") ########################################################################## # ENERGY # Use CGS definition of erg def_unit(['erg'], g * cm ** 2 / s ** 2, namespace=_ns, prefixes=True, doc="erg: CGS unit of energy") ########################################################################## # FORCE def_unit(['dyn', 'dyne'], g * cm / s ** 2, namespace=_ns, prefixes=True, doc="dyne: CGS unit of force") ########################################################################## # PRESSURE def_unit(['Ba', 'Barye', 'barye'], g / (cm * s ** 2), namespace=_ns, prefixes=True, doc="Barye: CGS unit of pressure") ########################################################################## # DYNAMIC VISCOSITY def_unit(['P', 'poise'], g / (cm * s), namespace=_ns, prefixes=True, doc="poise: CGS unit of dynamic viscosity") ########################################################################## # KINEMATIC VISCOSITY def_unit(['St', 'stokes'], cm ** 2 / s, namespace=_ns, prefixes=True, doc="stokes: CGS unit of kinematic viscosity") ########################################################################## # WAVENUMBER def_unit(['k', 'Kayser', 'kayser'], cm ** -1, namespace=_ns, prefixes=True, doc="kayser: CGS unit of wavenumber") 
########################################################################### # ELECTRICAL def_unit(['D', 'Debye', 'debye'], Fraction(1, 3) * 1e-29 * C * si.m, namespace=_ns, prefixes=True, doc="Debye: CGS unit of electric dipole moment") def_unit(['Fr', 'Franklin', 'statcoulomb', 'statC', 'esu'], g ** Fraction(1, 2) * cm ** Fraction(3, 2) * s ** -1, namespace=_ns, doc='Franklin: CGS (ESU) unit of charge') def_unit(['statA', 'statampere'], Fr * s ** -1, namespace=_ns, doc='statampere: CGS (ESU) unit of current') def_unit(['Bi', 'Biot', 'abA', 'abampere', 'emu'], g ** Fraction(1, 2) * cm ** Fraction(1, 2) * s ** -1, namespace=_ns, doc='Biot: CGS (EMU) unit of current') def_unit(['abC', 'abcoulomb'], Bi * s, namespace=_ns, doc='abcoulomb: CGS (EMU) of charge') ########################################################################### # MAGNETIC def_unit(['G', 'Gauss', 'gauss'], 1e-4 * si.T, namespace=_ns, prefixes=True, doc="Gauss: CGS unit for magnetic field") ########################################################################### # BASES bases = set([cm, g, s, rad, cd, K, mol]) ########################################################################### # CLEANUP del UnitBase del def_unit del si del Fraction ########################################################################### # DOCSTRING # This generates a docstring for this module that describes all of the # standard units defined here. from .utils import generate_unit_summary as _generate_unit_summary if __doc__ is not None: __doc__ += _generate_unit_summary(globals())
27.715328
82
0.475112
0
0
0
0
0
0
0
0
2,038
0.53674
521b98e3d1cb73025b648a7809467100f6409c97
5,164
py
Python
sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_version.py
albailey/config
40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9
[ "Apache-2.0" ]
10
2020-02-07T18:57:44.000Z
2021-09-11T10:29:34.000Z
sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_version.py
albailey/config
40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9
[ "Apache-2.0" ]
1
2021-01-14T12:01:55.000Z
2021-01-14T12:01:55.000Z
sysinv/sysinv/sysinv/sysinv/api/controllers/v1/kube_version.py
albailey/config
40ebe63d7dfc6a0a03216ebe55ed3ec9cf5410b9
[ "Apache-2.0" ]
10
2020-10-13T08:37:46.000Z
2022-02-09T00:21:25.000Z
# # Copyright (c) 2019 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from sysinv.api.controllers.v1 import base from sysinv.api.controllers.v1 import collection from sysinv.common import exception from sysinv.common import kubernetes from sysinv import objects class KubeVersion(base.APIBase): """API representation of a k8s version.""" version = wtypes.text "Unique version for this entry" upgrade_from = [wtypes.text] "List of versions that can upgrade to this version" downgrade_to = [wtypes.text] "List of versions that this version can downgrade to" applied_patches = [wtypes.text] "List of patches that must be applied before upgrading to this version" available_patches = [wtypes.text] "List of patches that must be available before upgrading to this version" target = bool "Denotes whether this is the target version" state = wtypes.text "State of this version" def __init__(self, **kwargs): self.fields = list(objects.kube_version.fields.keys()) for k in self.fields: if not hasattr(self, k): continue setattr(self, k, kwargs.get(k, wtypes.Unset)) @classmethod def convert_with_links(cls, rpc_kube_version, expand=True): kube_version = KubeVersion(**rpc_kube_version.as_dict()) if not expand: kube_version.unset_fields_except(['version', 'target', 'state']) # The version is not a database object so does not have timestamps. 
kube_version.created_at = wtypes.Unset kube_version.updated_at = wtypes.Unset return kube_version class KubeVersionCollection(collection.Collection): """API representation of a collection of k8s versions.""" kube_versions = [KubeVersion] "A list containing kubernetes version objects" def __init__(self, **kwargs): self._type = 'kube_versions' @classmethod def convert_with_links(cls, rpc_kube_version, expand=False): collection = KubeVersionCollection() collection.kube_versions = [KubeVersion.convert_with_links(p, expand) for p in rpc_kube_version] return collection class KubeVersionController(rest.RestController): """REST controller for Kubernetes Versions.""" def __init__(self, parent=None, **kwargs): self._parent = parent self._kube_operator = kubernetes.KubeOperator() @staticmethod def _update_target(version_obj, upgrade_to_version): """Determines whether this is the target version""" if upgrade_to_version is not None: if upgrade_to_version == version_obj.version: # We are in an upgrade and this is the to_version version_obj.target = True else: # We are in an upgrade and this is not the to_version version_obj.target = False elif version_obj.state == kubernetes.KUBE_STATE_ACTIVE: # We are not in an upgrade and this is the active version version_obj.target = True else: # This is not the version you are looking for version_obj.target = False @wsme_pecan.wsexpose(KubeVersionCollection) def get_all(self): """Retrieve a list of kubernetes versions.""" # Get the current upgrade (if one exists) upgrade_to_version = None try: kube_upgrade_obj = pecan.request.dbapi.kube_upgrade_get_one() upgrade_to_version = kube_upgrade_obj.to_version except exception.NotFound: pass # Get the dynamic version information version_states = self._kube_operator.kube_get_version_states() rpc_kube_versions = [] for version in kubernetes.get_kube_versions(): version_obj = objects.kube_version.get_by_version( version['version']) version_obj.state = version_states[version['version']] 
self._update_target(version_obj, upgrade_to_version) rpc_kube_versions.append(version_obj) return KubeVersionCollection.convert_with_links(rpc_kube_versions) @wsme_pecan.wsexpose(KubeVersion, wtypes.text) def get_one(self, version): """Retrieve information about the given kubernetes version.""" # Get the static version information rpc_kube_version = objects.kube_version.get_by_version(version) # Get the dynamic version information version_states = self._kube_operator.kube_get_version_states() rpc_kube_version.state = version_states[version] # Get the current upgrade (if one exists) upgrade_to_version = None try: kube_upgrade_obj = pecan.request.dbapi.kube_upgrade_get_one() upgrade_to_version = kube_upgrade_obj.to_version except exception.NotFound: pass self._update_target(rpc_kube_version, upgrade_to_version) return KubeVersion.convert_with_links(rpc_kube_version)
34.198675
77
0.678737
4,767
0.923122
0
0
3,309
0.640782
0
0
1,300
0.251743
521bf34ea08938ba109cb9382937e70cd9798623
1,699
py
Python
starter.py
michaelStettler/unsup2
741df1585eb49c4f5f9e41849140a91419b49179
[ "Apache-2.0" ]
null
null
null
starter.py
michaelStettler/unsup2
741df1585eb49c4f5f9e41849140a91419b49179
[ "Apache-2.0" ]
null
null
null
starter.py
michaelStettler/unsup2
741df1585eb49c4f5f9e41849140a91419b49179
[ "Apache-2.0" ]
null
null
null
import sys

import pylab as plb
import numpy as np
import mountaincar


class DummyAgent():
    """A not so good agent for the mountain-car task."""

    def __init__(self, mountain_car=None, parameter1=3.0):
        """Create the agent.

        Parameters
        ----------
        mountain_car -- a mountaincar.MountainCar instance; a fresh one
            is created when None is given.
        parameter1 -- placeholder parameter (unused by this dummy agent).
        """
        if mountain_car is None:
            self.mountain_car = mountaincar.MountainCar()
        else:
            self.mountain_car = mountain_car

        self.parameter1 = parameter1

    def visualize_trial(self, n_steps=200):
        """Do a trial without learning, with display.

        Parameters
        ----------
        n_steps -- number of steps to simulate for
        """
        # prepare for the visualization
        plb.ion()
        plb.pause(0.0001)
        mv = mountaincar.MountainCarViewer(self.mountain_car)
        mv.create_figure(n_steps, n_steps)
        plb.show()

        # make sure the mountain-car is reset
        self.mountain_car.reset()

        for n in range(n_steps):
            # BUG FIX: sys.stdout.flush() was previously passed as an
            # argument to print(), so its None return value was printed.
            # Print first, then flush.
            print('\rt =', self.mountain_car.t)
            sys.stdout.flush()

            # choose a random action
            self.mountain_car.apply_force(np.random.randint(3) - 1)
            # simulate the timestep
            self.mountain_car.simulate_timesteps(100, 0.01)

            # update the visualization
            mv.update_figure()
            plb.show()
            plb.pause(0.0001)

            # check for rewards
            if self.mountain_car.R > 0.0:
                print("\rreward obtained at t = ", self.mountain_car.t)
                break

    def learn(self):
        # This is your job!
        pass


if __name__ == "__main__":
    d = DummyAgent()
    d.visualize_trial()
    plb.show()
26.138462
71
0.557975
1,539
0.905827
0
0
0
0
0
0
427
0.251324
521cb4608b60e2e11c8bc3b079b7fd0fe6924d2c
2,260
py
Python
grove/alpha/jordan_gradient/jordan_gradient.py
mkeshita/grove
dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3
[ "Apache-2.0" ]
229
2017-01-10T03:11:54.000Z
2018-11-26T10:57:49.000Z
grove/alpha/jordan_gradient/jordan_gradient.py
mkeshita/grove
dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3
[ "Apache-2.0" ]
123
2017-01-10T21:06:51.000Z
2018-11-27T19:38:22.000Z
grove/alpha/jordan_gradient/jordan_gradient.py
mkeshita/grove
dc6bf6ec63e8c435fe52b1e00f707d5ce4cdb9b3
[ "Apache-2.0" ]
95
2017-01-10T03:03:45.000Z
2018-11-28T00:42:28.000Z
import numpy as np

from pyquil import Program
from pyquil.api import QuantumComputer, get_qc

from grove.alpha.jordan_gradient.gradient_utils import (binary_float_to_decimal_float,
                                                        measurements_to_bf)
from grove.alpha.phaseestimation.phase_estimation import phase_estimation


def gradient_program(f_h: float, precision: int) -> Program:
    """
    Gradient estimation via Jordan's algorithm (10.1103/PhysRevLett.95.050501).

    :param f_h: Oracle output at perturbation h.
    :param precision: Bit precision of gradient.
    :return: Quil program to estimate gradient of f.
    """
    # The oracle value is encoded as a global phase on a diagonal 2x2 unitary,
    # which phase estimation then reads out to `precision` bits.
    phase = np.exp(1.0j * 2 * np.pi * abs(f_h))
    oracle_unitary = np.array([[phase, 0],
                               [0, phase]])
    return phase_estimation(oracle_unitary, precision)


def estimate_gradient(f_h: float, precision: int,
                      gradient_max: int = 1,
                      n_measurements: int = 50,
                      qc: QuantumComputer = None) -> float:
    """
    Estimate the gradient using function evaluation at perturbation, h.

    :param f_h: Oracle output at perturbation h.
    :param precision: Bit precision of gradient.
    :param gradient_max: OOM estimate of largest gradient value.
    :param n_measurements: Number of times to measure system.
    :param qc: The QuantumComputer object.
    :return: Decimal estimate of gradient.
    """
    # Normalize the oracle output into the range the phase encoding expects.
    scaled_f_h = f_h * (1. / gradient_max)
    perturbation_sign = np.sign(scaled_f_h)

    program = gradient_program(scaled_f_h, precision)

    # Default to a QVM sized to the program when no device was supplied.
    if qc is None:
        qc = get_qc(f"{len(program.get_qubits())}q-qvm")

    program.wrap_in_numshots_loop(n_measurements)
    executable = qc.compiler.native_quil_to_executable(program)
    measurements = qc.run(executable)

    # Fold measurements into a signed binary fraction, convert to decimal,
    # then undo the normalization applied above.
    bf_estimate = perturbation_sign * measurements_to_bf(measurements)
    deci_estimate = binary_float_to_decimal_float('{0:.16f}'.format(bf_estimate))
    return deci_estimate * gradient_max
33.731343
86
0.684513
0
0
0
0
0
0
0
0
861
0.380973
521f46f810658251a27b571cb4480e27cd10089c
732
py
Python
test/benchmark/jvp_conv2d.py
jabader97/backpack
089daafa0d611e13901fd7ecf8a0d708ce7a5928
[ "MIT" ]
395
2019-10-04T09:37:52.000Z
2022-03-29T18:00:56.000Z
test/benchmark/jvp_conv2d.py
jabader97/backpack
089daafa0d611e13901fd7ecf8a0d708ce7a5928
[ "MIT" ]
78
2019-10-11T18:56:43.000Z
2022-03-23T01:49:54.000Z
test/benchmark/jvp_conv2d.py
jabader97/backpack
089daafa0d611e13901fd7ecf8a0d708ce7a5928
[ "MIT" ]
50
2019-10-03T16:31:10.000Z
2022-03-15T19:36:14.000Z
from torch import randn
from torch.nn import Conv2d

from backpack import extend


def data_conv2d(device="cpu"):
    """Build a Conv2d benchmark fixture: input, extended module, output,
    and random seed vectors for JVP/VJP products (both autograd- and
    backpack-shaped views)."""
    batch, in_channels, in_h, in_w = 100, 10, 32, 32
    out_channels, kernel_h, kernel_w = 25, 5, 5

    X = randn(batch, in_channels, in_h, in_w, requires_grad=True, device=device)
    module = extend(Conv2d(in_channels, out_channels, (kernel_h, kernel_w))).to(device=device)
    out = module(X)

    # Valid convolution (no padding): spatial dims shrink by kernel-1.
    out_h = in_h - (kernel_h - 1)
    out_w = in_w - (kernel_w - 1)

    vin = randn(batch, out_channels, out_h, out_w, device=device)
    vout = randn(batch, in_channels, in_h, in_w, device=device)

    return {
        "X": X,
        "module": module,
        "output": out,
        "vout_ag": vout,
        "vout_bp": vout.view(batch, -1, 1),
        "vin_ag": vin,
        "vin_bp": vin.view(batch, -1, 1),
    }
25.241379
76
0.568306
0
0
0
0
0
0
0
0
58
0.079235
521fcaa4ac03261f5d66136190c373388bc3f18a
5,229
py
Python
BSVega_vs_Upsilon.py
JanObloj/Robust-uncertainty-sensitivity-analysis
b6da9889171e8d5c4b676ca33a3748d7c4b884c7
[ "CC0-1.0" ]
null
null
null
BSVega_vs_Upsilon.py
JanObloj/Robust-uncertainty-sensitivity-analysis
b6da9889171e8d5c4b676ca33a3748d7c4b884c7
[ "CC0-1.0" ]
null
null
null
BSVega_vs_Upsilon.py
JanObloj/Robust-uncertainty-sensitivity-analysis
b6da9889171e8d5c4b676ca33a3748d7c4b884c7
[ "CC0-1.0" ]
null
null
null
# -*- coding: utf-8 -*-
"""
Spyder Editor

Code written by Samuel Drapeau with modifications by Johannes Wiesel and Jan Obloj
This file produces plots comparing our first order sensitivity with BS vega.
"""
# %%
# To run the stuff, you need the package plotly in your anaconda "conda install plotly"
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import plotly.io as pio
init_notebook_mode()
pio.renderers.default = 'svg'

import numpy as np
import numpy.random
import pandas as pd

from scipy.stats import norm, multivariate_normal
from scipy.optimize import minimize

import time

_tstart_stack = []


def tic():
    """Start a (stackable) timer."""
    _tstart_stack.append(time.time())


def toc(fmt="Elapsed: %s s"):
    """Print time elapsed since the matching tic()."""
    print(fmt % (time.time() - _tstart_stack.pop()))


# %%
# We first provide the computation of a call option according to BS (we assume Log normal distribution)
# definition of the dplus and minus functions
# and the BS formula.

def dplus(S, K, T, sigma):
    """d+ term of the Black-Scholes formula."""
    sigmaT = sigma * T ** 0.5
    return np.log(S/K)/sigmaT + sigmaT/2


def dminus(S, K, T, sigma):
    """d- term of the Black-Scholes formula."""
    sigmaT = sigma * T ** 0.5
    return np.log(S/K)/sigmaT - sigmaT/2


def BS(S, K, T, sigma, Type=1):
    """Black-Scholes price; Type=1 for a call, Type=-1 for a put."""
    factor1 = S * norm.cdf(Type * dplus(S, K, T, sigma))
    factor2 = K * norm.cdf(Type * dminus(S, K, T, sigma))
    return Type * (factor1 - factor2)


# Now we provide the computation for the exact call according to the computations in BDT
# We take p = 2

def Robust_Call_Exact_fun(S, K, T, sigma, delta):
    """Exact robust call price for uncertainty radius delta (p = 2)."""
    def fun(v):
        # v[0] = a, v[1] = lambda
        price = BS(S, max(K - (2 * v[0] + 1) / (2 * v[1]), 0.000001), T, sigma)
        return price + v[0] ** 2 / (2 * v[1]) + 0.5 * v[1] * delta ** 2

    def cons_fun(v):
        # the value of v[0] should be constrained to keep strike positive
        tmp = K - (2 * v[0] + 1) / (2 * v[1])
        return tmp

    cons = ({'type': 'ineq', 'fun': cons_fun})
    guess = np.array([0, 1])
    # FIX: np.Inf (deprecated alias, removed in NumPy 2.0) -> np.inf
    bounds = ((-np.inf, np.inf), (0, np.inf))
    res = minimize(fun, guess, constraints=cons, method='SLSQP', bounds=bounds)
    return res.fun


# Vectorized version so it can be evaluated over arrays of deltas/strikes.
Robust_Call_Exact = np.vectorize(Robust_Call_Exact_fun)


# Now we provide the computation for the first order model uncertainty sensitivity (Upsilon)
# and the resulting BS robust price approximation
# We take p = 2

def Robust_Call_Upsilon(S, K, T, sigma, delta):
    """First-order model-uncertainty sensitivity (Upsilon) of a BS call."""
    muK = norm.cdf(dminus(S, K, T, sigma))
    correction = np.sqrt(muK * (1 - muK))
    return correction


def Robust_Call_Approximation(S, K, T, sigma, delta):
    """BS price plus first-order robustness correction delta * Upsilon."""
    price = BS(S, K, T, sigma)
    correction = Robust_Call_Upsilon(S, K, T, sigma, delta)
    return price + correction * delta


# %%
# Ploting the robust call and FO appriximation for a given strike and increasing uncertainty radius
S = 1
K = 1.2
T = 1
sigma = 0.2
Delta = np.linspace(0, 0.2, 50)

Y0 = BS(S, K, T, sigma)
Y1 = Robust_Call_Approximation(S, K, T, sigma, Delta)
Y2 = Robust_Call_Exact(S, K, T, sigma, Delta)

fig = go.Figure()
fig.add_scatter(x = Delta, y = Y1, name = 'FO')
fig.add_scatter(x = Delta, y = Y2, name = 'RBS')
#fig.layout.title = "Exact Robust Call vs First Order Approx: Strike K="+str(K)+", BS Price="+str(np.round(Y0,4))
fig.layout.xaxis.title = "delta"
fig.layout.yaxis.title = "Price"
iplot(fig)

# %%
# Ploting the robust call and FO appriximation for a given radius of uncertainty and a range of strikes
S = 1
K = np.linspace(0.6, 1.4, 100)
T = 1
sigma = 0.2
delta = 0.05

Y0 = Robust_Call_Approximation(S, K, T, sigma, delta)
Y1 = Robust_Call_Exact(S, K, T, sigma, delta)
Y2 = BS(S, K, T, sigma)

fig = go.Figure()
fig.add_scatter(x = K, y = Y0, name = 'FO')
fig.add_scatter(x = K, y = Y1, name = 'Exact')
fig.add_scatter(x = K, y = Y2, name = 'BS')
fig.layout.title = "Call Price vs Exact Robust Call and First Order Approx : delta ="+str(delta)
fig.layout.xaxis.title = "Strike"
fig.layout.yaxis.title = "Price"
iplot(fig)

# %%
# Run a plot to comapre BS Vega and BS Upsilon (Uncertainty Sensitivity)
# Plots show the sensitivities
S = 1
K = np.linspace(0.4 * S, 2 * S, 100)
T = 1
sigma = 0.2
delta = 0.02  #is irrelevant here

Y1 = S * (norm.pdf(dplus(S, K, T, sigma)))
Y0 = S * (Robust_Call_Upsilon(S, K, T, sigma, delta))

fig = go.Figure()
fig.add_scatter(x = K, y = Y0, name = 'BS Upsilon')
fig.add_scatter(x = K, y = Y1, name = 'BS Vega')
#fig.layout.title = "Call Price Sensitivity: Vega vs Upsilon, sigma= "+str(sigma)
fig.layout.xaxis.title = "Strike"
fig.layout.yaxis.title = "Price"
iplot(fig)

# %%
# Run a ploting to comapre BS Vega and BS Upsilon (Uncertainty Sensitivity)
# Plots show the sensitivities
S = 1
K = np.linspace(0.6 * S, 1.4 * S, 100)
T = 1
sigma = 0.2
delta = 0.02  #is irrelevant here

Y0 = S * (norm.pdf(dplus(S, K * np.exp(T * sigma ** 2), T, sigma)) + 1/2 - 1/np.sqrt(2 * np.pi))
Y1 = S * (Robust_Call_Upsilon(S, K, T, sigma, delta))

fig = go.Figure()
fig.add_scatter(x = K, y = Y0, name = 'BS Vega (shifted) + const')
fig.add_scatter(x = K, y = Y1, name = 'BS Upsilon')
fig.layout.title = "Call Price Sensitivity: Vega vs Upsilon, sigma= "+str(sigma)
fig.layout.xaxis.title = "Strike"
fig.layout.yaxis.title = "Price"
iplot(fig)
27.376963
113
0.649455
0
0
0
0
0
0
0
0
1,779
0.340218
522390db2778845f2c2d845e756b665082e86b09
4,360
py
Python
lib/python/archive/cplexTest11.py
craigmax-dev/Mixed-Integer-Linear-Programming-for-Spacecraft-Maneuvers
d14bda5f0dca2bf6b7e25b58e36d10e096b7612f
[ "MIT" ]
null
null
null
lib/python/archive/cplexTest11.py
craigmax-dev/Mixed-Integer-Linear-Programming-for-Spacecraft-Maneuvers
d14bda5f0dca2bf6b7e25b58e36d10e096b7612f
[ "MIT" ]
3
2020-09-30T09:22:50.000Z
2020-10-20T19:10:53.000Z
lib/python/archive/cplexTest11.py
craigmax-dev/Mixed-Integer-Linear-Programming-for-Spacecraft-Maneuvers
d14bda5f0dca2bf6b7e25b58e36d10e096b7612f
[ "MIT" ]
null
null
null
# Sports-scheduling MILP example (IBM docplex "sports" model):
# schedule one season of NFL-style matches subject to divisional rules.
import sys
import docplex.mp
import cplex

# Teams in 1st division
team_div1 = ["Baltimore Ravens","Cincinnati Bengals", "Cleveland Browns","Pittsburgh Steelers","Houston Texans",
             "Indianapolis Colts","Jacksonville Jaguars","Tennessee Titans","Buffalo Bills","Miami Dolphins",
             "New England Patriots","New York Jets","Denver Broncos","Kansas City Chiefs","Oakland Raiders",
             "San Diego Chargers"]

# Teams in 2nd division
team_div2 = ["Chicago Bears","Detroit Lions","Green Bay Packers","Minnesota Vikings","Atlanta Falcons",
             "Carolina Panthers","New Orleans Saints","Tampa Bay Buccaneers","Dallas Cowboys","New York Giants",
             "Philadelphia Eagles","Washington Redskins","Arizona Cardinals","San Francisco 49ers",
             "Seattle Seahawks","St. Louis Rams"]

#number_of_matches_to_play = 1  # Number of match to play between two teams on the league
# Schedule parameters
nb_teams_in_division = 8
max_teams_in_division = 16
number_of_matches_inside_division = 1
number_of_matches_outside_division = 1

# Styling for the notebook HTML rendering of pandas DataFrames below.
CSS = """
body {
    margin: 0;
    font-family: Helvetica;
}
table.dataframe {
    border-collapse: collapse;
    border: none;
}
table.dataframe tr {
    border: none;
}
table.dataframe td, table.dataframe th {
    margin: 0;
    border: 1px solid white;
    padding-left: 0.25em;
    padding-right: 0.25em;
}
table.dataframe th:not(:empty) {
    background-color: #fec;
    text-align: left;
    font-weight: normal;
}
table.dataframe tr:nth-child(2) th:empty {
    border-left: none;
    border-right: 1px dashed #888;
}
table.dataframe td {
    border: 2px solid #ccf;
    background-color: #f4f4ff;
}
table.dataframe thead th:first-child {
    display: none;
}
table.dataframe tbody th {
    display: none;
}
"""

from IPython.core.display import HTML
HTML('<style>{}</style>'.format(CSS))

import pandas as pd

team1 = pd.DataFrame(team_div1)
team2 = pd.DataFrame(team_div2)
team1.columns = ["AFC"]
team2.columns = ["NFC"]

teams = pd.concat([team1,team2], axis=1)

from IPython.display import display
display(teams)

import numpy as np

nb_teams = 2 * nb_teams_in_division
# NOTE(review): `teams` is deliberately rebound here from the display
# DataFrame above to the integer team indices used by the model.
teams = range(nb_teams)

# Calculate the number of weeks necessary
nb_weeks = (nb_teams_in_division - 1) * number_of_matches_inside_division \
           + nb_teams_in_division * number_of_matches_outside_division

# Weeks to schedule
weeks = range(nb_weeks)

# Season is split into two halves
first_half_weeks = range(int(np.floor(nb_weeks / 2)))
# NOTE(review): divides by 3, not 2, despite the "first half" naming —
# this matches the upstream IBM example; confirm before changing.
nb_first_half_games = int(np.floor(nb_weeks / 3))

from collections import namedtuple

# A match pairs two team indices; is_divisional == 1 when both teams are in
# the same division.
match = namedtuple("match",["team1","team2","is_divisional"])

matches = {match(t1,t2, 1 if ( t2 <= nb_teams_in_division or t1 > nb_teams_in_division) else 0)
           for t1 in teams for t2 in teams if t1 < t2}

# Required number of times each pairing is played.
nb_play = { m : number_of_matches_inside_division if m.is_divisional==1
                else number_of_matches_outside_division
            for m in matches}

from docplex.mp.environment import Environment
env = Environment()
env.print_information()

from docplex.mp.model import Model

mdl = Model("sports")

# Binary decision variable: plays[m, w] == 1 iff match m is played in week w.
plays = mdl.binary_var_matrix(matches, weeks, lambda ij: "x_%s_%d" %(str(ij[0]), ij[1]))

# Each pairing is played exactly its required number of times.
mdl.add_constraints( mdl.sum(plays[m,w] for w in weeks) == nb_play[m]
                     for m in matches)
mdl.print_information()

# Each team plays exactly one match per week.
mdl.add_constraints( mdl.sum(plays[m,w] for m in matches if (m.team1 == t or m.team2 == t) ) == 1
                     for w in weeks for t in teams)
mdl.print_information()

# No pairing is repeated in two consecutive weeks.
mdl.add_constraints( plays[m,w] + plays[m,w+1] <= 1
                     for w in weeks
                     for m in matches
                     if w < nb_weeks-1)
mdl.print_information()

# Each team plays a minimum number of divisional games in the first half.
mdl.add_constraints( mdl.sum(plays[m,w] for w in first_half_weeks for m in matches
                             if (((m.team1 == t or m.team2 == t) and m.is_divisional == 1 )))
                     >= nb_first_half_games
                     for t in teams)
mdl.print_information()

gain = { w : w*w for w in weeks}

# If an intradivisional pair plays in week w, Gain[w] is added to the objective.
mdl.maximize( mdl.sum (m.is_divisional * gain[w] * plays[m,w] for m in matches for w in weeks) )

mdl.print_information()

assert mdl.solve(), "!!! Solve of the model fails"

mdl.report()
29.659864
115
0.666743
0
0
0
0
0
0
0
0
1,766
0.405046
52240c1143495c764743a4c98d2bd58e20e4257a
1,995
py
Python
DeepLearningExamples/TensorFlow2/Recommendation/WideAndDeep/data/outbrain/nvtabular/preproc.py
puririshi98/benchmark
79f554f1e1cf36f62994c78e0e6e5b360f554022
[ "BSD-3-Clause" ]
null
null
null
DeepLearningExamples/TensorFlow2/Recommendation/WideAndDeep/data/outbrain/nvtabular/preproc.py
puririshi98/benchmark
79f554f1e1cf36f62994c78e0e6e5b360f554022
[ "BSD-3-Clause" ]
null
null
null
DeepLearningExamples/TensorFlow2/Recommendation/WideAndDeep/data/outbrain/nvtabular/preproc.py
puririshi98/benchmark
79f554f1e1cf36f62994c78e0e6e5b360f554022
[ "BSD-3-Clause" ]
null
null
null
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import os

# Must be set before the NVTabular/TF imports below pick it up.
os.environ['TF_MEMORY_ALLOCATION'] = "0.0"
from data.outbrain.nvtabular.utils.converter import nvt_to_tfrecords
from data.outbrain.nvtabular.utils.workflow import execute_pipeline
from data.outbrain.nvtabular.utils.arguments import parse_args
from data.outbrain.nvtabular.utils.setup import create_config


def is_empty(path):
    """Return True when `path` does not exist, or is a directory with no entries."""
    return not os.path.exists(path) or (not os.path.isfile(path) and not os.listdir(path))


def main():
    """Run NVTabular preprocessing, then TFRecords conversion, skipping
    any stage whose output directory is already populated."""
    args = parse_args()
    config = create_config(args)
    if is_empty(args.metadata_path):
        logging.warning('Creating new stats data into {}'.format(config['stats_file']))
        execute_pipeline(config)
    else:
        # BUG FIX: this message was a plain string, so the literal text
        # '{args.metadata_path}' was logged; it is now an f-string.
        logging.warning(f'Directory is not empty {args.metadata_path}')
        logging.warning('Skipping NVTabular preprocessing')

    if os.path.exists(config['output_train_folder']) and os.path.exists(config['output_valid_folder']):
        if is_empty(config['tfrecords_path']):
            logging.warning('Executing NVTabular parquets to TFRecords conversion')
            nvt_to_tfrecords(config)
        else:
            logging.warning(f"Directory is not empty {config['tfrecords_path']}")
            logging.warning('Skipping TFrecords conversion')
    else:
        logging.warning(f'Train and validation dataset not found in {args.metadata_path}')


if __name__ == '__main__':
    main()
38.365385
103
0.732331
0
0
0
0
0
0
0
0
1,017
0.509774
5224ab7c6a4faffec5a38c6d30530aaf66d6c24c
3,596
py
Python
sfzlint/lint.py
kmturley/sfzlint
e92189301d798d00f3919cccd1ce29da2c7fa361
[ "MIT" ]
null
null
null
sfzlint/lint.py
kmturley/sfzlint
e92189301d798d00f3919cccd1ce29da2c7fa361
[ "MIT" ]
null
null
null
sfzlint/lint.py
kmturley/sfzlint
e92189301d798d00f3919cccd1ce29da2c7fa361
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
import re
import sys
import xml.etree.ElementTree as ET
from argparse import ArgumentParser
from pathlib import Path

from lark.exceptions import UnexpectedCharacters, UnexpectedToken

from .parser import validate, SFZ, SFZValidatorConfig
from . import spec, settings


# Error-message templates, keyed by the --format command-line choice.
formats = {
    'default': '{path}:{line}:{col}:{sev} {msg}',
    'nopath': '(unknown):{line}:{col}:{sev} {msg}',
}


def ecb(path, e_format=formats['default']):
    """Build an error callback that prints messages formatted for `path`."""
    def err_callback(sev, msg, token, file_path):
        # Prefer the path reported with the error (e.g. an #include target).
        msg_path = file_path if file_path else path
        message = e_format.format(
            path=msg_path,
            dirname=path.parent,
            filename=path.name,
            line=token.line,
            col=token.column,
            sev=sev[0],
            msg=msg)
        print(message)
    return err_callback


def lint(options):
    """Lint `options.file` — a single file, or every .sfz under a directory."""
    spec_versions = set(options.spec_version) if options.spec_version else None
    path = Path(options.file)
    # BUG FIX: `path.exists` (without parentheses) is a bound method and is
    # always truthy, so missing paths were never reported; call it.
    if not path.exists():
        raise IOError(f'{path} not found')
    if path.is_dir():
        filenames = path.glob('**/*.sfz')
    else:
        filenames = path,
    for filename in filenames:
        config = SFZValidatorConfig(
            spec_versions=spec_versions,
            file_path=filename,
            check_includes=options.check_includes,
        )
        if options.rel_path:
            config.rel_path = options.rel_path
        if filename.suffix == '.xml':
            lint_xml(filename, config)
        else:
            lint_sfz(filename, config=config)


def lint_xml(filename, config):
    """Lint every sfz file referenced by AriaElement entries in a program .xml."""
    with open(filename) as fob:
        xml = fob.read()
    # xml is "malformed" because it lacks a single root element
    # solution is to wrap it in a "root" tag
    tree = ET.fromstring(
        re.sub(r"(<\?xml[^>]+\?>)", r"\1<root>", xml) + "</root>")
    # Defines are named with a leading sigil; strip it for the lookup key.
    defines = {
        d.attrib['name'][1:]: d.attrib['value']
        for d in tree.findall('.//Define')}
    for ae in tree.findall('.//AriaElement'):
        ae_path = filename.parent / ae.attrib['path']
        config.file_name = ae_path
        config.rel_path = ae_path.parent
        config.check_includes = True  # Always check on program .xml
        if defines:
            config.sfz = SFZ(defines=defines)
        lint_sfz(ae_path, config)


def lint_sfz(filename, config):
    """Validate a single sfz file, routing parse errors through the callback."""
    err_cb = ecb(filename)
    try:
        validate(filename, err_cb=err_cb, config=config)
    except (UnexpectedCharacters, UnexpectedToken) as e:
        # Lark exceptions carry .line/.column, so they can stand in as tokens.
        message = str(e).split('\n', 1)[0]
        err_cb('ERR', message, e, filename)


def main():
    """Command-line entry point."""
    parser = ArgumentParser(description='linter/validator for sfz files')
    parser.add_argument(
        'file', type=Path,
        help='sfz file or directory to recursivly search')
    parser.add_argument(
        '--format', choices=formats.keys(),
        help='error format for output')
    parser.add_argument(
        '--spec-version', nargs='*',
        choices=tuple(spec.ver_mapping.values()),
        help='sfz spec to validate against')
    parser.add_argument(
        '-i', '--check-includes', action='store_true',
        help='read and check any #include files as well')
    parser.add_argument(
        '--rel-path',
        help='validate includes and sample paths relative to this path')
    parser.add_argument(
        '--no-pickle', action='store_true',
        help='do not use the pickle cache (for testing)')

    args = parser.parse_args()
    settings.pickle = not args.no_pickle
    lint(args)


if __name__ == '__main__':
    try:
        main()
    except BrokenPipeError:
        sys.stderr.close()
30.474576
79
0.618187
0
0
0
0
0
0
0
0
768
0.213571
5227a62aa3a0d8203233b6c9c2725988f35970a3
2,809
py
Python
utils/GeneralUtils.py
nanohedra/nanohedra
3921b7f5ce10e0e3393c3b675bb97ccbecb96663
[ "MIT" ]
2
2020-12-07T00:38:32.000Z
2021-05-13T19:36:17.000Z
utils/GeneralUtils.py
nanohedra/nanohedra
3921b7f5ce10e0e3393c3b675bb97ccbecb96663
[ "MIT" ]
null
null
null
utils/GeneralUtils.py
nanohedra/nanohedra
3921b7f5ce10e0e3393c3b675bb97ccbecb96663
[ "MIT" ]
1
2021-05-13T19:36:18.000Z
2021-05-13T19:36:18.000Z
import numpy as np # Copyright 2020 Joshua Laniado and Todd O. Yeates. __author__ = "Joshua Laniado and Todd O. Yeates" __copyright__ = "Copyright 2020, Nanohedra" __version__ = "1.0" def euclidean_squared_3d(coordinates_1, coordinates_2): if len(coordinates_1) != 3 or len(coordinates_2) != 3: raise ValueError("len(coordinate list) != 3") elif type(coordinates_1) is not list or type(coordinates_2) is not list: raise TypeError("input parameters are not of type list") else: x1, y1, z1 = coordinates_1[0], coordinates_1[1], coordinates_1[2] x2, y2, z2 = coordinates_2[0], coordinates_2[1], coordinates_2[2] return (x1 - x2) ** 2 + (y1 - y2) ** 2 + (z1 - z2) ** 2 def center_of_mass_3d(coordinates): n = len(coordinates) if n != 0: cm = [0. for j in range(3)] for i in range(n): for j in range(3): cm[j] = cm[j] + coordinates[i][j] for j in range(3): cm[j] = cm[j] / n return cm else: print "ERROR CALCULATING CENTER OF MASS" return None def rot_txint_set_txext_frag_coord_sets(coord_sets, rot_mat=None, internal_tx_vec=None, set_mat=None, ext_tx_vec=None): if coord_sets != list(): # Get the length of each coordinate set coord_set_lens = [] for coord_set in coord_sets: coord_set_lens.append(len(coord_set)) # Stack coordinate set arrays in sequence vertically (row wise) coord_sets_vstacked = np.vstack(coord_sets) # Rotate stacked coordinates if rotation matrix is provided if rot_mat is not None: rot_mat_T = np.transpose(rot_mat) coord_sets_vstacked = np.matmul(coord_sets_vstacked, rot_mat_T) # Translate stacked coordinates if internal translation vector is provided if internal_tx_vec is not None: coord_sets_vstacked = coord_sets_vstacked + internal_tx_vec # Set stacked coordinates if setting matrix is provided if set_mat is not None: set_mat_T = np.transpose(set_mat) coord_sets_vstacked = np.matmul(coord_sets_vstacked, set_mat_T) # Translate stacked coordinates if external translation vector is provided if ext_tx_vec is not None: coord_sets_vstacked = coord_sets_vstacked + 
ext_tx_vec # Slice stacked coordinates back into coordinate sets transformed_coord_sets = [] slice_index_1 = 0 for coord_set_len in coord_set_lens: slice_index_2 = slice_index_1 + coord_set_len transformed_coord_sets.append(coord_sets_vstacked[slice_index_1:slice_index_2].tolist()) slice_index_1 += coord_set_len return transformed_coord_sets else: return []
33.047059
119
0.655393
0
0
0
0
0
0
0
0
635
0.226059
5228ebf5dbffc66cea1495a066ef36fc82add276
1,187
py
Python
vscode/app.py
abdennour/samples
b4661b15bf003416c368f90e4ee3181a20c80313
[ "Apache-2.0" ]
null
null
null
vscode/app.py
abdennour/samples
b4661b15bf003416c368f90e4ee3181a20c80313
[ "Apache-2.0" ]
null
null
null
vscode/app.py
abdennour/samples
b4661b15bf003416c368f90e4ee3181a20c80313
[ "Apache-2.0" ]
2
2021-02-01T16:55:10.000Z
2021-06-17T01:49:18.000Z
from flask import Flask, render_template, request, make_response, g
import os
import socket
import random
import json
import collections

# Host identity shown in the rendered page.
hostname = socket.gethostname()

# In-memory vote tally; unseen options start at zero.
votes = collections.defaultdict(int)

app = Flask(__name__)


def getOptions():
    """Return the (option_a, option_b) vote choices."""
    return 'Cats', 'Dogs'


@app.route("/", methods=['POST','GET'])
def hello():
    """Render the voting page; on POST, record the submitted vote first."""
    option_a, option_b = getOptions()

    if request.method == 'POST':
        chosen = option_a if request.form['vote'] == "a" else option_b
        votes[chosen] += 1

    rendered = render_template(
        'index.html',
        option_a=option_a,
        option_b=option_b,
        hostname=hostname,
        votes_a=votes[option_a],
        votes_b=votes[option_b],
    )
    return make_response(rendered)


if __name__ == "__main__":
    watched_files = []
    if "development" == os.getenv("FLASK_ENV"):
        # Hot-reload templates and the stylesheet while developing.
        app.jinja_env.auto_reload = True
        app.config['TEMPLATES_AUTO_RELOAD'] = True
        watched_files = [
            "./static/stylesheets/style.css"
        ]
    app.run(
        host='0.0.0.0',
        port=8080,
        extra_files=watched_files
    )
23.27451
67
0.620051
0
0
0
0
500
0.42123
0
0
151
0.127211
5229cb40a928e00a2ff0016043a6e3c37305c288
4,959
py
Python
src/NumericLettersParsers.py
shaigoldman/YotzerHaMafteach
e457de2fc56e9cb372133b4bc61b7d5deca0c67a
[ "MIT" ]
null
null
null
src/NumericLettersParsers.py
shaigoldman/YotzerHaMafteach
e457de2fc56e9cb372133b4bc61b7d5deca0c67a
[ "MIT" ]
null
null
null
src/NumericLettersParsers.py
shaigoldman/YotzerHaMafteach
e457de2fc56e9cb372133b4bc61b7d5deca0c67a
[ "MIT" ]
null
null
null
"""Hebrew Letter-Based Numering Identifiers and Quantifiers""" def remove_chucks(hebrew_word) -> str: """ Hebrew numbering systems use 'chuck-chucks' (aka quotation marks that aren't being used to signify a quotation) in between letters that are meant as numerics rather than words. To make it easier to interpret their numeric value we may often want to remove the chuck to better read the value of the actual letters. This function does that. """ for chuck in ['״', '׳', '"', "'"]: while chuck in hebrew_word: hebrew_word = (hebrew_word[:hebrew_word.index(chuck)] + hebrew_word[hebrew_word.index(chuck)+1:]) return hebrew_word def chuck_with(word, prefix) -> str: """ This combines a word with a prefix and then readjusts the 'chuck-chuck' so it is in between the last two letters as is standard in hebrew writing. """ word = remove_chucks(word) word = prefix + word word = word[:-1] + '״' + word[-1] return word def has_chuck(word) -> bool: """ Returns True if there is a chuck-chuck in the word str. A function is neccesary because different ascii characters can represent chuck-chucks in hebrew. """ return ("'" in word or '"' in word or '״' in word) def can_be_prefix(letter, place) -> bool: """ In hebrew writing, some letters can serve as word prefixes, and should be discounted when considering the numeric value of the whole word. This returns True if the letter in the given position of the str can be a prefix. """ first_only = 'בכמלו' can_be_seconds = 'השד' if place <=2 and letter in can_be_seconds: return True elif place == 1 and letter in first_only: return True return False def is_numeric(hebrew_word, allow_prefix=False, fifty_cap=False, allow_hundreds=True) -> bool: """ Determines if a hebrew word may actually be not a word at all, but just a numeric placeholder. Args: hebrew_word (str): the word in question. allow_prefix (bool): if True, will ignore certain letters at the start of the word if they can be assumed as grammatical prefixes. 
fifty_cap (bool): if True, if the word can only form a number over fifty, it will be considered non-numeric. allow_hundreds (bool): since it is rare to have values over 100 represented by hebrew letters, pass false to return True if the only possible numeric values are above 100. Returns: True if it is possible for this word to be a number, False otherwise. """ ones = 'אבגדהוזחט' tens = 'יכלמנסעפצ' hundreds = 'קרשת' if fifty_cap: tens = 'יכלמנ' allow_hundreds=False hebrew_word = remove_chucks(hebrew_word) current_place = 100 prev_letter = 'ת' for index, letter in enumerate(hebrew_word): if allow_prefix and can_be_prefix(letter, index+1): continue elif letter in hundreds: if not allow_hundreds: return False elif current_place <= 100: return False elif hundreds.index(prev_letter) < hundreds.index(letter): return False elif (hundreds.index(prev_letter) == hundreds.index(letter) and letter != 'ת'): return False elif letter in tens: if current_place <= 10: return False current_place = 10 elif letter in ones: if current_place <= 1: return False current_place = 1 else: return False prev_letter = letter return True def remove_pref(h_str, allowed='מבו') -> str: if h_str[0] == 'ו': h_str = h_str[1:] if h_str[0] in allowed: h_str = h_str[1:] return h_str def gematria(letters_str): value = 0 ones = 'אבגדהוזחט' for ot in ones: if ot in letters_str: value += (ones.index(ot)+1) tens = 'יכלמנסעפצ' for ot in tens: if ot in letters_str: value += (tens.index(ot)+1) * 10 hunds = 'קרשת' for ot in hunds: if ot in letters_str: value += (hunds.index(ot)+1) * 100 return value def daf_numerical(daf_str): value = gematria(daf_str) if ':' in daf_str: value += .5 return value def perek_halacha_numerical(ph_str): if ':' in ph_str: perek, halacha = ph_str.split(':') else: perek = ph_str halacha = '' perek_value = gematria(perek) halacha_value = gematria(halacha) total_value = perek_value + halacha_value/100. return total_value
31.993548
71
0.589635
0
0
0
0
0
0
0
0
2,134
0.424592
522aa2be9a82dc1012b8fc44e2804018f597f455
1,024
py
Python
tests/test_add.py
OwenMcDonnell/pglet-python
1f7d72c6e5d34536f8ee03fac64b9dd34e465656
[ "MIT" ]
null
null
null
tests/test_add.py
OwenMcDonnell/pglet-python
1f7d72c6e5d34536f8ee03fac64b9dd34e465656
[ "MIT" ]
null
null
null
tests/test_add.py
OwenMcDonnell/pglet-python
1f7d72c6e5d34536f8ee03fac64b9dd34e465656
[ "MIT" ]
null
null
null
import pytest

import pglet
from pglet import Textbox, Stack


@pytest.fixture
def page():
    """Fresh headless pglet page for each test."""
    return pglet.page('test_add', no_window=True)


def test_add_single_control(page):
    # Adding one control returns the control with its assigned id.
    added = page.add(Textbox(id="txt1", label="First name:"))
    assert added.id == "txt1", "Test failed"


def test_add_controls_argv(page):
    # Multiple controls passed positionally come back as a list.
    first = Textbox(id="firstName", label="First name:")
    last = Textbox(id="lastName", label="Last name:")
    added = page.add(first, last, to="page", at=0)
    assert added == [first, last], "Test failed"


def test_add_controls_list(page):
    # A list argument behaves the same as positional controls.
    first = Textbox(id="firstName", label="First name:")
    last = Textbox(id="lastName", label="Last name:")
    added = page.add([first, last], to="page", at=0)
    assert added == [first, last], "Test failed"


def test_add_controls_to_another_control(page):
    # Children added under a container get the container id as a prefix.
    container = Stack(id="stack1", horizontal=True)
    page.add(container)
    textbox = page.add(Textbox(id="firstName", label="First name:"), to=container, at=0)
    assert textbox.id == "stack1:firstName", "Test failed"
32
63
0.655273
0
0
0
0
77
0.075195
0
0
241
0.235352
522af989e057c103ede689708f11a25f3ff8af2c
2,197
py
Python
src/collector/utils.py
baboon-king/2c
2b1093c9f15c713dac51aec5c1244ec285e49782
[ "Apache-2.0" ]
null
null
null
src/collector/utils.py
baboon-king/2c
2b1093c9f15c713dac51aec5c1244ec285e49782
[ "Apache-2.0" ]
null
null
null
src/collector/utils.py
baboon-king/2c
2b1093c9f15c713dac51aec5c1244ec285e49782
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
"""
    Created by howie.hu at 2021/04/29.
    Description: general-purpose helper functions for the collector
        (original Chinese: 采集器相关通用工具函数)
    Changelog: all notable changes to this file will be documented
"""
import os

import html2text
import requests
from gne import GeneralNewsExtractor
from readability import Document
from textrank4zh import TextRank4Keyword

from src.config import Config
from src.utils import LOGGER


def fetch_keyword_list(url_or_text: str = None):
    """
    Extract a keyword list from raw text, or from the page behind a URL.

    :param url_or_text: a URL (fetched and stripped to article text) or
        raw text; None/empty input yields an empty list.
    :return: up to 20 keywords, each at least 2 characters long.
    """
    # BUG FIX: a None default previously crashed .startswith, and a failed
    # fetch left text=None which crashed TextRank4Keyword.analyze.
    if url_or_text and url_or_text.startswith("http"):
        print(url_or_text)
        resp = send_get_request(url_or_text)
        text = html_to_text_gne(resp.text) if resp else None
    else:
        text = url_or_text
    if not text:
        # Nothing to analyze (bad input or failed fetch) — no keywords.
        return []
    tr4w = TextRank4Keyword(
        stop_words_file=os.path.join(Config.BASE_DIR, "model_data/data/stop_words.txt")
    )
    tr4w.analyze(text=text, lower=True, window=2)
    keyword_list = []
    for item in tr4w.get_keywords(20, word_min_len=2):
        keyword_list.append(item.word)
    return keyword_list


def html_to_text_gne(html: str):
    """
    Extract the core article text from HTML using GNE.

    :param html: raw HTML document.
    :return: stripped article body text.
    """
    extractor = GeneralNewsExtractor()
    result = extractor.extract(html, noise_node_list=['//div[@class="comment-list"]'])
    return result.get("content").strip()


def html_to_text_h2t(html: str):
    """
    Extract the core article text from HTML via readability + html2text.

    :param html: raw HTML document.
    :return: plain-text rendering of the page summary.
    """
    doc = Document(html)
    h = html2text.HTML2Text()
    h.ignore_links = True
    h.bypass_tables = False
    h.unicode_snob = False
    text = h.handle(doc.summary())
    return text.strip()


def send_get_request(url, params: dict = None, **kwargs):
    """
    Issue a GET request, logging (not raising) on failure.

    :param url: target URL.
    :param params: query parameters.
    :param kwargs: extra arguments forwarded to requests.get.
    :return: the Response, or None when the request raised.
    """
    try:
        resp = requests.get(url, params, **kwargs)
    except Exception as e:
        resp = None
        LOGGER.exception(f"请求出错 - {url} - {str(e)}")
    return resp


if __name__ == "__main__":
    url = "https://mp.weixin.qq.com/s/LKaYM7f7W4DXw7gnQxToKQ"
    resp = requests.get(url)
    text = html_to_text_gne(resp.text)
    print(text)
    res = fetch_keyword_list(url)
    print(res)
22.649485
87
0.645881
0
0
0
0
0
0
0
0
720
0.312636
522b2b0fa12d2a1dd5f454984652774d2801eebd
1,886
py
Python
Middleware.py
huobingli/fpyd
b64f3369a3603b61310e57f5fc3d7c5de994dec1
[ "MIT" ]
null
null
null
Middleware.py
huobingli/fpyd
b64f3369a3603b61310e57f5fc3d7c5de994dec1
[ "MIT" ]
null
null
null
Middleware.py
huobingli/fpyd
b64f3369a3603b61310e57f5fc3d7c5de994dec1
[ "MIT" ]
null
null
null
from mysql_comm.mysql_comm import * from redis_comm.redis_comm import * def insert_datas(datas): with UsingMysql(log_time=True) as um: pass def insert_data(database, data): with UsingMysql(log_time=True) as um: sql = "insert into " + database + "(fp_id, fp_title, fp_res_org, fp_report_time, fp_stock_name, fp_stock_code, fp_source_id, fp_is_stock) \ values(%s, %s, %s, %s, %s, %s, %s, %s)" params = ('%s' % data[0], '%s' % data[1], "%s" % data[2], "%s" % data[3], "%s" % data[4], "%s" % data[5], "%s" % data[6], "%d" % data[7]) # print(sql) um.cursor.execute(sql, params) def test_insert_data(): pass def fecth_data(database, condition=""): with UsingMysql(log_time=True) as um: sql = 'select * from %s %s' % (database, condition) print(sql) um.cursor.execute(sql) data_list = um.cursor.fetchall() print('-- 总数: %d' % len(data_list)) return data_list def test_feach_data(): pass def update_data(database, setdata, condition=""): with UsingMysql(log_time=True) as um: sql = "update %s %s %s" % (database, setdata, condition) um.cursor.execute(sql) def test_update_data(): pass def delete_data(database, condition=""): with UsingMysql(log_time=True) as um: sql = 'delete from %s %s' % (database, condition) um.cursor.execute(sql) def redis_set(key, value): with UsingRedis(log_time=True) as ur: ur.set_key_value(key, value) def redis_get(key): with UsingRedis(log_time=True) as ur: return ur.get_key_value(key) def redis_zset_set(set, key, value): with UsingRedis(log_time=True) as ur: ur.zset_set_key_value(set, key, value) def redis_zset_get(key): with UsingRedis(log_time=True) as ur: return ur.zset_get_key_value(key) if __name__ == '__main__': pass
30.918033
148
0.629905
0
0
0
0
0
0
0
0
300
0.15873
522c1f35038d5aefaffebc5bc1b3302ffd6f58fe
61,881
py
Python
tests/llist_test.py
kata198/python-cllist
2d8c0acffd23e7438883759c0abff6fe5425e7c9
[ "MIT" ]
2
2017-04-16T02:09:58.000Z
2018-07-21T21:43:10.000Z
tests/llist_test.py
kata198/python-cllist
2d8c0acffd23e7438883759c0abff6fe5425e7c9
[ "MIT" ]
1
2017-11-29T16:25:53.000Z
2017-11-29T16:25:53.000Z
tests/llist_test.py
kata198/python-cllist
2d8c0acffd23e7438883759c0abff6fe5425e7c9
[ "MIT" ]
1
2017-11-29T20:33:46.000Z
2017-11-29T20:33:46.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- import gc import sys import random import unittest from cllist import sllist from cllist import sllistnode from cllist import dllist from cllist import dllistnode gc.set_debug(gc.DEBUG_UNCOLLECTABLE | gc.DEBUG_STATS) if sys.hexversion >= 0x03000000: # python 3 compatibility wrappers def py23_xrange(*args): return range(*args) def py23_range(*args): return list(range(*args)) def cmp(a, b): if a == b: return 0 elif a < b: return -1 else: return 1 else: # python 2 compatibility wrappers def py23_xrange(*args): return xrange(*args) def py23_range(*args): return range(*args) class testsllist(unittest.TestCase): def test_init_empty(self): ll = sllist() self.assertEqual(len(ll), 0) self.assertEqual(ll.size, 0) self.assertEqual(list(ll), []) def test_init_with_sequence(self): ref = py23_range(0, 1024, 4) ll = sllist(ref) self.assertEqual(len(ll), len(ref)) self.assertEqual(ll.size, len(ref)) self.assertEqual(list(ll), ref) def test_init_with_non_sequence(self): self.assertRaises(TypeError, sllist, 1) self.assertRaises(TypeError, sllist, 1.5) def test_str(self): a = sllist([]) self.assertEqual(str(a), 'sllist()') b = sllist([None, 1, 'abc']) self.assertEqual(str(b), 'sllist([None, 1, abc])') def test_repr(self): a = sllist([]) self.assertEqual(repr(a), 'sllist()') b = sllist([None, 1, 'abc']) self.assertEqual(repr(b), 'sllist([None, 1, \'abc\'])') def test_node_str(self): a = sllist([None, None]).first self.assertEqual(str(a), 'sllistnode(None)') b = sllist([1, None]).first self.assertEqual(str(b), 'sllistnode(1)') c = sllist(['abc', None]).first self.assertEqual(str(c), 'sllistnode(abc)') def test_node_repr(self): a = sllist([None]).first self.assertEqual(repr(a), '<sllistnode(None)>') b = sllist([1, None]).first self.assertEqual(repr(b), '<sllistnode(1)>') c = sllist(['abc', None]).first self.assertEqual(repr(c), '<sllistnode(\'abc\')>') def test_cmp(self): a = sllist(py23_xrange(0, 1100)) b = sllist(py23_xrange(0, 1101)) c = 
sllist([1, 2, 3, 4]) d = sllist([1, 2, 3, 5]) e = sllist([1, 0, 0, 0]) f = sllist([0, 0, 0, 0]) self.assertEqual(cmp(a, a), 0) self.assertEqual(cmp(a, b), -1) self.assertEqual(cmp(b, a), 1) self.assertEqual(cmp(c, d), -1) self.assertEqual(cmp(d, c), 1) self.assertEqual(cmp(e, f), 1) self.assertEqual(cmp(f, e), -1) def test_cmp_nonlist(self): a = sllist(py23_xrange(0, 1100)) b = [py23_xrange(0, 1100)] if sys.hexversion < 0x03000000: # actual order is not specified by language self.assertNotEqual(cmp(a, b), 0) self.assertNotEqual(cmp(b, a), 0) self.assertNotEqual(cmp([], a), 0) self.assertNotEqual(cmp(a, []), 0) def test_eq(self): a = sllist(py23_xrange(0, 1100)) b = sllist(py23_xrange(0, 1101)) c = sllist([1, 2, 3, 4]) d = sllist([1, 2, 3, 5]) e = sllist([1, 0, 0, 0]) f = sllist([0, 0, 0, 0]) self.assertTrue(sllist() == sllist()) self.assertTrue(a == a) self.assertFalse(sllist() == a) self.assertFalse(a == sllist()) self.assertFalse(a == b) self.assertFalse(b == a) self.assertFalse(c == d) self.assertFalse(d == c) self.assertFalse(e == f) self.assertFalse(f == e) def test_ne(self): a = sllist(py23_xrange(0, 1100)) b = sllist(py23_xrange(0, 1101)) c = sllist([1, 2, 3, 4]) d = sllist([1, 2, 3, 5]) e = sllist([1, 0, 0, 0]) f = sllist([0, 0, 0, 0]) self.assertFalse(sllist() != sllist()) self.assertFalse(a != a) self.assertTrue(sllist() != a) self.assertTrue(a != sllist()) self.assertTrue(a != b) self.assertTrue(b != a) self.assertTrue(c != d) self.assertTrue(d != c) self.assertTrue(e != f) self.assertTrue(f != e) def test_lt(self): a = sllist(py23_xrange(0, 1100)) b = sllist(py23_xrange(0, 1101)) c = sllist([1, 2, 3, 4]) d = sllist([1, 2, 3, 5]) e = sllist([1, 0, 0, 0]) f = sllist([0, 0, 0, 0]) self.assertFalse(sllist() < sllist()) self.assertFalse(a < a) self.assertTrue(sllist() < a) self.assertFalse(a < sllist()) self.assertTrue(a < b) self.assertFalse(b < a) self.assertTrue(c < d) self.assertFalse(d < c) self.assertFalse(e < f) self.assertTrue(f < e) def 
test_gt(self): a = sllist(py23_xrange(0, 1100)) b = sllist(py23_xrange(0, 1101)) c = sllist([1, 2, 3, 4]) d = sllist([1, 2, 3, 5]) e = sllist([1, 0, 0, 0]) f = sllist([0, 0, 0, 0]) self.assertFalse(sllist() > sllist()) self.assertFalse(a > a) self.assertFalse(sllist() > a) self.assertTrue(a > sllist()) self.assertFalse(a > b) self.assertTrue(b > a) self.assertFalse(c > d) self.assertTrue(d > c) self.assertTrue(e > f) self.assertFalse(f > e) def test_le(self): a = sllist(py23_xrange(0, 1100)) b = sllist(py23_xrange(0, 1101)) c = sllist([1, 2, 3, 4]) d = sllist([1, 2, 3, 5]) e = sllist([1, 0, 0, 0]) f = sllist([0, 0, 0, 0]) self.assertTrue(sllist() <= sllist()) self.assertTrue(a <= a) self.assertTrue(sllist() <= a) self.assertFalse(a <= sllist()) self.assertTrue(a <= b) self.assertFalse(b <= a) self.assertTrue(c <= d) self.assertFalse(d <= c) self.assertFalse(e <= f) self.assertTrue(f <= e) def test_ge(self): a = sllist(py23_xrange(0, 1100)) b = sllist(py23_xrange(0, 1101)) c = sllist([1, 2, 3, 4]) d = sllist([1, 2, 3, 5]) e = sllist([1, 0, 0, 0]) f = sllist([0, 0, 0, 0]) self.assertTrue(sllist() >= sllist()) self.assertTrue(a >= a) self.assertFalse(sllist() >= a) self.assertTrue(a >= sllist()) self.assertFalse(a >= b) self.assertTrue(b >= a) self.assertFalse(c >= d) self.assertTrue(d >= c) self.assertTrue(e >= f) self.assertFalse(f >= e) def test_nodeat(self): ref = py23_range(0, 1024, 4) ll = sllist(ref) for idx in py23_xrange(len(ll)): self.assertTrue(isinstance(ll.nodeat(idx), sllistnode)) self.assertEqual(ll.nodeat(idx).value, ref[idx]) for idx in py23_xrange(len(ll)): self.assertTrue(isinstance(ll.nodeat(idx), sllistnode)) self.assertEqual(ll.nodeat(-idx - 1).value, ref[-idx - 1]) self.assertRaises(TypeError, ll.nodeat, None) self.assertRaises(TypeError, ll.nodeat, 'abc') self.assertRaises(IndexError, ll.nodeat, len(ref)) self.assertRaises(IndexError, ll.nodeat, -len(ref) - 1) def test_nodeat_empty(self): ll = sllist() self.assertRaises(TypeError, ll.nodeat, 
None) self.assertRaises(TypeError, ll.nodeat, 'abc') self.assertRaises(IndexError, ll.nodeat, 0) self.assertRaises(IndexError, ll.nodeat, -1) def test_iter(self): ref = py23_range(0, 1024, 4) ll = sllist(ref) idx = 0 for val in ll: self.assertFalse(isinstance(val, sllistnode)) self.assertEqual(val, ref[idx]) idx += 1 self.assertEqual(idx, len(ref)) def test_iter_empty(self): ll = sllist() count = 0 for val in ll: count += 1 self.assertEqual(count, 0) def test_reversed(self): ref = py23_range(0, 1024, 4) ll = sllist(ref) idx = len(ref) - 1 for val in reversed(ll): self.assertFalse(isinstance(val, sllistnode)) self.assertEqual(val, ref[idx]) idx -= 1 self.assertEqual(idx, -1) def test_reversed_empty(self): ll = sllist() count = 0 for val in reversed(ll): count += 1 self.assertEqual(count, 0) def test_append_left(self): ll = sllist([1, 2, 3, 4]) ll.appendleft(5) self.assertTrue([5, 1, 2, 3, 4], list(ll)) def test_append_right(self): ll = sllist([1, 2, 3, 4]) ll.appendleft(5) self.assertTrue([1, 2, 3, 4, 5], list(ll)) def test_pop_left_from_one_elem(self): ll = sllist(py23_xrange(0, 100)) dd = ll.popleft() self.assertEqual(dd, 0) def test_pop_right_from_one_elem(self): ll = sllist(py23_xrange(0, 100)) dd = ll.popright() self.assertEqual(dd, 99) def test_pop_right_from_n_elem(self): ll = sllist(py23_xrange(0, 100)) dd = ll.popright() self.assertEqual(dd, 99) def test_get_node_at_from_n_elem(self): ll = sllist(py23_xrange(0, 100)) self.assertEqual(50, ll[50]) def test_remove_from_n_elem(self): ll = sllist() nn = sllistnode() ll.append(nn) to_del = ll.nodeat(0) ll.remove(to_del) self.assertEqual(None, None) def test_insert_after(self): ll = sllist([1, 3, '123']) ll.insertafter(100, ll.first) self.assertEqual([1, 100, 3, '123'], list(ll)) def test_insert_before(self): ll = sllist([1, 3, '123']) ll.insertbefore(100, ll.first) self.assertEqual([100, 1, 3, '123'], list(ll)) def test_insert_value_after(self): ll = sllist(py23_xrange(4)) ref = sllist([0, 1, 2, 10, 3]) prev = 
ll.nodeat(2) next = ll.nodeat(3) arg_node = sllistnode(10) new_node = ll.insertafter(arg_node, ll.nodeat(2)) self.assertNotEqual(new_node, arg_node) self.assertEqual(new_node.value, 10) self.assertEqual(new_node.next, next) self.assertEqual(prev.next, new_node) self.assertEqual(ll, ref) def test_insert_value_after_last(self): ll = sllist(py23_xrange(4)) ref = sllist([0, 1, 2, 3, 10]) prev = ll.nodeat(3) arg_node = sllistnode(10) new_node = ll.insertafter(arg_node, ll.nodeat(-1)) self.assertNotEqual(new_node, arg_node) self.assertEqual(new_node.value, 10) self.assertEqual(new_node.next, None) self.assertEqual(prev.next, new_node) self.assertEqual(new_node, ll.last) self.assertEqual(ll, ref) def test_insert_value_before(self): ll = sllist(py23_xrange(4)) ref = sllist([0, 1, 10, 2, 3]) prev = ll.nodeat(1) next = ll.nodeat(2) arg_node = sllistnode(10) new_node = ll.insertbefore(arg_node, ll.nodeat(2)) self.assertNotEqual(new_node, arg_node) self.assertEqual(new_node.value, 10) self.assertEqual(new_node.next, next) self.assertEqual(prev.next, new_node) self.assertEqual(ll, ref) def test_insert_value_before_first(self): ll = sllist(py23_xrange(4)) ref = sllist([10, 0, 1, 2, 3]) next = ll.nodeat(0) arg_node = sllistnode(10) new_node = ll.insertbefore(arg_node, ll.nodeat(0)) self.assertNotEqual(new_node, arg_node) self.assertEqual(new_node.value, 10) self.assertEqual(new_node.next, next) self.assertEqual(new_node, ll.first) self.assertEqual(ll, ref) def test_insert_invalid_ref(self): ll = sllist([1, 2, 3, 4]) self.assertRaises(TypeError, ll.insertafter, 10, 1) self.assertRaises(TypeError, ll.insertafter, 10, 'abc') self.assertRaises(TypeError, ll.insertafter, 10, []) self.assertRaises(ValueError, ll.insertafter, 10, sllistnode()) self.assertRaises(TypeError, ll.insertbefore, 10, 1) self.assertRaises(TypeError, ll.insertbefore, 10, 'abc') self.assertRaises(TypeError, ll.insertbefore, 10, []) self.assertRaises(ValueError, ll.insertbefore, 10, sllistnode()) def 
test_append(self): ll = sllist(py23_xrange(4)) ref = sllist([0, 1, 2, 3, 10]) prev = ll.nodeat(-1) arg_node = sllistnode(10) new_node = ll.append(arg_node) self.assertNotEqual(new_node, arg_node) self.assertEqual(new_node.value, 10) self.assertEqual(new_node.next, None) self.assertEqual(prev.next, new_node) self.assertEqual(ll.last, new_node) self.assertEqual(ll, ref) def test_appendleft(self): ll = sllist(py23_xrange(4)) ref = sllist([10, 0, 1, 2, 3]) next = ll.nodeat(0) arg_node = sllistnode(10) new_node = ll.appendleft(arg_node) self.assertNotEqual(new_node, arg_node) self.assertEqual(new_node.value, 10) self.assertEqual(new_node.next, next) self.assertEqual(ll.first, new_node) self.assertEqual(ll, ref) def test_appendright(self): ll = sllist(py23_xrange(4)) ref = sllist([0, 1, 2, 3, 10]) prev = ll.nodeat(-1) arg_node = sllistnode(10) new_node = ll.appendright(arg_node) self.assertNotEqual(new_node, arg_node) self.assertEqual(new_node.value, 10) self.assertEqual(new_node.next, None) self.assertEqual(prev.next, new_node) self.assertEqual(ll.last, new_node) self.assertEqual(ll, ref) def test_extend(self): a_ref = py23_range(0, 1024, 4) b_ref = py23_range(8092, 8092 + 1024, 4) b = sllist(b_ref) ab_ref = sllist(a_ref + b_ref) a = sllist(a_ref) a.extend(b) self.assertEqual(a, ab_ref) self.assertEqual(len(a), len(ab_ref)) a = sllist(a_ref) a.extend(b_ref) self.assertEqual(a, ab_ref) self.assertEqual(len(a), len(ab_ref)) a = sllist(a_ref) a.extend(a) self.assertEqual(a, sllist(a_ref + a_ref)) self.assertEqual(len(a), len(a_ref) * 2) def test_extend_empty(self): filled_ref = py23_range(0, 1024, 4) filled = sllist(filled_ref) empty = sllist() empty.extend(empty) self.assertEqual(empty, sllist([] + [])) self.assertEqual(len(empty), 0) empty = sllist() empty.extend(filled) self.assertEqual(empty, sllist([] + filled_ref)) self.assertEqual(len(empty), len(filled_ref)) empty = sllist() filled.extend(empty) self.assertEqual(filled, sllist(filled_ref + [])) 
self.assertEqual(len(filled), len(filled_ref)) def test_extendleft(self): a_ref = py23_range(0, 1024, 4) b_ref = py23_range(8092, 8092 + 1024, 4) b = sllist(b_ref) ab_ref = sllist(list(reversed(b_ref)) + a_ref) a = sllist(a_ref) a.extendleft(b) self.assertEqual(a, ab_ref) self.assertEqual(len(a), len(ab_ref)) a = sllist(a_ref) a.extendleft(b_ref) self.assertEqual(a, ab_ref) self.assertEqual(len(a), len(ab_ref)) a = sllist(a_ref) a.extendleft(a) self.assertEqual(a, sllist(list(reversed(a_ref)) + a_ref)) self.assertEqual(len(a), len(a_ref) * 2) def test_extendleft_empty(self): filled_ref = py23_range(0, 1024, 4) filled = sllist(filled_ref) empty = sllist() empty.extendleft(empty) self.assertEqual(empty, sllist([] + [])) self.assertEqual(len(empty), 0) empty = sllist() empty.extendleft(filled) self.assertEqual(empty, sllist(list(reversed(filled_ref)) + [])) self.assertEqual(len(empty), len(filled_ref)) empty = sllist() filled.extendleft(empty) self.assertEqual(filled, sllist(list(reversed([])) + filled_ref)) self.assertEqual(len(filled), len(filled_ref)) def test_extendright(self): a_ref = py23_range(0, 1024, 4) b_ref = py23_range(8092, 8092 + 1024, 4) b = sllist(b_ref) ab_ref = sllist(a_ref + b_ref) a = sllist(a_ref) a.extendright(b) self.assertEqual(a, ab_ref) self.assertEqual(len(a), len(ab_ref)) a = sllist(a_ref) a.extendright(b_ref) self.assertEqual(a, ab_ref) self.assertEqual(len(a), len(ab_ref)) a = sllist(a_ref) a.extendright(a) self.assertEqual(a, sllist(a_ref + a_ref)) self.assertEqual(len(a), len(a_ref) * 2) def test_extendright_empty(self): filled_ref = py23_range(0, 1024, 4) filled = sllist(filled_ref) empty = sllist() empty.extendright(empty) self.assertEqual(empty, sllist([] + [])) self.assertEqual(len(empty), 0) empty = sllist() empty.extendright(filled) self.assertEqual(empty, sllist([] + filled_ref)) self.assertEqual(len(empty), len(filled_ref)) empty = sllist() filled.extendright(empty) self.assertEqual(filled, sllist(filled_ref + [])) 
self.assertEqual(len(filled), len(filled_ref)) def test_clear_empty(self): empty_list = sllist() empty_list.clear() self.assertEqual(empty_list.first, None) self.assertEqual(empty_list.last, None) self.assertEqual(empty_list.size, 0) self.assertEqual(list(empty_list), []) def test_clear(self): ll = sllist(py23_xrange(0, 1024, 4)) del_node = ll.nodeat(4) ll.clear() self.assertEqual(ll.first, None) self.assertEqual(ll.last, None) self.assertEqual(ll.size, 0) self.assertEqual(list(ll), []) self.assertEqual(del_node.next, None) def test_pop(self): ref = py23_range(0, 1024, 4) ll = sllist(ref) del_node = ll.nodeat(-1) result = ll.pop() self.assertEqual(result, ref[-1]) self.assertEqual(len(ll), len(ref) - 1) self.assertEqual(ll.size, len(ref) - 1) self.assertEqual(ll.last.value, ref[-2]) self.assertEqual(list(ll), ref[:-1]) self.assertEqual(del_node.next, None) ref = py23_range(0, 1024, 4) ll = sllist(ref) result = ll.pop(1) self.assertEqual(result, ref[1]) result = ll.pop(1) self.assertEqual(result, ref[2]) self.assertEqual(ll.size, len(ref)-2) ref = py23_range(0, 1024, 4) ll = sllist(ref) result = ll.pop(0) self.assertEqual(result, ref[0]) self.assertEqual(ll.first.value, ref[1]) for i in range(len(ll)): result = ll.pop(0) self.assertEqual(result, ref[i+1]) self.assertEqual(ll.first, None) self.assertEqual(ll.last, None) ref = py23_range(0, 1024, 4) ll = sllist(ref) i = len(ll)-1 while i >= 0: result = ll.pop(i) self.assertEqual(result, ref[i]) i -= 1 self.assertEqual(ll.first, None) self.assertEqual(ll.last, None) def test_slice(self): lst = list(range(100)) slst = sllist(lst) self.assertEqual(lst[0:20], list(slst[0:20])) self.assertEqual(lst[40:60], list(slst[40:60])) self.assertEqual(lst[60:40], list(slst[60:40])) self.assertEqual(lst[:-1], list(slst[:-1])) self.assertEqual(lst[-20:], list(slst[-20:])) self.assertEqual(lst[-20:-5], list(slst[-20:-5])) self.assertEqual(lst[-5:-20], list(slst[-5:-20])) self.assertEqual(lst[-70:50], list(slst[-70:50])) 
self.assertEqual(lst[5:500], list(slst[5:500])) self.assertEqual(lst[:], list(slst[:])) smlst = list(range(8)) smslst = sllist(smlst) self.assertEqual(smlst[2:5], list(smslst[2:5])) self.assertEqual(smlst[-3:-1], list(smslst[-3:-1])) for i in range(100): for j in range(100): try: self.assertEqual(lst[i:j], list(slst[i:j])) except AssertionError as ae: import pdb; pdb.set_trace() sys.stderr.write("Failed on [ %d : %d ]\n" %(i, j)) raise ae # Test if version of python (2.7+ , 3.? + ) supports step in slices try: lst[0:10:2] except: # If not supported, test is over return self.assertEqual(lst[0:20:2], list(slst[0:20:2])) self.assertEqual(lst[0:21:2], list(slst[0:21:2])) self.assertEqual(lst[50:80:6], list(slst[50:80:6])) for i in range(30): for j in range(30): for s in range(1, 30, 1): try: self.assertEqual(lst[i:j:s], list(slst[i:j:s])) except AssertionError as ae: sys.stderr.write("Failed on [ %d : %d : %d ]\n" %(i, j, s)) raise ae def test_popleft(self): ref = py23_range(0, 1024, 4) ll = sllist(ref) del_node = ll.nodeat(0) result = ll.popleft() self.assertEqual(result, ref[0]) self.assertEqual(len(ll), len(ref) - 1) self.assertEqual(ll.size, len(ref) - 1) self.assertEqual(ll.first.value, ref[1]) self.assertEqual(list(ll), ref[1:]) self.assertEqual(del_node.next, None) def test_popright(self): ref = py23_range(0, 1024, 4) ll = sllist(ref) del_node = ll.nodeat(-1) result = ll.popright() self.assertEqual(result, ref[-1]) self.assertEqual(len(ll), len(ref) - 1) self.assertEqual(ll.size, len(ref) - 1) self.assertEqual(ll.last.value, ref[-2]) self.assertEqual(list(ll), ref[:-1]) self.assertEqual(del_node.next, None) def test_pop_from_empty_list(self): ll = sllist() self.assertRaises(ValueError, ll.pop) self.assertRaises(ValueError, ll.popleft) self.assertRaises(ValueError, ll.popright) def test_remove(self): ref = py23_range(0, 1024, 4) ll = sllist(ref) prev_node = ll.nodeat(3) del_node = ll.nodeat(4) next_node = ll.nodeat(5) result = ll.remove(del_node) ref_result = 
ref[4] del ref[4] self.assertEqual(list(ll), ref) self.assertEqual(result, ref_result) self.assertEqual(len(ll), len(ref)) self.assertEqual(ll.size, len(ref)) self.assertEqual(prev_node.next, next_node) self.assertEqual(del_node.next, None) def test_remove_from_empty_list(self): ll = sllist() self.assertRaises(ValueError, ll.remove, sllistnode()) def test_remove_invalid_node(self): ll = sllist([1, 2, 3, 4]) self.assertRaises(ValueError, ll.remove, sllistnode()) def test_remove_already_deleted_node(self): ll = sllist([1, 2, 3, 4]) node = ll.nodeat(2) ll.remove(node) self.assertRaises(ValueError, ll.remove, node) def test_rotate_left(self): for n in py23_xrange(128): ref = py23_range(32) split = n % len(ref) ref_result = ref[split:] + ref[:split] ll = sllist(ref) new_first = ll.nodeat(split) new_last = ll.nodeat(split - 1) ll.rotate(-n) self.assertEqual(list(ll), ref_result) self.assertEqual(ll.first, new_first) self.assertEqual(ll.last, new_last) self.assertEqual(ll.size, len(ref)) self.assertEqual(ll.last.next, None) def test_rotate_right(self): for n in py23_xrange(128): ref = py23_range(32) split = n % len(ref) ref_result = ref[-split:] + ref[:-split] ll = sllist(ref) new_first = ll.nodeat(-split) last_idx = -split - 1 new_last = ll.nodeat(last_idx) ll.rotate(n) self.assertEqual(list(ll), ref_result) self.assertEqual(ll.first, new_first) self.assertEqual(ll.last, new_last) self.assertEqual(ll.size, len(ref)) self.assertEqual(ll.last.next, None) def test_rotate_left_empty(self): for n in py23_xrange(4): ll = sllist() ll.rotate(-n) self.assertEqual(ll.first, None) self.assertEqual(ll.last, None) self.assertEqual(ll.size, 0) def test_rotate_right_empty(self): for n in py23_xrange(4): ll = sllist() ll.rotate(n) self.assertEqual(ll.first, None) self.assertEqual(ll.last, None) self.assertEqual(ll.size, 0) def test_getitem(self): ref = py23_range(0, 1024, 4) ll = sllist(ref) for idx in py23_xrange(len(ll)): self.assertFalse(isinstance(ll[idx], sllistnode)) 
self.assertEqual(ll[idx], ref[idx]) for idx in py23_xrange(len(ll)): self.assertFalse(isinstance(ll[idx], sllistnode)) self.assertEqual(ll[-idx - 1], ref[-idx - 1]) self.assertRaises(TypeError, ll.__getitem__, None) self.assertRaises(TypeError, ll.__getitem__, 'abc') self.assertRaises(IndexError, ll.__getitem__, len(ref)) self.assertRaises(IndexError, ll.__getitem__, -len(ref) - 1) def test_getitem_empty(self): ll = sllist() self.assertRaises(TypeError, ll.__getitem__, None) self.assertRaises(TypeError, ll.__getitem__, 'abc') self.assertRaises(IndexError, ll.__getitem__, 0) self.assertRaises(IndexError, ll.__getitem__, -1) def test_del(self): ref = py23_range(0, 1024, 4) ll = sllist(ref) del ll[0] del ref[0] self.assertEqual(list(ll), ref) del ll[len(ll) - 1] del ref[len(ref) - 1] self.assertEqual(list(ll), ref) del ll[(len(ll) - 1) // 2] del ref[(len(ref) - 1) // 2] self.assertEqual(list(ll), ref) def del_item(idx): del ll[idx] self.assertRaises(IndexError, del_item, len(ll)) for i in py23_xrange(len(ll)): del ll[0] self.assertEqual(len(ll), 0) def test_concat(self): a_ref = py23_range(0, 1024, 4) a = sllist(a_ref) b_ref = py23_range(8092, 8092 + 1024, 4) b = sllist(b_ref) ab_ref = sllist(a_ref + b_ref) c = a + b self.assertEqual(c, ab_ref) self.assertEqual(len(c), len(ab_ref)) c = a + b_ref self.assertEqual(c, ab_ref) self.assertEqual(len(c), len(ab_ref)) def test_concat_empty(self): empty = sllist() filled_ref = py23_range(0, 1024, 4) filled = sllist(filled_ref) res = empty + empty self.assertEqual(res, sllist([] + [])) self.assertEqual(len(res), 0) res = empty + filled self.assertEqual(res, sllist([] + filled_ref)) self.assertEqual(len(res), len(filled_ref)) res = filled + empty self.assertEqual(res, sllist(filled_ref + [])) self.assertEqual(len(res), len(filled_ref)) def test_concat_inplace(self): a_ref = py23_range(0, 1024, 4) b_ref = py23_range(8092, 8092 + 1024, 4) b = sllist(b_ref) ab_ref = sllist(a_ref + b_ref) a = sllist(a_ref) a += b self.assertEqual(a, 
ab_ref) self.assertEqual(len(a), len(ab_ref)) a = sllist(a_ref) a += b_ref self.assertEqual(a, ab_ref) self.assertEqual(len(a), len(ab_ref)) a = sllist(a_ref) a += a self.assertEqual(a, sllist(a_ref + a_ref)) self.assertEqual(len(a), len(ab_ref)) def test_concat_inplace_empty(self): filled_ref = py23_range(0, 1024, 4) filled = sllist(filled_ref) empty = sllist() empty += empty self.assertEqual(empty, sllist([] + [])) self.assertEqual(len(empty), 0) empty = sllist() empty += filled self.assertEqual(empty, sllist([] + filled_ref)) self.assertEqual(len(empty), len(filled_ref)) empty = sllist() filled += empty self.assertEqual(filled, sllist(filled_ref + [])) self.assertEqual(len(filled), len(filled_ref)) def test_index(self): lst = [1, 5, 10, 5, 9] sl = sllist(lst) self.assertEqual(sl.index(1), 0) self.assertEqual(sl.index(5), 1) self.assertEqual(sl.rindex(5), 3) self.assertEqual(sl.rindex(9), 4) gotException = False try: sl.index(2) except ValueError: gotException = True self.assertEqual(gotException, True) def test_contains(self): lst = [1, 5, 7] sl = sllist(lst) self.assertEqual(5 in sl, True) self.assertEqual(1 in sl, True) self.assertEqual(7 in sl, True) self.assertEqual(8 in sl, False) def test_repeat(self): ref = py23_range(0, 1024, 4) ll = sllist(ref) self.assertEqual(ll * 4, sllist(ref * 4)) def test_repeat_empty(self): ll = sllist() self.assertEqual(ll * 4, sllist([] * 4)) def test_repeat_inplace(self): ref = py23_range(0, 1024, 4) ll = sllist(ref) ll *= 4 self.assertEqual(ll, sllist(ref * 4)) def test_repeat_inplace_empty(self): ll = sllist() ll *= 4 self.assertEqual(ll, sllist([] * 4)) def test_list_readonly_attributes(self): if sys.hexversion >= 0x03000000: expected_error = AttributeError else: expected_error = TypeError ll = sllist(py23_range(4)) self.assertRaises(expected_error, setattr, ll, 'first', None) self.assertRaises(expected_error, setattr, ll, 'last', None) self.assertRaises(expected_error, setattr, ll, 'size', None) def 
test_node_readonly_attributes(self): if sys.hexversion >= 0x03000000: expected_error = AttributeError else: expected_error = TypeError ll = sllistnode() self.assertRaises(expected_error, setattr, ll, 'next', None) # COMMENTED BECAUSE HASH DOES NOT WORK # def test_list_hash(self): # self.assertEqual(hash(sllist()), hash(sllist())) # self.assertEqual(hash(sllist(py23_range(0, 1024, 4))), # hash(sllist(py23_range(0, 1024, 4)))) # self.assertEqual(hash(sllist([0, 2])), hash(sllist([0.0, 2.0]))) class testdllist(unittest.TestCase): def test_init_empty(self): ll = dllist() self.assertEqual(len(ll), 0) self.assertEqual(ll.size, 0) self.assertEqual(list(ll), []) def test_init_with_sequence(self): ref = py23_range(0, 1024, 4) ll = dllist(ref) self.assertEqual(len(ll), len(ref)) self.assertEqual(ll.size, len(ref)) self.assertEqual(list(ll), ref) def test_init_with_non_sequence(self): self.assertRaises(TypeError, dllist, None); self.assertRaises(TypeError, dllist, 1); self.assertRaises(TypeError, dllist, 1.5); def test_str(self): a = dllist([]) self.assertEqual(str(a), 'dllist()') b = dllist([None, 1, 'abc']) self.assertEqual(str(b), 'dllist([None, 1, abc])') def test_repr(self): a = dllist([]) self.assertEqual(repr(a), 'dllist()') b = dllist([None, 1, 'abc']) self.assertEqual(repr(b), 'dllist([None, 1, \'abc\'])') def test_node_str(self): a = dllist([None, None]).first self.assertEqual(str(a), 'dllistnode(None)') b = dllist([1, None]).first self.assertEqual(str(b), 'dllistnode(1)') c = dllist(['abc', None]).first self.assertEqual(str(c), 'dllistnode(abc)') def test_node_repr(self): a = dllist([None]).first self.assertEqual(repr(a), '<dllistnode(None)>') b = dllist([1, None]).first self.assertEqual(repr(b), '<dllistnode(1)>') c = dllist(['abc', None]).first self.assertEqual(repr(c), '<dllistnode(\'abc\')>') def test_cmp(self): a = dllist(py23_xrange(0, 1100)) b = dllist(py23_xrange(0, 1101)) c = dllist([1, 2, 3, 4]) d = dllist([1, 2, 3, 5]) e = dllist([1, 0, 0, 0]) f = 
dllist([0, 0, 0, 0]) self.assertEqual(cmp(a, a), 0) self.assertEqual(cmp(a, b), -1) self.assertEqual(cmp(b, a), 1) self.assertEqual(cmp(c, d), -1) self.assertEqual(cmp(d, c), 1) self.assertEqual(cmp(e, f), 1) self.assertEqual(cmp(f, e), -1) def test_cmp_nonlist(self): a = dllist(py23_xrange(0, 1100)) b = [py23_xrange(0, 1100)] if sys.hexversion < 0x03000000: # actual order is not specified by language self.assertNotEqual(cmp(a, b), 0) self.assertNotEqual(cmp(b, a), 0) self.assertNotEqual(cmp([], a), 0) self.assertNotEqual(cmp(a, []), 0) def test_eq(self): a = dllist(py23_xrange(0, 1100)) b = dllist(py23_xrange(0, 1101)) c = dllist([1, 2, 3, 4]) d = dllist([1, 2, 3, 5]) e = dllist([1, 0, 0, 0]) f = dllist([0, 0, 0, 0]) self.assertTrue(dllist() == dllist()) self.assertTrue(a == a) self.assertFalse(dllist() == a) self.assertFalse(a == dllist()) self.assertFalse(a == b) self.assertFalse(b == a) self.assertFalse(c == d) self.assertFalse(d == c) self.assertFalse(e == f) self.assertFalse(f == e) def test_ne(self): a = dllist(py23_xrange(0, 1100)) b = dllist(py23_xrange(0, 1101)) c = dllist([1, 2, 3, 4]) d = dllist([1, 2, 3, 5]) e = dllist([1, 0, 0, 0]) f = dllist([0, 0, 0, 0]) self.assertFalse(dllist() != dllist()) self.assertFalse(a != a) self.assertTrue(dllist() != a) self.assertTrue(a != dllist()) self.assertTrue(a != b) self.assertTrue(b != a) self.assertTrue(c != d) self.assertTrue(d != c) self.assertTrue(e != f) self.assertTrue(f != e) def test_lt(self): a = dllist(py23_xrange(0, 1100)) b = dllist(py23_xrange(0, 1101)) c = dllist([1, 2, 3, 4]) d = dllist([1, 2, 3, 5]) e = dllist([1, 0, 0, 0]) f = dllist([0, 0, 0, 0]) self.assertFalse(dllist() < dllist()) self.assertFalse(a < a) self.assertTrue(dllist() < a) self.assertFalse(a < dllist()) self.assertTrue(a < b) self.assertFalse(b < a) self.assertTrue(c < d) self.assertFalse(d < c) self.assertFalse(e < f) self.assertTrue(f < e) def test_gt(self): a = dllist(py23_xrange(0, 1100)) b = dllist(py23_xrange(0, 1101)) c = 
dllist([1, 2, 3, 4]) d = dllist([1, 2, 3, 5]) e = dllist([1, 0, 0, 0]) f = dllist([0, 0, 0, 0]) self.assertFalse(dllist() > dllist()) self.assertFalse(a > a) self.assertFalse(dllist() > a) self.assertTrue(a > dllist()) self.assertFalse(a > b) self.assertTrue(b > a) self.assertFalse(c > d) self.assertTrue(d > c) self.assertTrue(e > f) self.assertFalse(f > e) def test_le(self): a = dllist(py23_xrange(0, 1100)) b = dllist(py23_xrange(0, 1101)) c = dllist([1, 2, 3, 4]) d = dllist([1, 2, 3, 5]) e = dllist([1, 0, 0, 0]) f = dllist([0, 0, 0, 0]) self.assertTrue(dllist() <= dllist()) self.assertTrue(a <= a) self.assertTrue(dllist() <= a) self.assertFalse(a <= dllist()) self.assertTrue(a <= b) self.assertFalse(b <= a) self.assertTrue(c <= d) self.assertFalse(d <= c) self.assertFalse(e <= f) self.assertTrue(f <= e) def test_ge(self): a = dllist(py23_xrange(0, 1100)) b = dllist(py23_xrange(0, 1101)) c = dllist([1, 2, 3, 4]) d = dllist([1, 2, 3, 5]) e = dllist([1, 0, 0, 0]) f = dllist([0, 0, 0, 0]) self.assertTrue(dllist() >= dllist()) self.assertTrue(a >= a) self.assertFalse(dllist() >= a) self.assertTrue(a >= dllist()) self.assertFalse(a >= b) self.assertTrue(b >= a) self.assertFalse(c >= d) self.assertTrue(d >= c) self.assertTrue(e >= f) self.assertFalse(f >= e) def test_nodeat(self): ref = py23_range(0, 1024, 4) ll = dllist(ref) for idx in py23_xrange(len(ll)): self.assertTrue(isinstance(ll.nodeat(idx), dllistnode)) self.assertEqual(ll.nodeat(idx).value, ref[idx]) for idx in py23_xrange(len(ll)): self.assertTrue(isinstance(ll.nodeat(idx), dllistnode)) self.assertEqual(ll.nodeat(-idx - 1).value, ref[-idx - 1]) self.assertRaises(TypeError, ll.nodeat, None) self.assertRaises(TypeError, ll.nodeat, 'abc') self.assertRaises(IndexError, ll.nodeat, len(ref)) self.assertRaises(IndexError, ll.nodeat, -len(ref) - 1) def test_nodeat_empty(self): ll = dllist() self.assertRaises(TypeError, ll.nodeat, None) self.assertRaises(TypeError, ll.nodeat, 'abc') self.assertRaises(IndexError, 
ll.nodeat, 0) self.assertRaises(IndexError, ll.nodeat, -1) def test_iter(self): ref = py23_range(0, 1024, 4) ll = dllist(ref) idx = 0 for val in ll: self.assertFalse(isinstance(val, dllistnode)) self.assertEqual(val, ref[idx]) idx += 1 self.assertEqual(idx, len(ref)) def test_iter_empty(self): ll = dllist() count = 0 for val in ll: count += 1 self.assertEqual(count, 0) def test_reversed(self): ref = py23_range(0, 1024, 4) ll = dllist(ref) idx = len(ref) - 1 for val in reversed(ll): self.assertFalse(isinstance(val, dllistnode)) self.assertEqual(val, ref[idx]) idx -= 1 self.assertEqual(idx, -1) def test_reversed_empty(self): ll = dllist() count = 0 for val in reversed(ll): count += 1 self.assertEqual(count, 0) def test_insert_value(self): ll = dllist(py23_xrange(4)) ref = dllist([0, 1, 2, 3, 10]) prev = ll.nodeat(-1) arg_node = dllistnode(10) new_node = ll.insert(arg_node) self.assertNotEqual(new_node, arg_node) self.assertEqual(new_node.value, 10) self.assertEqual(new_node.prev, prev) self.assertEqual(new_node.next, None) self.assertEqual(prev.next, new_node) self.assertEqual(new_node, ll.last) self.assertEqual(ll, ref) def test_insert_value_before(self): ll = dllist(py23_xrange(4)) ref = dllist([0, 1, 10, 2, 3]) prev = ll.nodeat(1) next = ll.nodeat(2) arg_node = dllistnode(10) new_node = ll.insert(arg_node, ll.nodeat(2)) self.assertNotEqual(new_node, arg_node) self.assertEqual(new_node.value, 10) self.assertEqual(new_node.prev, prev) self.assertEqual(new_node.next, next) self.assertEqual(prev.next, new_node) self.assertEqual(next.prev, new_node) self.assertEqual(ll, ref) def test_insert_value_before_first(self): ll = dllist(py23_xrange(4)) ref = dllist([10, 0, 1, 2, 3]) next = ll.nodeat(0) arg_node = dllistnode(10) new_node = ll.insert(arg_node, ll.nodeat(0)) self.assertNotEqual(new_node, arg_node) self.assertEqual(new_node.value, 10) self.assertEqual(new_node.prev, None) self.assertEqual(new_node.next, next) self.assertEqual(next.prev, new_node) 
self.assertEqual(new_node, ll.first) self.assertEqual(ll, ref) def test_insert_invalid_ref(self): ll = dllist() self.assertRaises(TypeError, ll.insert, 10, 1) self.assertRaises(TypeError, ll.insert, 10, 'abc') self.assertRaises(TypeError, ll.insert, 10, []) self.assertRaises(ValueError, ll.insert, 10, dllistnode()) def test_append(self): ll = dllist(py23_xrange(4)) ref = dllist([0, 1, 2, 3, 10]) prev = ll.nodeat(-1) arg_node = dllistnode(10) new_node = ll.append(arg_node) self.assertNotEqual(new_node, arg_node) self.assertEqual(new_node.value, 10); self.assertEqual(new_node.prev, prev) self.assertEqual(new_node.next, None) self.assertEqual(prev.next, new_node) self.assertEqual(ll.last, new_node) self.assertEqual(ll, ref) def test_appendleft(self): ll = dllist(py23_xrange(4)) ref = dllist([10, 0, 1, 2, 3]) next = ll.nodeat(0) arg_node = dllistnode(10) new_node = ll.appendleft(arg_node) self.assertNotEqual(new_node, arg_node) self.assertEqual(new_node.value, 10); self.assertEqual(new_node.prev, None) self.assertEqual(new_node.next, next) self.assertEqual(next.prev, new_node) self.assertEqual(ll.first, new_node) self.assertEqual(ll, ref) def test_appendright(self): ll = dllist(py23_xrange(4)) ref = dllist([0, 1, 2, 3, 10]) prev = ll.nodeat(-1) arg_node = dllistnode(10) new_node = ll.appendright(arg_node) self.assertNotEqual(new_node, arg_node) self.assertEqual(new_node.value, 10); self.assertEqual(new_node.prev, prev) self.assertEqual(new_node.next, None) self.assertEqual(prev.next, new_node) self.assertEqual(ll.last, new_node) self.assertEqual(ll, ref) def test_extend(self): a_ref = py23_range(0, 1024, 4) b_ref = py23_range(8092, 8092 + 1024, 4) b = dllist(b_ref) ab_ref = dllist(a_ref + b_ref) a = dllist(a_ref) a.extend(b) self.assertEqual(a, ab_ref) self.assertEqual(len(a), len(ab_ref)) a = dllist(a_ref) a.extend(b_ref) self.assertEqual(a, ab_ref) self.assertEqual(len(a), len(ab_ref)) a = dllist(a_ref) a.extend(a) self.assertEqual(a, dllist(a_ref + a_ref)) 
self.assertEqual(len(a), len(a_ref) * 2) def test_extend_empty(self): filled_ref = py23_range(0, 1024, 4) filled = dllist(filled_ref) empty = dllist() empty.extend(empty) self.assertEqual(empty, dllist([] + [])) self.assertEqual(len(empty), 0) empty = dllist() empty.extend(filled) self.assertEqual(empty, dllist([] + filled_ref)) self.assertEqual(len(empty), len(filled_ref)) empty = dllist() filled.extend(empty) self.assertEqual(filled, dllist(filled_ref + [])) self.assertEqual(len(filled), len(filled_ref)) def test_extendleft(self): a_ref = py23_range(0, 1024, 4) b_ref = py23_range(8092, 8092 + 1024, 4) b = dllist(b_ref) ab_ref = dllist(list(reversed(b_ref)) + a_ref) a = dllist(a_ref) a.extendleft(b) self.assertEqual(a, ab_ref) self.assertEqual(len(a), len(ab_ref)) a = dllist(a_ref) a.extendleft(b_ref) self.assertEqual(a, ab_ref) self.assertEqual(len(a), len(ab_ref)) a = dllist(a_ref) a.extendleft(a) self.assertEqual(a, dllist(list(reversed(a_ref)) + a_ref)) self.assertEqual(len(a), len(a_ref) * 2) def test_extendleft_empty(self): filled_ref = py23_range(0, 1024, 4) filled = dllist(filled_ref) empty = dllist() empty.extendleft(empty) self.assertEqual(empty, dllist([] + [])) self.assertEqual(len(empty), 0) empty = dllist() empty.extendleft(filled) self.assertEqual(empty, dllist(list(reversed(filled_ref)) + [])) self.assertEqual(len(empty), len(filled_ref)) empty = dllist() filled.extendleft(empty) self.assertEqual(filled, dllist(list(reversed([])) + filled_ref)) self.assertEqual(len(filled), len(filled_ref)) def test_extendright(self): a_ref = py23_range(0, 1024, 4) b_ref = py23_range(8092, 8092 + 1024, 4) b = dllist(b_ref) ab_ref = dllist(a_ref + b_ref) a = dllist(a_ref) a.extendright(b) self.assertEqual(a, ab_ref) self.assertEqual(len(a), len(ab_ref)) a = dllist(a_ref) a.extendright(b_ref) self.assertEqual(a, ab_ref) self.assertEqual(len(a), len(ab_ref)) a = dllist(a_ref) a.extendright(a) self.assertEqual(a, dllist(a_ref + a_ref)) self.assertEqual(len(a), 
len(a_ref) * 2) def test_extendright_empty(self): filled_ref = py23_range(0, 1024, 4) filled = dllist(filled_ref) empty = dllist() empty.extendright(empty) self.assertEqual(empty, dllist([] + [])) self.assertEqual(len(empty), 0) empty = dllist() empty.extendright(filled) self.assertEqual(empty, dllist([] + filled_ref)) self.assertEqual(len(empty), len(filled_ref)) empty = dllist() filled.extendright(empty) self.assertEqual(filled, dllist(filled_ref + [])) self.assertEqual(len(filled), len(filled_ref)) def test_clear_empty(self): empty_list = dllist() empty_list.clear() self.assertEqual(empty_list.first, None) self.assertEqual(empty_list.last, None) self.assertEqual(empty_list.size, 0) self.assertEqual(list(empty_list), []) def test_clear(self): ll = dllist(py23_xrange(0, 1024, 4)) del_node = ll.nodeat(4) ll.clear() self.assertEqual(ll.first, None) self.assertEqual(ll.last, None) self.assertEqual(ll.size, 0) self.assertEqual(list(ll), []) self.assertEqual(del_node.prev, None) self.assertEqual(del_node.next, None) def test_pop(self): ref = py23_range(0, 1024, 4) ll = dllist(ref) del_node = ll.nodeat(-1) result = ll.pop(); self.assertEqual(result, ref[-1]) self.assertEqual(len(ll), len(ref) - 1) self.assertEqual(ll.size, len(ref) - 1) self.assertEqual(ll.last.value, ref[-2]) self.assertEqual(list(ll), ref[:-1]) self.assertEqual(del_node.prev, None) self.assertEqual(del_node.next, None) ref = py23_range(0, 1024, 4) ll = dllist(ref) #import pdb; pdb.set_trace() result = ll.pop(1) self.assertEqual(result, ref[1]) result = ll.pop(1) self.assertEqual(result, ref[2]) self.assertEqual(ll.size, len(ref)-2) secondNode = ll.nodeat(1) self.assertEquals(secondNode.prev, ll.first) self.assertEquals(ll.first.prev, None) ref = py23_range(0, 1024, 4) ll = dllist(ref) result = ll.pop(0) self.assertEqual(result, ref[0]) self.assertEqual(ll.first.value, ref[1]) for i in range(len(ll)): result = ll.pop(0) self.assertEqual(result, ref[i+1]) self.assertEqual(ll.first, None) 
self.assertEqual(ll.last, None) ref = py23_range(0, 1024, 4) ll = dllist(ref) i = len(ll) - 1 while i >= 0: result = ll.pop(i) self.assertEqual(result, ref[i]) i -= 1 self.assertEqual(ll.first, None) self.assertEqual(ll.last, None) ref = py23_range(0, 1024, 4) lastIdx = list(ref).index(ref[-1]) allIndexes = list(range(lastIdx+1)) random.shuffle(allIndexes) ll = dllist(ref) while allIndexes: # print ( "Popping %d out of %d indexes. Value: %s\n\tFirst=%s\n\tMiddle=%s\n\tLast=%s\n\tSize=%d\n" %(allIndexes[0], len(allIndexes), str(ll[allIndexes[0]]), ll.first, ll.middle, ll.last, ll.size)) nextIndex = allIndexes.pop(0) listAccessValue = ll[nextIndex] poppedValue = ll.pop(nextIndex) self.assertEquals(listAccessValue, poppedValue) for i in range(len(allIndexes)): if allIndexes[i] > nextIndex: allIndexes[i] -= 1 self.assertEqual(ll.first, None) self.assertEqual(ll.last, None) def test_popleft(self): ref = py23_range(0, 1024, 4) ll = dllist(ref) del_node = ll.nodeat(0) result = ll.popleft() self.assertEqual(result, ref[0]) self.assertEqual(len(ll), len(ref) - 1) self.assertEqual(ll.size, len(ref) - 1) self.assertEqual(ll.first.value, ref[1]) self.assertEqual(list(ll), ref[1:]) self.assertEqual(del_node.prev, None) self.assertEqual(del_node.next, None) def test_popright(self): ref = py23_range(0, 1024, 4) ll = dllist(ref) del_node = ll.nodeat(-1) result = ll.popright() self.assertEqual(result, ref[-1]) self.assertEqual(len(ll), len(ref) - 1) self.assertEqual(ll.size, len(ref) - 1) self.assertEqual(ll.last.value, ref[-2]) self.assertEqual(list(ll), ref[:-1]) self.assertEqual(del_node.prev, None) self.assertEqual(del_node.next, None) def test_pop_from_empty_list(self): ll = dllist() self.assertRaises(ValueError, ll.pop) self.assertRaises(ValueError, ll.popleft) self.assertRaises(ValueError, ll.popright) def test_remove(self): ref = py23_range(0, 1024, 4) ll = dllist(ref) prev_node = ll.nodeat(3) del_node = ll.nodeat(4) next_node = ll.nodeat(5) result = ll.remove(del_node) 
ref_result = ref[4] del ref[4] self.assertEqual(list(ll), ref) self.assertEqual(result, ref_result) self.assertEqual(len(ll), len(ref)) self.assertEqual(ll.size, len(ref)) self.assertEqual(prev_node.next, next_node) self.assertEqual(next_node.prev, prev_node) self.assertEqual(del_node.prev, None) self.assertEqual(del_node.next, None) def test_remove_from_empty_list(self): ll = dllist() self.assertRaises(ValueError, ll.remove, dllistnode()) def test_remove_invalid_node(self): ll = dllist([1, 2, 3, 4]) self.assertRaises(ValueError, ll.remove, dllistnode()) def test_remove_already_deleted_node(self): ll = dllist([1, 2, 3, 4]) node = ll.nodeat(2) ll.remove(node) self.assertRaises(ValueError, ll.remove, node) def test_rotate_left(self): for n in py23_xrange(128): ref = py23_range(32) split = n % len(ref) ref_result = ref[split:] + ref[:split] ll = dllist(ref) new_first = ll.nodeat(split) new_last = ll.nodeat(split - 1) # touch future middle element to initialize cache cached_idx = (len(ll) // 2 + n) % len(ll) ll[cached_idx] ll.rotate(-n) self.assertEqual(list(ll), ref_result) self.assertEqual(ll.first, new_first) self.assertEqual(ll.last, new_last) self.assertEqual(ll.size, len(ref)) self.assertEqual(ll.first.prev, None) self.assertEqual(ll.first.next.prev, ll.first) self.assertEqual(ll.last.next, None) self.assertEqual(ll.last.prev.next, ll.last) # check if cached index is updated correctly self.assertEqual(ll[len(ll) // 2], ref_result[len(ref_result) // 2]) def test_rotate_right(self): for n in py23_xrange(128): ref = py23_range(32) split = n % len(ref) ref_result = ref[-split:] + ref[:-split] ll = dllist(ref) new_first = ll.nodeat(-split) last_idx = -split - 1 new_last = ll.nodeat(last_idx) # touch future middle element to initialize cache cached_idx = len(ll) - (len(ll) // 2 + n) % len(ll) - 1 ll[cached_idx] ll.rotate(n) self.assertEqual(list(ll), ref_result) self.assertEqual(ll.first, new_first) self.assertEqual(ll.last, new_last) self.assertEqual(ll.size, 
len(ref)) self.assertEqual(ll.first.prev, None) self.assertEqual(ll.first.next.prev, ll.first) self.assertEqual(ll.last.next, None) self.assertEqual(ll.last.prev.next, ll.last) # check if cached index is updated correctly self.assertEqual(ll[len(ll) // 2], ref_result[len(ref_result) // 2]) def test_rotate_left_empty(self): for n in py23_xrange(4): ll = dllist() ll.rotate(-n) self.assertEqual(ll.first, None) self.assertEqual(ll.last, None) self.assertEqual(ll.size, 0) def test_rotate_right_empty(self): for n in py23_xrange(4): ll = dllist() ll.rotate(n) self.assertEqual(ll.first, None) self.assertEqual(ll.last, None) self.assertEqual(ll.size, 0) def test_getitem(self): ref = py23_range(0, 1024, 4) ll = dllist(ref) for idx in py23_xrange(len(ll)): self.assertFalse(isinstance(ll[idx], dllistnode)) self.assertEqual(ll[idx], ref[idx]) for idx in py23_xrange(len(ll)): self.assertFalse(isinstance(ll[idx], dllistnode)) self.assertEqual(ll[-idx - 1], ref[-idx - 1]) self.assertRaises(TypeError, ll.__getitem__, None) self.assertRaises(TypeError, ll.__getitem__, 'abc') self.assertRaises(IndexError, ll.__getitem__, len(ref)) self.assertRaises(IndexError, ll.__getitem__, -len(ref) - 1) def test_getitem_empty(self): ll = dllist() self.assertRaises(TypeError, ll.__getitem__, None) self.assertRaises(TypeError, ll.__getitem__, 'abc') self.assertRaises(IndexError, ll.__getitem__, 0) self.assertRaises(IndexError, ll.__getitem__, -1) def test_del(self): ref = py23_range(0, 1024, 4) ll = dllist(ref) del ll[0] del ref[0] self.assertEqual(list(ll), ref) del ll[len(ll) - 1] del ref[len(ref) - 1] self.assertEqual(list(ll), ref) del ll[(len(ll) - 1) // 2] del ref[(len(ref) - 1) // 2] self.assertEqual(list(ll), ref) def del_item(idx): del ll[idx] self.assertRaises(IndexError, del_item, len(ll)) for i in py23_xrange(len(ll)): del ll[0] self.assertEqual(len(ll), 0) def test_concat(self): a_ref = py23_range(0, 1024, 4) a = dllist(a_ref) b_ref = py23_range(8092, 8092 + 1024, 4) b = dllist(b_ref) 
ab_ref = dllist(a_ref + b_ref) c = a + b self.assertEqual(c, ab_ref) self.assertEqual(len(c), len(ab_ref)) c = a + b_ref self.assertEqual(c, ab_ref) self.assertEqual(len(c), len(a_ref) * 2) def test_concat_empty(self): empty = dllist() filled_ref = py23_range(0, 1024, 4) filled = dllist(filled_ref) res = empty + empty self.assertEqual(res, dllist([] + [])) self.assertEqual(len(res), 0) res = empty + filled self.assertEqual(res, dllist([] + filled_ref)) self.assertEqual(len(res), len(filled_ref)) res = filled + empty self.assertEqual(res, dllist(filled_ref + [])) self.assertEqual(len(res), len(filled_ref)) def test_concat_inplace(self): a_ref = py23_range(0, 1024, 4) b_ref = py23_range(8092, 8092 + 1024, 4) b = dllist(b_ref) ab_ref = dllist(a_ref + b_ref) a = dllist(a_ref) a += b self.assertEqual(a, ab_ref) self.assertEqual(len(a), len(ab_ref)) a = dllist(a_ref) a += b_ref self.assertEqual(a, ab_ref) self.assertEqual(len(a), len(ab_ref)) a = dllist(a_ref) a += a self.assertEqual(a, dllist(a_ref + a_ref)) self.assertEqual(len(a), len(a_ref) * 2) def test_concat_inplace_empty(self): filled_ref = py23_range(0, 1024, 4) filled = dllist(filled_ref) empty = dllist() empty += empty self.assertEqual(empty, dllist([] + [])) self.assertEqual(len(empty), 0) empty = dllist() empty += filled self.assertEqual(empty, dllist([] + filled_ref)) self.assertEqual(len(empty), len(filled_ref)) empty = dllist() filled += empty self.assertEqual(filled, dllist(filled_ref + [])) self.assertEqual(len(filled), len(filled_ref)) def test_index(self): lst = [1, 5, 10, 5, 9] dl = dllist(lst) self.assertEqual(dl.index(1), 0) self.assertEqual(dl.index(5), 1) self.assertEqual(dl.rindex(5), 3) self.assertEqual(dl.rindex(9), 4) gotException = False try: dl.index(2) except ValueError: gotException = True self.assertEqual(gotException, True) def test_contains(self): lst = [1, 5, 7] sl = dllist(lst) self.assertEqual(5 in sl, True) self.assertEqual(1 in sl, True) self.assertEqual(7 in sl, True) 
self.assertEqual(8 in sl, False) def test_slice(self): lst = list(range(100)) dlst = dllist(lst) self.assertEqual(lst[0:20], list(dlst[0:20])) self.assertEqual(lst[40:60], list(dlst[40:60])) self.assertEqual(lst[60:40], list(dlst[60:40])) self.assertEqual(lst[:-1], list(dlst[:-1])) self.assertEqual(lst[-20:], list(dlst[-20:])) self.assertEqual(lst[-20:-5], list(dlst[-20:-5])) self.assertEqual(lst[-5:-20], list(dlst[-5:-20])) self.assertEqual(lst[-70:50], list(dlst[-70:50])) self.assertEqual(lst[5:500], list(dlst[5:500])) self.assertEqual(lst[:], list(dlst[:])) smlst = list(range(8)) smdlst = dllist(smlst) self.assertEqual(smlst[2:5], list(smdlst[2:5])) self.assertEqual(smlst[-3:-1], list(smdlst[-3:-1])) for i in range(100): for j in range(100): self.assertEqual(lst[i:j], list(dlst[i:j])) # Test if version of python (2.7+ , 3.? + ) supports step in slices try: lst[0:10:2] except: # If not supported, test is over return self.assertEqual(lst[0:20:2], list(dlst[0:20:2])) self.assertEqual(lst[0:21:2], list(dlst[0:21:2])) self.assertEqual(lst[50:80:6], list(dlst[50:80:6])) for i in range(100): for j in range(100): for s in range(1, 100, 1): try: self.assertEqual(lst[i:j:s], list(dlst[i:j:s])) except AssertionError as ae: sys.stderr.write("Failed on [ %d : %d : %d ]\n" %(i, j, s)) raise ae def test_repeat(self): ref = py23_range(0, 1024, 4) ll = dllist(ref) self.assertEqual(ll * 4, dllist(ref * 4)) def test_repeat_empty(self): ll = dllist() self.assertEqual(ll * 4, dllist([] * 4)) def test_repeat_inplace(self): ref = py23_range(0, 1024, 4) ll = dllist(ref) ll *= 4 self.assertEqual(ll, dllist(ref * 4)) def test_repeat_inplace_empty(self): ll = dllist() ll *= 4 self.assertEqual(ll, dllist([] * 4)) def test_list_readonly_attributes(self): if sys.hexversion >= 0x03000000: expected_error = AttributeError else: expected_error = TypeError ll = dllist(py23_range(4)) self.assertRaises(expected_error, setattr, ll, 'first', None) self.assertRaises(expected_error, setattr, ll, 
'last', None) self.assertRaises(expected_error, setattr, ll, 'size', None) def test_node_readonly_attributes(self): if sys.hexversion >= 0x03000000: expected_error = AttributeError else: expected_error = TypeError ll = dllistnode() self.assertRaises(expected_error, setattr, ll, 'prev', None) self.assertRaises(expected_error, setattr, ll, 'next', None) # COMMENTED BECAUSE HASH DOES NOT WORK # def test_list_hash(self): # self.assertEqual(hash(dllist()), hash(dllist())) # self.assertEqual(hash(dllist(py23_range(0, 1024, 4))), # hash(dllist(py23_range(0, 1024, 4)))) # self.assertEqual(hash(dllist([0, 2])), hash(dllist([0.0, 2.0]))) def suite(): suite = unittest.TestSuite() suite.addTest(unittest.makeSuite(testsllist)) suite.addTest(unittest.makeSuite(testdllist)) return suite if __name__ == '__main__': unittest.TextTestRunner(verbosity=2).run(suite())
33.814754
209
0.566022
60,234
0.973384
0
0
0
0
0
0
2,073
0.0335
522ddb7104f425240015b2b3379e3dbe907a6b3f
1,240
py
Python
trio/_highlevel_open_unix_stream.py
monobot/trio
0fc81827093ebb7eb524e1c14464bb0dfd8641e8
[ "Apache-2.0", "MIT" ]
null
null
null
trio/_highlevel_open_unix_stream.py
monobot/trio
0fc81827093ebb7eb524e1c14464bb0dfd8641e8
[ "Apache-2.0", "MIT" ]
null
null
null
trio/_highlevel_open_unix_stream.py
monobot/trio
0fc81827093ebb7eb524e1c14464bb0dfd8641e8
[ "Apache-2.0", "MIT" ]
null
null
null
import trio from trio._highlevel_open_tcp_stream import close_on_error from trio.socket import socket, SOCK_STREAM try: from trio.socket import AF_UNIX has_unix = True except ImportError: has_unix = False __all__ = ["open_unix_socket"] async def open_unix_socket(filename,): """Opens a connection to the specified `Unix domain socket <https://en.wikipedia.org/wiki/Unix_domain_socket>`__. You must have read/write permission on the specified file to connect. Args: filename (str or bytes): The filename to open the connection to. Returns: SocketStream: a :class:`~trio.abc.Stream` connected to the given file. Raises: OSError: If the socket file could not be connected to. RuntimeError: If AF_UNIX sockets are not supported. """ if not has_unix: raise RuntimeError("Unix sockets are not supported on this platform") if filename is None: raise ValueError("Filename cannot be None") # much more simplified logic vs tcp sockets - one socket type and only one # possible location to connect to sock = socket(AF_UNIX, SOCK_STREAM) with close_on_error(sock): await sock.connect(filename) return trio.SocketStream(sock)
28.837209
78
0.715323
0
0
0
0
0
0
987
0.795968
704
0.567742
522e672d6d314c9e22db7c2f836733d64e4b7dec
6,854
py
Python
mod_setup_nftables.py
surarim/nifard
78744b3cb17a7152a3bf32e7d07c258be033220e
[ "MIT" ]
null
null
null
mod_setup_nftables.py
surarim/nifard
78744b3cb17a7152a3bf32e7d07c258be033220e
[ "MIT" ]
null
null
null
mod_setup_nftables.py
surarim/nifard
78744b3cb17a7152a3bf32e7d07c258be033220e
[ "MIT" ]
null
null
null
#!/usr/local/bin/python3 # -*- coding: utf-8 -*- # Встроенные модули import time, sys, subprocess from threading import Thread # Внешние модули try: import psycopg2 except ModuleNotFoundError as err: print(err) sys.exit(1) # Внутренние модули try: from mod_common import * except ModuleNotFoundError as err: print(err) sys.exit(1) # Класс для работы с изменениями в nftables class setup_nftables(Thread): # Стартовые параметры def __init__(self, threads_list, todolist): super().__init__() self.daemon = True self.threads_list = threads_list self.todolist = todolist # Поток изменений в nftables def run(self): # Запись в лог файл log_write('Thread setup_nftables running') try: # Подключение к базе conn_pg = psycopg2.connect(database='nifard', user=get_config('DatabaseUserName'), password=get_config('DatabasePassword')) except psycopg2.DatabaseError as error: log_write(error) sys.exit(1) # Цикл чтения таблицы while not app_work.empty(): # Чтение из таблицы базы данных cursor = conn_pg.cursor() try: cursor.execute("select * from users;") except psycopg2.DatabaseError as error: log_write(error) subprocess.call('nft flush ruleset', shell=True) sys.exit(1) conn_pg.commit() rows = cursor.fetchall() # Получение текущего списка правил nftables по таблице nat rules_nat = subprocess.check_output('nft -a list table nat | head -n -2 | tail +4', shell=True).decode().strip() try: # Получение текущего списка правил nftables по таблице traffic rules_traffic = subprocess.check_output('nft -a list table traffic | grep daddr', shell=True).decode().strip() except: rules_traffic = '' # Получение текущего списка правил nftables по таблице speed rules_speed = subprocess.check_output('nft -a list table speed | head -n -2 | tail +4', shell=True).decode().strip() # for row in rows: if app_work.empty(): break # Повторная проверка на завершение потока ip_addr = row[0] # IP адрес username = row[1] # Имя пользователя computer = row[2] # Имя компьютера domain = row[3] # Имя домена speed = row[4] # 
Группа скорости # Проверка ip адреса на валидность if ip_addr.count('.') == 3 and ip_addr.find(get_config('ADUserIPMask')) != -1: # Обнуление переменных модификаций правил speed_db = 0 speed_nft = 0 # Определение модификаций try: # Если лимит if int(speed[speed.find('_')+1:])//1024 >= 1: speed_db = int(speed[speed.find('_')+1:])//1024 else: speed_db = int(speed[speed.find('_')+1:]) except: # Если безлимит speed_db = speed[speed.find('_')+1:] # Получение скорости из nftables if ' '+ip_addr+' ' in rules_speed: # Получение скорости из nftables for line in rules_speed.splitlines(): if line.split()[2] == ip_addr: speed_nft = int(line.split()[6]) break else: if ' '+ip_addr+' ' in rules_traffic: speed_nft = 'nolimit' else: speed_nft = 'disable' # # Удаление правил в том числе для пересоздания if speed == 'disable' or speed_db != speed_nft: # Проверка на наличие его в rules_nat if ' '+ip_addr+' ' in rules_nat: rule_nat = '' # Получение номера правила для таблицы nat for line in rules_nat.splitlines(): if line.split()[2] == ip_addr: rule_nat = line.split()[8] rule_nat = 'nft delete rule nat postrouting handle '+rule_nat+'\n' break # Получение номера правила и удаление для таблицы traffic rule_traffic = '' rule_counter = 'nft delete counter traffic '+ip_addr+'\n' # Получение номера правила и удаление для таблицы traffic for line in rules_traffic.splitlines(): if line.split()[2] == ip_addr: rule_traffic = line.split()[10] rule_traffic = 'nft delete rule traffic prerouting handle '+rule_traffic+'\n' break rule_speed = '' # Получение номера правила и удаление для таблицы speed for line in rules_speed.splitlines(): if line.split()[2] == ip_addr: rule_speed = line.split()[11] rule_speed = 'nft delete rule speed prerouting handle '+rule_speed+'\n' break # Удаление выбранного правила из nftables subprocess.call(rule_traffic + rule_nat + rule_speed, shell=True) # Ожидание перед удалением счётчика time.sleep(1) subprocess.call(rule_counter, shell=True) # Запись в лог файл 
log_write('Delete '+ip_addr+' from nftables') # # Добавление правил if (speed != 'disable' and speed.find('disable') == -1): # Если ip адреса ещё нет в nftables, и при этом он не принадлежит другому домену, то добавляем if ' '+ip_addr+' ' not in rules_nat and (domain == get_config('DomainRealm') or domain == 'Domain Unknown'): # Формирование правила в nat rule_nat = 'nft add rule nat postrouting ip saddr '+ip_addr+' oif '+get_config('InternetInterface')+' masquerade\n' # Формирование правила в traffic (подсчёт трафика) rule_traffic = 'nft add counter traffic '+ip_addr+'\n' rule_traffic += 'nft add rule traffic prerouting ip daddr '+ip_addr+' iif '+get_config('InternetInterface')+' counter name '+ip_addr+'\n' # Формирование правила в speed (оганичение трафика) rule_limit = '' if speed.find('nolimit') == -1: rule_limit = 'nft add rule speed prerouting ip daddr '+ip_addr+' limit rate over '+speed[speed.find('_')+1:]+' kbytes/second drop\n' # Добавление текущих правил в nftables subprocess.call(rule_nat + rule_traffic + rule_limit, shell=True) # Запись в лог файл log_write('Adding '+ip_addr+' in nftables') # # Закрытие курсора и задержка выполнения cursor.close() # Ожидание потока for tick in range(5): if app_work.empty(): break time.sleep(1) conn_pg.close() subprocess.call('nft flush ruleset', shell=True) # Запись в лог файл log_write('Thread setup_nftables stopped') # Удаление потока из списка self.threads_list.get()
41.792683
151
0.591625
7,493
0.941687
0
0
0
0
0
0
3,619
0.45482
522e8268e27cc0e4ac1467fdf1229c4f9a633e0c
731
py
Python
dlapp/forms.py
tavershimafx/E-library
861b8aeba82e4bf0b63f5cb1421ca8f9f9cd1d96
[ "Apache-2.0" ]
null
null
null
dlapp/forms.py
tavershimafx/E-library
861b8aeba82e4bf0b63f5cb1421ca8f9f9cd1d96
[ "Apache-2.0" ]
null
null
null
dlapp/forms.py
tavershimafx/E-library
861b8aeba82e4bf0b63f5cb1421ca8f9f9cd1d96
[ "Apache-2.0" ]
null
null
null
from django import forms from . models import Holdings class HoldingsForm(forms.ModelForm): class Meta: model = Holdings fields = ['title', 'holding', 'authors', 'category'] widgets={ 'title': forms.TextInput(attrs={'class': 'form-control'}), 'holding': forms.FileInput(attrs={'type': 'file'}), 'authors': forms.TextInput(attrs={'class': 'form-control'}), 'category': forms.Select(attrs={'class': 'form-control'}), } class SearchForm(forms.Form): # create a search form query = forms.CharField(max_length=250) class Meta: widgets={ 'query': forms.TextInput(attrs={'class': 'form-control'}), }
34.809524
72
0.578659
673
0.920657
0
0
0
0
0
0
195
0.266758
522ec8665cdc06a15413dd19e0ca4b2d4864dbdc
2,805
py
Python
tests/aircraft/deploys/ubuntu/models/v1beta3/storage_config_data_test.py
relaxdiego/aircraft
ce9a6724fe33be38777991fbb1cd731e197fa468
[ "Apache-2.0" ]
9
2021-01-15T18:26:44.000Z
2021-07-29T07:40:15.000Z
tests/aircraft/deploys/ubuntu/models/v1beta3/storage_config_data_test.py
relaxdiego/aircraft
ce9a6724fe33be38777991fbb1cd731e197fa468
[ "Apache-2.0" ]
null
null
null
tests/aircraft/deploys/ubuntu/models/v1beta3/storage_config_data_test.py
relaxdiego/aircraft
ce9a6724fe33be38777991fbb1cd731e197fa468
[ "Apache-2.0" ]
1
2021-04-26T01:39:26.000Z
2021-04-26T01:39:26.000Z
import pytest from aircraft.deploys.ubuntu.models.v1beta3 import StorageConfigData @pytest.fixture def input_config(request): marker = request.node.get_closest_marker('data_kwargs') config = dict( disks=[ { 'path': '/dev/sda', 'partitions': [ { 'size': 536870912, # 512MB 'format': 'fat32', 'mount_path': '/boot/efi', 'flag': 'boot', 'grub_device': True, }, { 'size': 1073741824, # 1GB 'format': 'ext4', 'mount_path': '/boot', }, { 'id': 'partition-for-ubuntu-vg', 'size': 429496729600, # 400GB }, ], }, ], lvm_volgroups=[ { 'name': 'ubuntu-vg', 'devices': [ 'partition-for-ubuntu-vg' ], 'logical_volumes': marker.kwargs.get( 'logical_volumes', [ { 'name': 'ubuntu-lv', 'size': 397284474880, # 370GB 'format': 'ext4', 'mount_path': '/', } ] ) } ] ) return config @pytest.fixture def no_lvm_volgroups(request): config = dict( disks=[ { 'path': '/dev/sda', 'partitions': [ { 'size': 536870912, # 512MB 'format': 'fat32', 'mount_path': '/boot/efi', 'flag': 'boot', 'grub_device': True, }, { 'size': 1073741824, # 1GB 'format': 'ext4', 'mount_path': '/boot', }, { 'id': 'partition-for-ubuntu-vg', 'size': 429496729600, # 400GB }, ], }, ], ) return config @pytest.mark.data_kwargs(logical_volumes=[]) def test__does_not_error_out_when_lvm_lvs_are_empty(input_config): assert StorageConfigData(**input_config).export_lvm_logical_volumes() == [] def test__does_not_error_out_when_lvm_volgroups_is_empty(no_lvm_volgroups): assert StorageConfigData(**no_lvm_volgroups).export_lvm_logical_volumes() == []
29.526316
83
0.370053
0
0
0
0
2,550
0.909091
0
0
537
0.191444
522faca411638c4c5ca4e0265ac3ec446d4b7425
1,009
py
Python
tests/tests_core/test_definitions.py
markuswiertarkus/simbatch
c0e21abdee9f3475a01779d35cbe19e607a2c502
[ "MIT" ]
1
2017-11-28T01:10:09.000Z
2017-11-28T01:10:09.000Z
tests/tests_core/test_definitions.py
markuswiertarkus/simbatch
c0e21abdee9f3475a01779d35cbe19e607a2c502
[ "MIT" ]
null
null
null
tests/tests_core/test_definitions.py
markuswiertarkus/simbatch
c0e21abdee9f3475a01779d35cbe19e607a2c502
[ "MIT" ]
null
null
null
from simbatch.core import core from simbatch.core.definitions import SingleAction import pytest # TODO check dir on prepare tests TESTING_AREA_DIR = "S:\\simbatch\\data\\" @pytest.fixture(scope="module") def simbatch(): # TODO pytest-datadir pytest-datafiles vs ( path.dirname( path.realpath(sys.argv[0]) ) sib = core.SimBatch(5, ini_file="config_tests.ini") sib.clear_all_memory_data() sib.prj.create_example_project_data(do_save=False) sib.prj.update_current_from_index(1) sib.sch.create_example_schemas_data(do_save=False) return sib def test_exist_definitions_data(simbatch): assert simbatch.comfun.file_exists(simbatch.sts.store_definitions_directory) is True def test_load_definitions(simbatch): assert simbatch.dfn.load_definitions() is True # def test_load_definitions(sib): # assert sib.dfn.load_definitions() is True # def test_clear_all_definion_data # def test_action_create(simbatch): # sa = SingleAction() # assert sa.
25.225
103
0.75223
0
0
0
0
405
0.401388
0
0
366
0.362735
523074ecbb7f919e6c2ee41fb3a77f7a0bc837a7
304
py
Python
creational/singleton/singleton_main.py
Kozak24/Patterns
351d5c11f7c64ce5d58db37b6715fc8f7d31945a
[ "MIT" ]
null
null
null
creational/singleton/singleton_main.py
Kozak24/Patterns
351d5c11f7c64ce5d58db37b6715fc8f7d31945a
[ "MIT" ]
null
null
null
creational/singleton/singleton_main.py
Kozak24/Patterns
351d5c11f7c64ce5d58db37b6715fc8f7d31945a
[ "MIT" ]
null
null
null
from creational.singleton.logic import God def main(): god = God() another_god = God() if god != another_god: raise Exception("God should be equal to God") for _ in range(0, 7): god.do_something() another_god.do_something() if __name__ == "__main__": main()
16.888889
53
0.618421
0
0
0
0
0
0
0
0
38
0.125
5231e6bd87f94e0063595e79c3086076e75fc714
31
py
Python
src/awss3/__init__.py
ZhiruiFeng/CarsMemory
658afb98b1b8a667ae45e599ceb56f51759fdfce
[ "MIT" ]
9
2019-01-26T21:57:38.000Z
2021-08-13T11:55:56.000Z
src/awss3/__init__.py
ZhiruiFeng/CarsMemory
658afb98b1b8a667ae45e599ceb56f51759fdfce
[ "MIT" ]
6
2019-02-03T05:42:50.000Z
2021-06-01T23:24:35.000Z
src/awss3/__init__.py
ZhiruiFeng/CarsMemory
658afb98b1b8a667ae45e599ceb56f51759fdfce
[ "MIT" ]
5
2019-03-06T04:33:57.000Z
2021-05-31T17:43:57.000Z
#!/usr/bin/env python # aws s3
10.333333
21
0.645161
0
0
0
0
0
0
0
0
29
0.935484
5232c89174d7545d58073dde916fe3b501931f46
277
py
Python
rsa_demo/stubs/pyasn1/type/univ.py
jlinoff/rsa_demo
8babffab271bf58ca944ab9ff8743af26dc3c9b1
[ "MIT" ]
2
2019-12-04T22:38:44.000Z
2020-04-11T06:47:06.000Z
rsa_demo/stubs/pyasn1/type/univ.py
jlinoff/rsa_demo
8babffab271bf58ca944ab9ff8743af26dc3c9b1
[ "MIT" ]
null
null
null
rsa_demo/stubs/pyasn1/type/univ.py
jlinoff/rsa_demo
8babffab271bf58ca944ab9ff8743af26dc3c9b1
[ "MIT" ]
null
null
null
# Really hackey stubs, not suitable for typeshed. from typing import Any class Integer(): def __init__(self, value: Any = None) -> None: ... class Sequence(): def __init__(self) -> None: ... def setComponentByPosition(self, idx: int, value: Any) -> None: ...
27.7
71
0.649819
196
0.707581
0
0
0
0
0
0
49
0.176895
5233405403bf14c4dfbcafd95285eeecc8a3a127
4,730
py
Python
2021/python/tests/test_day25.py
shalgrim/advent-of-code
d3bd1c9f7eeaebff4153f6fd73ef8fc32d2b1ea8
[ "MIT" ]
null
null
null
2021/python/tests/test_day25.py
shalgrim/advent-of-code
d3bd1c9f7eeaebff4153f6fd73ef8fc32d2b1ea8
[ "MIT" ]
null
null
null
2021/python/tests/test_day25.py
shalgrim/advent-of-code
d3bd1c9f7eeaebff4153f6fd73ef8fc32d2b1ea8
[ "MIT" ]
null
null
null
import pytest from day25_1 import map_step, map_step_n from day25_1 import main as main1 @pytest.fixture def test_input(): with open('../../data/test25.txt') as f: return [line.strip() for line in f.readlines()] example_1_0 = [ list('...>...'), list('.......'), list('......>'), list('v.....>'), list('......>'), list('.......'), list('..vvv..'), ] example_1_1 = [ list('..vv>..'), list('.......'), list('>......'), list('v.....>'), list('>......'), list('.......'), list('....v..'), ] example_1_2 = [ list('....v>.'), list('..vv...'), list('.>.....'), list('......>'), list('v>.....'), list('.......'), list('.......'), ] example_1_3 = [ list('......>'), list('..v.v..'), list('..>v...'), list('>......'), list('..>....'), list('v......'), list('.......'), ] example_1_4 = [ list('>......'), list('..v....'), list('..>.v..'), list('.>.v...'), list('...>...'), list('.......'), list('v......'), ] def map_from_text(raw_text): lines = raw_text.split() return [list(line.strip()) for line in lines] example_2_0 = map_from_text( """v...>>.vv> .vv>>.vv.. >>.>v>...v >>v>>.>.v. v>v.vv.v.. >.>>..v... .vv..>.>v. v.v..>>v.v ....v..v.>""" ) example_2_1 = map_from_text( """....>.>v.> v.v>.>v.v. >v>>..>v.. >>v>v>.>.v .>v.v...v. v>>.>vvv.. ..v...>>.. vv...>>vv. >.v.v..v.v""" ) example_2_2 = map_from_text( """>.v.v>>..v v.v.>>vv.. >v>.>.>.v. >>v>v.>v>. .>..v....v .>v>>.v.v. v....v>v>. .vv..>>v.. v>.....vv.""" ) example_2_3 = map_from_text( """v>v.v>.>v. v...>>.v.v >vv>.>v>.. >>v>v.>.v> ..>....v.. .>.>v>v..v ..v..v>vv> v.v..>>v.. .v>....v..""" ) example_2_4 = map_from_text( """v>..v.>>.. v.v.>.>.v. >vv.>>.v>v >>.>..v>.> ..v>v...v. ..>>.>vv.. >.v.vv>v.v .....>>vv. vvv>...v..""" ) example_2_5 = map_from_text( """vv>...>v>. v.v.v>.>v. >.v.>.>.>v >v>.>..v>> ..v>v.v... ..>.>>vvv. .>...v>v.. ..v.v>>v.v v.v.>...v.""" ) example_2_10 = map_from_text( """..>..>>vv. v.....>>.v ..v.v>>>v> v>.>v.>>>. ..v>v.vv.v .v.>>>.v.. v.v..>v>.. ..v...>v.> .vv..v>vv.""" ) example_2_20 = map_from_text( """v>.....>>. 
>vv>.....v .>v>v.vv>> v>>>v.>v.> ....vv>v.. .v.>>>vvv. ..v..>>vv. v.v...>>.v ..v.....v>""" ) example_2_30 = map_from_text( """.vv.v..>>> v>...v...> >.v>.>vv.> >v>.>.>v.> .>..v.vv.. ..v>..>>v. ....v>..>v v.v...>vv> v.v...>vvv""" ) example_2_40 = map_from_text( """>>v>v..v.. ..>>v..vv. ..>>>v.>.v ..>>>>vvv> v.....>... v.v...>v>> >vv.....v> .>v...v.>v vvv.v..v.>""" ) example_2_50 = map_from_text( """..>>v>vv.v ..v.>>vv.. v.>>v>>v.. ..>>>>>vv. vvv....>vv ..v....>>> v>.......> .vv>....v> .>v.vv.v..""" ) example_2_55 = map_from_text( """..>>v>vv.. ..v.>>vv.. ..>>v>>vv. ..>>>>>vv. v......>vv v>v....>>v vvv...>..> >vv.....>. .>v.vv.v..""" ) example_2_56 = map_from_text( """..>>v>vv.. ..v.>>vv.. ..>>v>>vv. ..>>>>>vv. v......>vv v>v....>>v vvv....>.> >vv......> .>v.vv.v..""" ) example_2_57 = map_from_text( """..>>v>vv.. ..v.>>vv.. ..>>v>>vv. ..>>>>>vv. v......>vv v>v....>>v vvv.....>> >vv......> .>v.vv.v..""" ) example_2_58 = map_from_text( """..>>v>vv.. ..v.>>vv.. ..>>v>>vv. ..>>>>>vv. v......>vv v>v....>>v vvv.....>> >vv......> .>v.vv.v.. 
""" ) def test_map_step(): assert map_step([list('...>>>>>...')]) == [list('...>>>>.>..')] assert map_step([list('...>>>>.>..')]) == [list('...>>>.>.>.')] assert map_step( [list('..........'), list('.>v....v..'), list('.......>..'), list('..........')] ) == [ list('..........'), list('.>........'), list('..v....v>.'), list('..........'), ] assert map_step(example_1_0) == example_1_1 assert map_step(example_1_1) == example_1_2 assert map_step(example_1_2) == example_1_3 assert map_step(example_1_3) == example_1_4 assert map_step(example_2_0) == example_2_1 assert map_step(example_2_1) == example_2_2 assert map_step(example_2_2) == example_2_3 assert map_step(example_2_3) == example_2_4 assert map_step(example_2_4) == example_2_5 assert map_step(map_step(map_step(map_step(map_step(example_2_5))))) == example_2_10 assert map_step(example_2_55) == example_2_56 assert map_step(example_2_56) == example_2_57 assert map_step(example_2_57) == example_2_58 def test_map_step_n(): assert map_step_n(example_2_10, 10) == example_2_20 assert map_step_n(example_2_20, 10) == example_2_30 assert map_step_n(example_2_30, 10) == example_2_40 assert map_step_n(example_2_40, 10) == example_2_50 assert map_step_n(example_2_50, 5) == example_2_55 def test_main1(test_input): assert main1(test_input) == 58
16.596491
88
0.443552
0
0
0
0
134
0.02833
0
0
2,047
0.43277
5233dbb8a07ced44676748858d28ade2ed2e5328
7,504
py
Python
solver.py
satyenrajpal/speed-prediction-challenge
0ac46419c5e2288d5d27ad66348d7a2c47705342
[ "MIT" ]
5
2019-07-11T08:38:53.000Z
2020-04-11T19:07:11.000Z
solver.py
satyenrajpal/speed-prediction-challenge
0ac46419c5e2288d5d27ad66348d7a2c47705342
[ "MIT" ]
null
null
null
solver.py
satyenrajpal/speed-prediction-challenge
0ac46419c5e2288d5d27ad66348d7a2c47705342
[ "MIT" ]
6
2019-04-21T13:39:37.000Z
2021-09-15T09:35:51.000Z
import cv2, os import numpy as np import sys from utils import movingAverage, plot, computeAverage import queue from sklearn import linear_model class Solver(): def __init__(self, config): self.vid = cv2.VideoCapture(config.vidpath) self.txtfile = config.txtfile self.vis = config.vis self.len_gt = config.len_gt self.test_vid = cv2.VideoCapture(config.test_vidpath) # Separate function to allow for different methods to be inculcated into the same class self.setupParams() def setupParams(self): """ intialize parameters for tracking and extracting features Load ground truth parameters from txt file""" # Lucas Kanade parameters self.lk_params = dict(winSize = (21, 21), maxLevel = 2, criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 20, 0.01)) self.frame_idx = 0 self.prev_pts = None self.detect_interval = 1 self.temp_preds = np.zeros(int(self.vid.get(cv2.CAP_PROP_FRAME_COUNT))) # Load ground truth txt file with open(self.txtfile, 'r') as file_: gt = file_.readlines() gt = [float(x.strip()) for x in gt] self.gt = np.array(gt[:self.len_gt]) self.window = 80 # for moving average self.prev_gray = None def constructMask(self, mask = None, test=False): """Constructs a mask to only take into consideration a part of the frame. In this case it's the road. 
""" vid = self.test_vid if test else self.vid if mask is None: W = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)) H = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)) mask = np.zeros(shape = (H,W), dtype = np.uint8) mask.fill(255) else: W = mask.shape[1] H = mask.shape[0] cv2.rectangle(mask, (0, 0), (W, H), (0, 0, 0), -1) x_top_offset = 240 x_btm_offset = 65 poly_pts = np.array([[[640-x_top_offset, 250], [x_top_offset, 250], [x_btm_offset, 350], [640-x_btm_offset, 350]]], dtype=np.int32) cv2.fillPoly(mask, poly_pts, (255, 255, 255)) return mask def processFrame(self, frame): """ Gaussian Blur and then apply Lucas Kanade optical flow""" frame = cv2.GaussianBlur(frame, (3,3), 0) curr_pts, _st, _err = cv2.calcOpticalFlowPyrLK(self.prev_gray, frame, self.prev_pts, None, **self.lk_params) # Store flow (x, y, dx, dy) flow = np.hstack((self.prev_pts.reshape(-1, 2), (curr_pts - self.prev_pts).reshape(-1, 2))) preds = [] for x, y, u, v in flow: if v < -0.05: continue # Translate points to center x -= frame.shape[1]/2 y -= frame.shape[0]/2 # Append to preds taking care of stability issues if y == 0 or (abs(u) - abs(v)) > 11: preds.append(0) preds.append(0) elif x == 0: preds.append(0) preds.append(v / (y*y)) else: preds.append(u / (x * y)) preds.append(v / (y*y)) return [n for n in preds if n>=0] def getKeyPts(self, offset_x=0, offset_y=0): """ return key points with offset """ if self.prev_pts is None: return None return [cv2.KeyPoint(x=p[0][0] + offset_x, y=p[0][1] + offset_y, _size=10) for p in self.prev_pts] def getFeatures(self, frame_gray, mask): return cv2.goodFeaturesToTrack(frame_gray,30,0.1,10,blockSize=10, mask=mask) def run(self): # Construct mask first mask = self.constructMask() prev_key_pts = None # fourcc = cv2.VideoWriter_fourcc(*'XVID') # self.video = cv2.VideoWriter('video.avi', fourcc,29, (int(self.vid.get(cv2.CAP_PROP_FRAME_WIDTH)), int(self.vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))) while self.vid.isOpened() and self.frame_idx<len(self.gt): ret, frame = self.vid.read() if 
not ret: break # Convert to B/W frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) frame_gray = frame_gray[130:350, 35:605] mask_vis = frame.copy() # <- For visualization # Process each frame if self.prev_pts is None: self.temp_preds[self.frame_idx] = 0 else: # Get median of predicted V/hf values preds = self.processFrame(frame_gray) self.temp_preds[self.frame_idx] = np.median(preds) if len(preds) else 0 # Extract features self.prev_pts = self.getFeatures(frame_gray, mask[130:350, 35:605]) self.prev_gray = frame_gray self.frame_idx += 1 # For visualization purposes only if self.vis: prev_key_pts = self.visualize(frame, mask_vis, prev_key_pts) if cv2.waitKey(1) & 0xFF == ord('q'): break # self.video.release() self.vid.release() # Split predictions into train and validation - split = self.frame_idx//10 train_preds = self.temp_preds[:self.frame_idx-split] val_preds = self.temp_preds[self.frame_idx - split:self.frame_idx] gt_train = self.gt[:len(train_preds)] gt_val = self.gt[len(train_preds):self.frame_idx] # Fit to ground truth preds = movingAverage(train_preds, self.window) lin_reg = linear_model.LinearRegression(fit_intercept=False) lin_reg.fit(preds.reshape(-1, 1), gt_train) hf_factor = lin_reg.coef_[0] print("Estimated hf factor = {}".format(hf_factor)) # estimate training error pred_speed_train = train_preds * hf_factor pred_speed_train = movingAverage(pred_speed_train, self.window) mse = np.mean((pred_speed_train - gt_train)**2) print("MSE for train", mse) # Estimate validation error pred_speed_val = val_preds * hf_factor pred_speed_val = movingAverage(pred_speed_val, self.window) mse = np.mean((pred_speed_val - gt_val)**2) print("MSE for val", mse) # plot(pred_speed_train, gt_train) # plot(pred_speed_val, gt_val) return hf_factor def visualize(self, frame, mask_vis, prev_key_pts, speed=None): self.constructMask(mask_vis) mask_vis = cv2.bitwise_not(mask_vis) frame_vis = cv2.addWeighted(frame, 1, mask_vis, 0.3, 0) key_pts = self.getKeyPts(35, 130) 
cv2.drawKeypoints(frame_vis, key_pts, frame_vis, color=(0,0,255)) cv2.drawKeypoints(frame_vis, prev_key_pts, frame_vis, color=(0,255,0)) if speed is not None: font = cv2.FONT_HERSHEY_DUPLEX cv2.putText(frame_vis, "speed {}".format(speed), (10, 35), font, 1.2, (0, 0, 255)) cv2.imshow('test',frame_vis) # self.video.write(frame_vis) return key_pts def test(self, hf_factor, save_txt=False): mask = self.constructMask(test=True) self.prev_gray = None test_preds = np.zeros(int(self.test_vid.get(cv2.CAP_PROP_FRAME_COUNT))) frame_idx = 0 curr_estimate = 0 prev_key_pts = None self.prev_pts = None while self.test_vid.isOpened(): ret, frame = self.test_vid.read() if not ret: break # Convert to B/W frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) frame_gray = frame_gray[130:350, 35:605] mask_vis = frame.copy() # <- For visualization # Process each frame # For the first frame pred_speed = 0 if self.prev_pts is None: test_preds[frame_idx] = 0 else: # Get median of predicted V/hf values preds = self.processFrame(frame_gray) pred_speed = np.median(preds) * hf_factor if len(preds) else 0 test_preds[frame_idx] = pred_speed # Extract features self.prev_pts = self.getFeatures(frame_gray, mask[130:350, 35:605]) self.prev_gray = frame_gray frame_idx += 1 # For visualization purposes only vis_pred_speed = computeAverage(test_preds, self.window//2, frame_idx) prev_key_pts = self.visualize(frame, mask_vis, prev_key_pts, speed=vis_pred_speed) if cv2.waitKey(1) & 0xFF == ord('q'): break self.test_vid.release() print("Saving predicted speeds in test.txt ") if save_txt: with open("test.txt", "w") as file_: for item in test_preds: file_.write("%s \n" % item)
29.660079
149
0.6883
7,352
0.979744
0
0
0
0
0
0
1,475
0.196562
52345b669557250e37a229c365510c22c77e0d88
4,035
py
Python
status/trajectories.py
ioos/glider-dac-status
03fcf9561ed11905235885a7557a58bdc2815d65
[ "MIT" ]
null
null
null
status/trajectories.py
ioos/glider-dac-status
03fcf9561ed11905235885a7557a58bdc2815d65
[ "MIT" ]
16
2016-05-16T13:40:56.000Z
2020-07-21T21:39:21.000Z
status/trajectories.py
ioos/glider-dac-status
03fcf9561ed11905235885a7557a58bdc2815d65
[ "MIT" ]
8
2015-02-20T16:39:30.000Z
2020-10-12T20:33:28.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- import json import requests import os import sys from app import app from shapely.geometry import LineString from status.profile_plots import iter_deployments, is_recent_data, is_recent_update def get_trajectory(erddap_url): ''' Reads the trajectory information from ERDDAP and returns a GEOJSON like structure. ''' # https://gliders.ioos.us/erddap/tabledap/ru01-20140104T1621.json?latitude,longitude&time&orderBy(%22time%22) url = erddap_url.replace('html', 'json') # ERDDAP requires the variable being sorted to be present in the variable # list. The time variable will be removed before converting to GeoJSON url += '?longitude,latitude,time&orderBy(%22time%22)' response = requests.get(url, timeout=180) if response.status_code != 200: raise IOError("Failed to fetch trajectories: {}".format(erddap_url)) data = response.json() geo_data = { 'type': 'LineString', 'coordinates': [c[0:2] for c in data['table']['rows']] } geometry = parse_geometry(geo_data) coords = LineString(geometry['coordinates']) trajectory = coords.simplify(0.02, preserve_topology=False) geometry = { 'type': 'LineString', 'coordinates': list(trajectory.coords), 'properties': { 'oceansmap_type': 'glider' } } return geometry def get_path(deployment): ''' Returns the path to the trajectory file :param dict deployment: Dictionary containing the deployment metadata ''' trajectory_dir = app.config.get('TRAJECTORY_DIR') username = deployment['username'] name = deployment['name'] dir_path = os.path.join(trajectory_dir, username) if not os.path.exists(dir_path): os.makedirs(dir_path) file_path = os.path.join(dir_path, name + '.json') return file_path def write_trajectory(deployment, geo_data): ''' Writes a geojson like python structure to the appropriate data file :param dict deployment: Dictionary containing the deployment metadata :param dict geometry: A GeoJSON Geometry object ''' file_path = get_path(deployment) with open(file_path, 'w') as f: 
f.write(json.dumps(geo_data)) def parse_geometry(geometry): ''' Filters out potentially bad coordinate pairs as returned from GliderDAC. Returns a safe geometry object. :param dict geometry: A GeoJSON Geometry object ''' coords = [] for lon, lat in geometry['coordinates']: if lon is None or lat is None: continue coords.append([lon, lat]) return {'coordinates': coords} def trajectory_exists(deployment): ''' Returns True if the data is within the last week :param dict deployment: Dictionary containing the deployment metadata ''' file_path = get_path(deployment) return os.path.exists(file_path) def generate_trajectories(deployments=None): ''' Determine which trajectories need to be built, and write geojson to file ''' for deployment in iter_deployments(): try: # Only add if the deployment has been recently updated or the data is recent recent_update = is_recent_update(deployment['updated']) recent_data = is_recent_data(deployment) existing_trajectory = trajectory_exists(deployment) if (recent_update or recent_data or not existing_trajectory): geo_data = get_trajectory(deployment['erddap']) write_trajectory(deployment, geo_data) except Exception: from traceback import print_exc print_exc() return 0 if __name__ == '__main__': from argparse import ArgumentParser parser = ArgumentParser(description=generate_trajectories.__doc__) parser.add_argument( '-d', '--deployment', action='append', help='Which deployment to build' ) args = parser.parse_args() sys.exit(generate_trajectories(args.deployment))
31.771654
113
0.677076
0
0
0
0
0
0
0
0
1,583
0.392317
52345fa930bdb9bec3252111f3c27febc14ab5da
663
py
Python
game/client/view/pad/pad.py
AntonYermilov/progue
7f382208c9efc904cff9d8df4750606039801d45
[ "MIT" ]
null
null
null
game/client/view/pad/pad.py
AntonYermilov/progue
7f382208c9efc904cff9d8df4750606039801d45
[ "MIT" ]
6
2019-03-25T21:11:28.000Z
2019-06-21T16:21:47.000Z
game/client/view/pad/pad.py
AntonYermilov/progue
7f382208c9efc904cff9d8df4750606039801d45
[ "MIT" ]
1
2021-12-22T22:03:47.000Z
2021-12-22T22:03:47.000Z
from abc import ABC, abstractmethod class Pad(ABC): def __init__(self, view, x0: int, y0: int, x1: int, y1: int): """ Creates pad with corners in specified coordinates :param view: base view instance :param x0: x-coordinate of top left corner (included) :param y0: y-coordinate of top left corner (included) :param x1: x-coordinate of bottom right corner (excluded) :param y1: y-coordinate of bottom right corner (excluded) """ self.view = view self.x0 = x0 self.y0 = y0 self.x1 = x1 self.y1 = y1 @abstractmethod def refresh(self): pass
30.136364
65
0.597285
625
0.942685
0
0
51
0.076923
0
0
369
0.556561
523460bd80c3461cd9638039533ab16a8632b269
269
py
Python
urls.py
audacious-software/Passive-Data-Kit-Codebook
84533014f6f3aaa6b7cdb1bcf095633c65c94d01
[ "Apache-2.0" ]
null
null
null
urls.py
audacious-software/Passive-Data-Kit-Codebook
84533014f6f3aaa6b7cdb1bcf095633c65c94d01
[ "Apache-2.0" ]
null
null
null
urls.py
audacious-software/Passive-Data-Kit-Codebook
84533014f6f3aaa6b7cdb1bcf095633c65c94d01
[ "Apache-2.0" ]
null
null
null
from django.conf.urls import url from .views import pdk_codebook_page, pdk_codebook_sitemap urlpatterns = [ url(r'^(?P<generator>.+)/$', pdk_codebook_page, name='pdk_codebook_page'), url(r'^sitemap.json$', pdk_codebook_sitemap, name='pdk_codebook_sitemap') ]
29.888889
78
0.747212
0
0
0
0
0
0
0
0
81
0.301115
5234684154dd0faf0b6bcb6ee4c3aed702f78dd5
934
py
Python
model_evaluation/test_data.py
huawenbo/ERSW-project
105c692234357a0de58bfa067f379b83ea8259b6
[ "MIT" ]
5
2021-07-04T15:13:55.000Z
2021-09-05T14:55:51.000Z
model_evaluation/test_data.py
huawenbo/ERSW-project
105c692234357a0de58bfa067f379b83ea8259b6
[ "MIT" ]
null
null
null
model_evaluation/test_data.py
huawenbo/ERSW-project
105c692234357a0de58bfa067f379b83ea8259b6
[ "MIT" ]
1
2021-07-06T11:54:39.000Z
2021-07-06T11:54:39.000Z
# -*- coding: utf-8 -*- """ Created on Sat May 8 09:54:36 2021 @author: Administrator """ import torch from torch.utils.tensorboard import SummaryWriter from sklearn.metrics import roc_auc_score import numpy as np # %% data = torch.load('dataset/dic_3_1_7878.pt') print(len(data)) print(data[0][2].shape) # %% for i in data: for j in i[2]: for k in j: if k<-10000: print(k) print('dowm') # %% data_new = [] for i in data: if i[3].sum() != 0: data_new.append(i) torch.save(data_new, 'dic_3_1_7899_5_new.pt') print('down') # %% writer = SummaryWriter() writer.add_pr_curve(tag, labels, predictions) # %% result = np.load('roc/test/1.npy') print(result[0].shape) print(roc_auc_score(result[0], result[1])) # %% da = torch.load('dataset/mimic_dic_1210.pt') # %% num = 0 for i in da: num += i[-1] print(num, len(da)-num)
17.961538
50
0.594218
0
0
0
0
0
0
0
0
233
0.249465
5235f4900f956a3bbd70a475b7a93639e781aab5
2,087
py
Python
jdcloud_sdk/services/iotcore/models/ThingTypeInfoVO.py
Tanc009/jdcloud-sdk-python
8b045c99bc5b73ca7348e950b6f01e03a27982f5
[ "Apache-2.0" ]
14
2018-04-19T09:53:56.000Z
2022-01-27T06:05:48.000Z
jdcloud_sdk/services/iotcore/models/ThingTypeInfoVO.py
Tanc009/jdcloud-sdk-python
8b045c99bc5b73ca7348e950b6f01e03a27982f5
[ "Apache-2.0" ]
15
2018-09-11T05:39:54.000Z
2021-07-02T12:38:02.000Z
jdcloud_sdk/services/iotcore/models/ThingTypeInfoVO.py
Tanc009/jdcloud-sdk-python
8b045c99bc5b73ca7348e950b6f01e03a27982f5
[ "Apache-2.0" ]
33
2018-04-20T05:29:16.000Z
2022-02-17T09:10:05.000Z
# coding=utf8 # Copyright 2018 JDCLOUD.COM # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # NOTE: This class is auto generated by the jdcloud code generator program. class ThingTypeInfoVO(object): def __init__(self, deviceTypeName, code, name, authType=None, connectType=None, createdTime=None, updateTime=None, customProfiles=None, deviceTypeCode=None, globalProfiles=None, manufacturerId=None, manufacturerName=None, nodeType=None): """ :param authType: (Optional) 认证方式 1: 一机一密 2:一型一密 :param connectType: (Optional) 连接类型,蜂窝(2G/3G/4G) :param createdTime: (Optional) 创建时间 :param updateTime: (Optional) 更新时间 :param customProfiles: (Optional) 自定义档案信息 :param deviceTypeCode: (Optional) 设备类型编号 :param deviceTypeName: 设备类型名称 例如:ttu、电表 :param globalProfiles: (Optional) :param manufacturerId: (Optional) 制造商编号 :param manufacturerName: (Optional) 制造商名称 :param nodeType: (Optional) 节点类型1:直连终端、2:边缘代理、3:非直连终端 :param code: 所属物类型Code :param name: 所属物类型名称 """ self.authType = authType self.connectType = connectType self.createdTime = createdTime self.updateTime = updateTime self.customProfiles = customProfiles self.deviceTypeCode = deviceTypeCode self.deviceTypeName = deviceTypeName self.globalProfiles = globalProfiles self.manufacturerId = manufacturerId self.manufacturerName = manufacturerName self.nodeType = nodeType self.code = code self.name = name
40.134615
241
0.700048
1,606
0.705314
0
0
0
0
0
0
1,469
0.645147
52366585374893c5a5d4f9892985aa180f39b4ce
198
py
Python
cars/admin.py
rngallen/signals
a27c2b1e307a3a25eb83afe38df137cf836ed8c9
[ "MIT" ]
null
null
null
cars/admin.py
rngallen/signals
a27c2b1e307a3a25eb83afe38df137cf836ed8c9
[ "MIT" ]
null
null
null
cars/admin.py
rngallen/signals
a27c2b1e307a3a25eb83afe38df137cf836ed8c9
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Car # Register your models here. @admin.register(Car) class CarAdmin(admin.ModelAdmin): list_display = ("name", "price", "buyer", "code")
19.8
53
0.722222
87
0.439394
0
0
108
0.545455
0
0
54
0.272727
5236e1e1d04488927330339167dbb5ca72dd2f1c
5,683
py
Python
backend/forum/utils/djtools/query_cache.py
karolyi/forum-django
a498be3123deb836e0108258c493b88c645b2163
[ "MIT" ]
7
2016-09-20T11:49:49.000Z
2017-06-24T23:51:56.000Z
backend/forum/utils/djtools/query_cache.py
karolyi/forum-django
a498be3123deb836e0108258c493b88c645b2163
[ "MIT" ]
17
2019-12-22T10:41:48.000Z
2021-11-17T10:58:50.000Z
backend/forum/utils/djtools/query_cache.py
karolyi/forum-django
a498be3123deb836e0108258c493b88c645b2163
[ "MIT" ]
1
2016-09-20T11:50:57.000Z
2016-09-20T11:50:57.000Z
from typing import Iterable, Optional from django import VERSION from django.db.models.base import Model from django.db.models.fields.related import ManyToManyField from django.db.models.fields.reverse_related import ManyToOneRel from django.db.models.manager import Manager from django.db.models.query import QuerySet def invalidate_onetomany(objs: Iterable[Model], prefetch_keys: Iterable[str]): """ Invalidate one-to-many caches. These are remote `ForeignKey` and `ManyToManyField` fields fetched with `prefetch_related()`. """ if VERSION[0] == 1 or VERSION[0] == 2: for obj in objs: if not hasattr(obj, '_prefetched_objects_cache'): continue for key in prefetch_keys: if key not in obj._prefetched_objects_cache: continue del obj._prefetched_objects_cache[key] def invalidate_manytoone(objs: Iterable[Model], field_names: Iterable[str]): """ Invalidate many-to-one caches. These are `ForeignKey` and `OneToOneField` fields fetched with `select_related()` or `prefetch_related()`. """ if VERSION[0] == 1: for obj in objs: for field_name in field_names: if not is_fk_cached(obj=obj, field_name=field_name): continue del obj.__dict__[f'_{field_name}_cache'] elif VERSION[0] == 2: for obj in objs: for field_name in field_names: if not is_fk_cached(obj=obj, field_name=field_name): continue del obj._state.fields_cache[field_name] def get_prefetch_cache_key(relation: Manager) -> str: 'Return a key used in the prefetched cache for a relation.' try: # Works on ManyToMany return relation.prefetch_cache_name except AttributeError: # Is a ForeignKey (OneToMany) rel_field = relation.field.remote_field # type: ManyToOneRel if rel_field.related_name: return rel_field.related_name if VERSION[0] == 1: return rel_field.name elif VERSION[0] == 2: return f'{rel_field.name}_set' def init_prefetch_cache(obj: Model): 'Init a prefetch cache on the model.' 
if VERSION[0] == 1 or VERSION[0] == 2: if hasattr(obj, '_prefetched_objects_cache'): return obj._prefetched_objects_cache = {} def is_query_prefetched(relation: Manager) -> bool: 'Return `True` if the relation is prefetched.' if VERSION[0] == 1 or VERSION[0] == 2: obj = relation.instance if not hasattr(obj, '_prefetched_objects_cache'): return False prefetch_cache_key = get_prefetch_cache_key(relation=relation) return prefetch_cache_key in obj._prefetched_objects_cache return False def set_prefetch_cache( relation: Manager, queryset: QuerySet, override: bool = True): 'Set prefetch cache on a `Model` for a relation.' if is_query_prefetched(relation=relation) and not override: return obj = relation.instance init_prefetch_cache(obj=obj) if VERSION[0] == 1 or VERSION[0] == 2: key = get_prefetch_cache_key(relation=relation) obj._prefetched_objects_cache[key] = queryset def is_queryresult_loaded(qs: QuerySet) -> bool: 'Return `True` if the query is loaded, `False` otherwise.' if VERSION[0] == 1 or VERSION[0] == 2: return qs._result_cache is not None return False def set_queryresult(qs: QuerySet, result: list, override: bool = True): 'Set result on a previously setup query.' if VERSION[0] == 1 or VERSION[0] == 2: if override or not is_queryresult_loaded(qs=qs): qs._result_cache = result def get_queryresult(qs: QuerySet) -> Optional[list]: 'Return the cached query result of the passed `QuerySet`.' if VERSION[0] == 1 or VERSION[0] == 2: return qs._result_cache def is_fk_cached(obj: Model, field_name: str) -> bool: 'Return `True` if the `ForeignKey` field on the object is cached.' 
if VERSION[0] == 1: return hasattr(obj, f'_{field_name}_cache') elif VERSION[0] == 2: if getattr(obj, '_state', None) is None or \ getattr(obj._state, 'fields_cache', None) is None: return False return field_name in obj._state.fields_cache return False def set_fk_cache( obj: Model, field_name: str, value: Model, override: bool = True): """ Set a cache on the `obj` for a `ForeignKey` field, override when requested. """ if not override and is_fk_cached(obj=obj, field_name=field_name): return if VERSION[0] == 1: setattr(obj, f'_{field_name}_cache', value) elif VERSION[0] == 2: if getattr(obj, '_state', None) is None: obj._state = dict() if getattr(obj._state, 'fields_cache', None) is None: obj._state.fields_cache = dict() obj._state.fields_cache[field_name] = value def del_fk_cache(obj: Model, field_name: str): 'Delete a cached `ForeignKey` on the `Model`.' if not is_fk_cached(obj=obj, field_name=field_name): return if VERSION[0] == 1: delattr(obj, f'_{field_name}_cache') elif VERSION[0] == 2: del obj._state.fields_cache _old_m2m_savedata = ManyToManyField.save_form_data def _save_m2m_form_data( self: ManyToManyField, instance: Model, data: QuerySet): _old_m2m_savedata(self=self, instance=instance, data=data) set_prefetch_cache( relation=getattr(instance, self.name), queryset=data, override=True) ManyToManyField.save_form_data = _save_m2m_form_data
34.652439
78
0.655464
0
0
0
0
0
0
0
0
1,166
0.205173
52371790ddfcd8b2c7d4c68f5e7cb458249f7867
16,914
py
Python
web/poisson_web.py
DreadyBear/codereview
9e3072ca79f97a067599c762cdea73da7607f671
[ "Unlicense" ]
60
2015-01-06T16:19:01.000Z
2021-04-21T12:41:35.000Z
web/poisson_web.py
DreadyBear/codereview
9e3072ca79f97a067599c762cdea73da7607f671
[ "Unlicense" ]
2
2015-02-18T19:59:37.000Z
2015-03-25T20:10:59.000Z
web/poisson_web.py
DreadyBear/codereview
9e3072ca79f97a067599c762cdea73da7607f671
[ "Unlicense" ]
27
2015-02-23T03:29:04.000Z
2021-04-25T21:06:42.000Z
# -*- coding: utf-8 -*- # <nbformat>3.0</nbformat> # <codecell> import os import psycopg2 import numpy as np import pandas as pd import patsy import statsmodels.api as sm import pickle import random from math import floor, exp from datetime import * import pytz from dateutil.relativedelta import * import calendar from config import config # Connect to postgres db conn = psycopg2.connect("dbname= %s user= %s host=%s" % (config()["DB"], config()["USER"], config()["DB_URL"])) # <codecell> def get_station_data(station_id): # Pulls Data for Given Station_id and Converts to Pandas Dataframe cur = conn.cursor() # Fetch data for station 17 in Washington, DC - 16th & Harvard St NW, terminalName: 31103 cur.execute("SELECT * FROM bike_ind_washingtondc WHERE tfl_id = %s;" % station_id) station_data = cur.fetchall() # Put data in pandas dataframe station_updates = pd.DataFrame.from_records(station_data, columns = ["station_id", "bikes_available", "spaces_available", "timestamp"], index = "timestamp") # Convert UTC timezone of the timestamps to DC's Eastern time station_updates.index = station_updates.index.tz_localize('UTC').tz_convert('US/Eastern') return station_updates # <codecell> def fit_poisson(station_updates): # Find changes (deltas) in bike count bikes_available = station_updates.bikes_available deltas = bikes_available - bikes_available.shift() # Show the histogram of the deltas. Need to remove outliers first. # clipped_deltas = deltas[(deltas > -6) & (deltas < 6)] # clipped_deltas.hist(bins=11) # Separate positive and negative deltas pos_deltas = deltas[deltas > 0] neg_deltas = abs(deltas[deltas < 0]) # Count the number of positive and negative deltas per half hour per day, add them to new dataframe. 
time_interval = '1H' pos_interval_counts_null = pos_deltas.resample(time_interval, how ='sum') neg_interval_counts_null = neg_deltas.resample(time_interval, how ='sum') # Set NaN delta counts to 0 # By default the resampling step puts NaN (null values) into the data when there were no observations # to count up during those thirty minutes. arrivals = pos_interval_counts_null.fillna(0) departures = neg_interval_counts_null.fillna(0) arrivals_departures = pd.DataFrame(arrivals, columns=["arrivals"]) arrivals_departures['departures'] = departures # Extract months for Month feature, add to model data delta_months = arrivals_departures.index.month arrivals_departures['months'] = delta_months # Extract hours for Hour feature delta_hours = arrivals_departures.index.hour arrivals_departures['hours'] = delta_hours # Extract weekday vs. weekend variable delta_dayofweek = arrivals_departures.index.weekday delta_weekday_dummy = delta_dayofweek.copy() delta_weekday_dummy[delta_dayofweek < 5] = 1 delta_weekday_dummy[delta_dayofweek >= 5] = 0 arrivals_departures['weekday_dummy'] = delta_weekday_dummy # print arrivals_departures # print arrivals_departures.head(20) # Create design matrix for months, hours, and weekday vs. weekend. # We can't just create a "month" column to toss into our model, because it doesnt # understand what "June" is. Instead, we need to create a column for each month # and code each row according to what month it's in. Ditto for hours and weekday (=1). 
y_arr, X_arr = patsy.dmatrices("arrivals ~ C(months, Treatment) + C(hours, Treatment) + C(weekday_dummy, Treatment)", arrivals_departures, return_type='dataframe') y_dep, X_dep = patsy.dmatrices("departures ~ C(months, Treatment) + C(hours, Treatment) + C(weekday_dummy, Treatment)", arrivals_departures, return_type='dataframe') y_dep[pd.isnull(y_dep)] = 0 # Fit poisson distributions for arrivals and departures, print results arr_poisson_model = sm.Poisson(y_arr, X_arr) arr_poisson_results = arr_poisson_model.fit() dep_poisson_model = sm.Poisson(y_dep, X_dep) dep_poisson_results = dep_poisson_model.fit() # print arr_poisson_results.summary(), dep_poisson_results.summary() poisson_results = [arr_poisson_results, dep_poisson_results] return poisson_results # <codecell> # Predict *net* lambda value some time in the future, using the list of hours created above. # You can predict any number of hours ahead using interval_length, default is set to 1 hour. # The arrival lambda at 12pm actually means the expected arrival rate from 12pm to 1pm. But if the # current time is 12:15pm and you're estimating an hour ahead to 1:15pm, you need to find # 3/4ths of the lambda from 12pm - 1pm and add it to 1/4th of the lambda from 1pm to 2pm. # This section returns the total lambda over that interval, during which the rate is changing. # It also works for predictions multiple hours ahead, as all those lambdas will be summed # and yield a large expected value, which makes sense if you're counting bikes over several hours. # The function predicts arrival lambdas across the time interval, does the same thing independently # for departure lambdas, and finds their difference to get the net lambda at that time - the change in bikes # you'll see at the station in an hour. Add the net lambda to the current number of bikes to get # the prediction of the expected value of how many bikes will be there. 
def lambda_calc(month, time, weekday, poisson_results):
    """Return the Poisson rate (lambda) for one hour, given the covariates.

    Parameters
    ----------
    month : int
        Calendar month, 1-12.  Month 1 is the Treatment reference level, so
        its coefficient is implicitly 0.
        NOTE(review): assumes January appears in the training data so that it
        really is patsy's reference level - confirm against fit_poisson input.
    time : float
        Hour of day (fractional hours allowed); only floor(time) is used.
    weekday : int
        1 for a weekday (Mon-Fri), 0 for a weekend - this matches the
        weekday_dummy built in fit_poisson (dayofweek < 5 -> 1).
    poisson_results : mapping
        Exposes a 'params' mapping from patsy coefficient names to estimates
        (either a fitted statsmodels result's .params wrapped in a dict, as
        produced by load_poisson_result, or the result object itself).
    """
    estimates = poisson_results["params"]
    intercept = estimates['Intercept']

    # Month coefficient; the reference month has no coefficient in the model.
    if month == 1:
        month_estimate = 0
    else:
        month_estimate = estimates['C(months, Treatment)[T.' + str(month) + ']']

    # Hour coefficient.  Hours come from DatetimeIndex.hour (0-23), so hour 0
    # is the Treatment reference level.
    # BUG FIX: the original treated hour 1 as the baseline, which raised
    # KeyError at hour 0 (silently masked downstream by an except KeyError)
    # and incorrectly zeroed out hour 1's real coefficient.
    # The modulo guards against callers passing times >= 24, e.g. prediction
    # windows that run past midnight.
    hour = int(floor(time)) % 24
    if hour == 0:
        hour_estimate = 0
    else:
        hour_estimate = estimates['C(hours, Treatment)[T.' + str(hour) + ']']

    # Weekday dummy coefficient; weekend (0) is the reference level.
    if weekday == 0:
        weekday_estimate = 0
    else:
        weekday_estimate = estimates['C(weekday_dummy, Treatment)[T.' + str(weekday) + ']']

    # log(lambda) is linear in the covariates; exponentiate to get the rate.
    log_lambda = intercept + month_estimate + hour_estimate + weekday_estimate
    return exp(log_lambda)


def predict_net_lambda(current_time, prediction_interval, month, weekday, poisson_results):
    """Return the expected net change in bikes over the prediction interval.

    The hourly rate changes at each whole hour, so the interval
    [current_time, current_time + prediction_interval] is split at hour
    boundaries; each sub-interval contributes its hour's lambda weighted by
    the fraction of the hour covered.  Arrivals and departures are summed
    independently and the difference (arrivals - departures) is returned.

    Parameters
    ----------
    current_time : float
        Current hour of day (fractional, e.g. 12.25 for 12:15).
    prediction_interval : float
        How many hours ahead to predict.
    month, weekday : int
        Covariates, as in lambda_calc.
    poisson_results : sequence
        Pair of (arrival_results, departure_results) as in lambda_calc.
    """
    # Build the list of split points: current time, each whole hour crossed,
    # and the prediction time itself.
    prediction_time = current_time + prediction_interval
    time_list = [current_time]
    next_step = current_time
    while next_step != prediction_time:
        if floor(next_step) + 1 < prediction_time:
            next_step = floor(next_step) + 1
        else:
            next_step = prediction_time
        time_list.append(next_step)

    # Accumulate the cumulative rate over the prediction interval,
    # first for arrivals ...
    arr_cum_lambda = 0
    for i in range(1, len(time_list)):
        est_lambda = lambda_calc(month, time_list[i - 1], weekday, poisson_results[0])
        hour_proportion = time_list[i] - time_list[i - 1]
        arr_cum_lambda += est_lambda * hour_proportion

    # ... then for departures.
    dep_cum_lambda = 0
    for i in range(1, len(time_list)):
        est_lambda = lambda_calc(month, time_list[i - 1], weekday, poisson_results[1])
        hour_proportion = time_list[i] - time_list[i - 1]
        dep_cum_lambda += est_lambda * hour_proportion

    net_lambda = arr_cum_lambda - dep_cum_lambda
    return net_lambda

# <codecell>

# Estimate the poisson!

def save_poisson_results():
    """Fit a Poisson model for every station and pickle the coefficients.

    Skips stations whose pickle file already exists, so an interrupted run
    can be resumed.  Relies on getStations(), get_station_data() and
    fit_poisson() defined elsewhere in this module.
    """
    print("saving")
    station_ids = getStations()
    for station in station_ids:
        station_id = station[0]
        pickle_path = "%spoisson_results_%s.p" % (pickle_folder, station_id)
        if os.path.isfile(pickle_path):
            continue  # already fitted on a previous run
        station_updates = get_station_data(station_id)
        print("Got data, now fitting")
        poisson_results = fit_poisson(station_updates)
        # Persist only the coefficient Series - the full results objects are
        # large and load_poisson_result() only needs .params.
        to_save_ps = (poisson_results[0].params, poisson_results[1].params)
        with open(pickle_path, "wb") as file_out:
            pickle.dump(to_save_ps, file_out)
        print("finished %s" % station_id)
    print("done saving")

# <codecell>

# pickle_folder = "/mnt/data1/BikeShare/pickles/"
pickle_folder = "/Users/darkzeroman/dssg/bikeshare/web/static/pickles/"

# save_poisson_results()

def load_poisson_result(station_id):
    """Load the pickled (arrival, departure) coefficients for one station.

    Returns a pair of dicts that mimic the statsmodels results consumed by
    lambda_calc: each exposes its coefficient Series under 'params'.
    """
    pickle_path = "%spoisson_results_%s.p" % (pickle_folder, station_id)
    with open(pickle_path, "rb") as file_in:
        temp = pickle.load(file_in)
    return (dict(params=temp[0]), dict(params=temp[1]))

# <codecell>

'''
# Auxiliary code
# Try to predict!
current_time = 17.5
prediction_interval = 1
month = 5
weekday = 0

bike_change = predict_net_lambda(current_time, prediction_interval, month, weekday, poisson_results)
# print "The change in bikes at time %s and month %s is %s" % (str(floor(current_time)), str(month), str(bike_change))

# Plot predictions of available bikes by hour for given covariates
init_bikes = 18
bike = init_bikes
bikes = [init_bikes]
hours_of_day = range(1, 24)
for hour in hours_of_day:
    bike += predict_net_lambda(hour, prediction_interval, month, weekday, poisson_results)
    bikes.append(bike)

pd.Series(bikes).plot()
'''

# <codecell>

# Validate the model!
# min_time_pt = datetime.datetime(2010,10,8)
# prediction_interval =
# time_step =

#def validate_model(min_time_pt):
    # Generate list of time points incremented by the time_step
    # Get observations before timepoint
#    smaller_updates = station_updates[station_updates.index < min_time_pt]
#    print station_updates
#    print smaller_updates

#validate_model(min_time_pt)

# <codecell>

# Simulate bike availability at station 17 for next half hour
# We're doing this to flag when station is full or empty, which
# is what bikeshare operators want.
# Monte-Carlo simulation of bike availability, used to flag when a station
# is likely to go full or empty - which is what bikeshare operators want.

def simulate_bikes(station_id, starting_time, final_time, max_slots, starting_bikes_available, month, weekday, poisson_results):
    """Run one stochastic trajectory of the bike count over a time window.

    Models the station as a merged Poisson process: inter-event times are
    exponential with rate (arrival_lambda + departure_lambda), and each event
    is an arrival with probability arrival_lambda / total_lambda.  The count
    is clamped to [0, max_slots].

    Returns (final_bikes_available, went_empty, went_full), the last two being
    0/1 flags set if the station ever hit empty/full during the run.
    """
    bikes_available = starting_bikes_available
    current_time = starting_time
    go_empty = 0
    go_full = 0
    while current_time < final_time:
        # Rates for the current hour: arrivals push the count up,
        # departures pull it down.
        up_lambda = lambda_calc(month, current_time, weekday, poisson_results[0])
        down_lambda = lambda_calc(month, current_time, weekday, poisson_results[1])
        total_lambda = float(up_lambda + down_lambda)

        # Time until the next event in the merged process, and the chance
        # that the event is an arrival.
        next_obs_time = random.expovariate(total_lambda)
        chance_up = up_lambda / total_lambda

        # Advance the clock; only count the event if it lands in the window.
        current_time += next_obs_time
        if current_time < final_time:
            if random.uniform(0, 1) > chance_up:
                bikes_available -= 1
            else:
                bikes_available += 1

            # Keep the count inside the physically possible range.
            if bikes_available < 0:
                bikes_available = 0
            elif bikes_available > max_slots:
                bikes_available = max_slots

            if bikes_available == 0:
                go_empty = 1
            if bikes_available == max_slots:
                go_full = 1
    return (bikes_available, go_empty, go_full)


def simulation(station_id, starting_time, final_time, max_slots, starting_bikes_available, month, weekday, simulate_bikes, trials=250):
    """Run `trials` independent simulate_bikes trajectories for one station.

    Loads the station's pickled Poisson coefficients once, then runs the
    passed-in simulate_bikes function repeatedly.  Returns three parallel
    lists of per-trial results: (final bike counts, went-empty flags,
    went-full flags).

    BUG FIX: the original iterated over range(1, trials), silently running
    one trial fewer than requested.
    """
    poisson_results = load_poisson_result(station_id)
    bikes_results = []      # bikes at the station at the end of each trial
    go_empty_results = []   # 0/1 per trial: station was empty at some point
    go_full_results = []    # 0/1 per trial: station was full at some point
    for _ in range(trials):
        bikes, empty, full = simulate_bikes(station_id, starting_time, final_time, max_slots,
                                            starting_bikes_available, month, weekday, poisson_results)
        bikes_results.append(bikes)
        go_empty_results.append(empty)
        go_full_results.append(full)
    return (bikes_results, go_empty_results, go_full_results)

# <codecell>

def make_prediction(station, how_many_mins):
    """Predict bike availability at `station`, `how_many_mins` minutes ahead.

    `station` is a metadata row from getStations(); index 0 is the station
    id, 1 its name, 2/3 its latitude/longitude.  Reads the latest observed
    (bikes, spaces) for the station from the database, simulates forward,
    and returns a dict with the expected bike count and the probabilities of
    going empty/full, plus station metadata.  On a missing model covariate
    (KeyError) it returns (station_id, "Prediction Error") instead.

    NOTE(review): the simulated window is expressed in hours-of-day, so an
    interval that crosses midnight yields ending_time < starting_time and
    the simulation loop exits immediately - confirm whether that is intended.
    """
    try:
        station_id = station[0]
        starting_datetime = datetime.now(pytz.timezone('US/Eastern'))
        ending_datetime = starting_datetime + relativedelta(minutes=how_many_mins)

        # Latest observation for this station.  SECURITY FIX: parameterized
        # query - the original interpolated station_id straight into the SQL
        # string (it even carried a "protect sql injection later?" comment).
        cur = conn.cursor()
        cur.execute("select * from bike_ind_washingtondc where tfl_id = %s order by timestamp desc limit 1;",
                    (station_id,))
        _, starting_bikes_available, num_spaces, _ = cur.fetchall()[0]  # (station_id, bikes, spaces, timestamp)
        max_slots = starting_bikes_available + num_spaces

        month = starting_datetime.month  # 1-12

        # weekday_dummy must match fit_poisson(): 1 = Mon-Fri, 0 = weekend.
        # BUG FIX: the original compared the bound *method*
        # starting_datetime.isoweekday to integers, which is always False,
        # so weekday was always 0 (and the intended test was inverted
        # relative to fit_poisson's convention).
        weekday = 1 if starting_datetime.isoweekday() <= 5 else 0

        # Fractional hours of day, e.g. 12:15 -> 12.25.
        starting_time = round(starting_datetime.hour + (starting_datetime.minute / float(60)), 3)
        ending_time = round(ending_datetime.hour + (ending_datetime.minute / float(60)), 3)

        bikes_results, empty_results, full_results = simulation(
            station_id, starting_time, ending_time, max_slots,
            starting_bikes_available, month, weekday, simulate_bikes, 250)

        # week_dict = {'0': 'Week', '1': 'Weekend'}  # only used by the commented diagnostics below
        # print ("In %s during the %s" % (calendar.month_name[month], week_dict[str(weekday)]))
        # print ('Expected Number of Bikes at %s: %0.2f' % (ending_time, round(np.mean(bikes_results), 2)))
        # print ('Probability of Being (Empty, Full) Any Time in the Next %0.2f hours: (%0.2f, %0.2f)' % \
        #        (ending_time - starting_time, round(np.mean(empty_results), 2), round(np.mean(full_results), 2)))
        print(", ".join(map(str, [how_many_mins, station_id])))

        temp_res = (int(station_id), round(np.mean(bikes_results), 2), round(np.mean(empty_results), 2),
                    round(np.mean(full_results), 2), station[2], station[3], station[1],
                    starting_bikes_available, max_slots)
        res_names = ("station_id", "expected_num_bikes", "prob_empty", "prob_full",
                     "lat", "lon", "name", "current_bikes", "max_slots")
        return dict(zip(res_names, temp_res))
    except KeyError:
        # A covariate (e.g. an hour/month never seen in training) is missing
        # from the fitted model.
        return (int(station_id), "Prediction Error")

# %time make_prediction('17', 15*4)

# <codecell>

def run_code():
    """Smoke-test driver: simulate station 17 over a fixed half-hour window."""
    starting_time = 6.0
    final_time = 6.5
    starting_bikes_available = 21
    max_slots = 25
    month = 8
    weekday = 0
    station_id = '17'
    bikes_results, empty_results, full_results = simulation(
        station_id, starting_time, final_time, max_slots,
        starting_bikes_available, month, weekday, simulate_bikes, 500)
    expected_num_bikes = round(np.mean(bikes_results), 2)
    prob_empty_any_time = round(np.mean(empty_results), 2)
    prob_full_any_time = round(np.mean(full_results), 2)
    # print (expected_num_bikes, prob_empty_any_time, prob_full_any_time)

# %timeit run_code()

# <codecell>

def getStations():
    """Return every station metadata row from the database, ordered by id."""
    cur = conn.cursor()
    cur.execute("SELECT DISTINCT * FROM metadata_washingtondc order by id;")
    return list(cur.fetchall())

# print getStations()
38.008989
303
0.691853
0
0
0
0
0
0
0
0
7,586
0.448504
5238a965f3751d579b65ce010c9f63ebfac434c8
67,494
py
Python
genericScreener.py
rsandx/generic-stocks-screener
e717623052cae5ff382604f142ad308ee85e6368
[ "MIT" ]
null
null
null
genericScreener.py
rsandx/generic-stocks-screener
e717623052cae5ff382604f142ad308ee85e6368
[ "MIT" ]
null
null
null
genericScreener.py
rsandx/generic-stocks-screener
e717623052cae5ff382604f142ad308ee85e6368
[ "MIT" ]
null
null
null
#! python3 import pandas as pd import numpy as np import re, json from ta import * import sys, os, logging, logging.config, traceback, concurrent_log_handler, getopt import multiprocessing as mp import contextlib import heapq import talib from datetime import datetime, timedelta #import numba as nb #from timeit import default_timer as timer # Internal imports import utils logging.config.fileConfig("logging.cfg") logger = logging.getLogger(os.path.basename(__file__)) #define constants IBDRS = 'ibd relative strength' #group ta names by number of parameters ta_names = {} TA_MAPPING = dict((k.lower(), v) for k,v in utils.ta_mapping.items()) #sort by name in descending order to make sure the longest name is matched sorted_ta_mapping = {k: v for k, v in sorted(TA_MAPPING.items(), key=lambda x: x[0], reverse=True)} for k, v in sorted_ta_mapping.items(): k = k.replace('+', '\+').replace(' ', '\s+') if v[-1] not in ta_names: ta_names[v[-1]] = k else: ta_names[v[-1]] += '|' + k #logger.debug(ta_names) taRegex = '' for k, v in ta_names.items(): if k > 0: if '|' in v: v = '(' + v + ')' v += '\s*\(\s*' + ('\d+\.?\d*,\s*'*k)[:-4] + '\s*\)' taRegex += v + '|' taRegex = taRegex[:-1] PERIOD = r'(days|weeks|months|day|week|month)' INDICATOR_PLAIN = r'({}|open|high|low|close|volume|range)'.format(taRegex) INDICATOR_FUNCTION = r'(min|max|avg)\s*\(\s*({}|open|high|low|close|volume|range),\s*[1-9]\d*\s*\)'.format(taRegex) INDICATOR = r'((daily|weekly|monthly)\s+)?({0}|{1})(\s+[1-9]\d*\s+{2}\s+ago)?'.format(INDICATOR_PLAIN, INDICATOR_FUNCTION, PERIOD) #logger.debug(indicator) CP_NAMES = '|'.join([name.lower().replace(' ', '\s+') for name in sorted(utils.get_cp_mapping().keys(), reverse=True)]) CANDLESTICK_PATTERN = r'((?P<timeframe>daily|weekly|monthly)\s+)?(?P<cspattern>{0})'.format(CP_NAMES) IS_ABOVE_BELOW_BETWEEN = r'''(?P<indicator>({0}))\s+(is|was|has\s+been|had\s+been)\s+ 
((?P<more_less>((more|less)\s+than\s+\d+\.?\d*(%|\s+point|\s+points)\s+))?(?P<above_below>above|below)\s+((?P<above_below_indicator>({0}))|(?P<above_below_value>-?\d+\.?\d*)) | ((?P<between>from)\s+((?P<between_indicator1>({0}))|(?P<between_value1>-?\d+\.?\d*))\s+to\s+((?P<between_indicator2>({0}))|(?P<between_value2>-?\d+\.?\d*)))) (\s+for\s+the\s+last\s+(?P<duration>[1-9]\d*\s+{1}))?'''.format(INDICATOR, PERIOD) CROSSED_ABOVE_BELOW = r'''(?P<indicator>({0}))\s+(crossed|has\s+crossed)\s+ (?P<above_below>above|below)\s+((?P<above_below_indicator>({0}))|(?P<above_below_value>-?\d+\.?\d*)) (\s+within\s+the\s+last\s+(?P<duration>[1-9]\d*\s+{1}))?'''.format(INDICATOR, PERIOD) DROPPED_GAINED = r'''(?P<indicator>({0}))\s+(?P<verb>dropped|gained)\s+ (?P<more_less>((more|less)\s+than\s+\d+\.?\d*(%|\s+point|\s+points)))\s+ (over\s+the\s+last\s+(?P<duration>[1-9]\d*\s+{1}))?'''.format(INDICATOR, PERIOD) INCREASING_DECREASING = r'''(?P<indicator>({0}))\s+has\s+been\s+(?P<verb>increasing|decreasing)\s+ (for\s+(?P<duration>[1-9]\d*\s+{1}))'''.format(INDICATOR, PERIOD) REACHED_HIGH_LOW = r'''(?P<indicator>({0}))\s+(reached|has\s+reached)\s+ a\s+new\s+(?P<high_low>([1-9]\d*\s+{1}\s+(high|low))) (\s+within\s+the\s+last\s+(?P<duration>[1-9]\d*\s+{1}))?'''.format(INDICATOR, PERIOD) TOP_BOTTOM = r'''(?P<top_botom>top|bottom)\s+(?P<number>[1-9]\d*)\s+ ((?P<indicator>({0}))|IBD\s+Relative\s+Strength)'''.format(INDICATOR) FORMED = r'''{0}\s+(formed|has\s+formed)(\s+within\s+the\s+last\s+(?P<duration>[1-9]\d*\s+{1}))?'''.format(CANDLESTICK_PATTERN, PERIOD) indicator_1 = r'((?P<timeframe>daily|weekly|monthly)\s+)?(?P<indicator>{0})(?P<offset>\s+[1-9]\d*\s+{1}\s+ago)?'.format(INDICATOR_PLAIN, PERIOD) indicator_2 = r'((?P<timeframe>daily|weekly|monthly)\s+)?(min|max|avg)\s*\(\s*(?P<indicator>{0}),\s*(?P<range>[1-9]\d*)\s*\)\s*(?P<offset>\s+[1-9]\d*\s+{1}\s+ago)?'.format(INDICATOR_PLAIN, PERIOD) PLAIN_INDICATOR_RE = re.compile(indicator_1, re.IGNORECASE | re.VERBOSE) AGGREGATE_INDICATOR_RE = 
re.compile(indicator_2, re.IGNORECASE | re.VERBOSE) def evaluate(a): return eval(a) #eval_nb = nb.njit(evaluate) #so far numba can't be used as njit numpy calculation can return incorrect result (https://github.com/numba/numba/issues/4419) def tolist(pandas_series): return None if pandas_series is None else pandas_series.fillna(-999999999).round(4).tolist() #return None if pandas_series is None else pandas_series.fillna(method='bfill').dropna().round(4).tolist() class MyScreener: def __init__(self): self._id = None self._expression = None self._exchanges = None self._symbols = None self._priceType = None self._volumeType = None self._priceLow = None self._priceHigh = None self._volumeLow = None self._volumeHigh = None self._translation = None self._industries = None @property def id(self): return self._id @id.setter def id(self, value): self._id = value @property def expression(self): return self._expression @expression.setter def expression(self, value): self._expression = value @property def exchanges(self): return self._exchanges @exchanges.setter def exchanges(self, value): self._exchanges = value @property def symbols(self): return self._symbols @symbols.setter def symbols(self, value): self._symbols = value @property def priceType(self): return self._priceType @priceType.setter def priceType(self, value): self._priceType = value @property def volumeType(self): return self._volumeType @volumeType.setter def volumeType(self, value): self._volumeType = value @property def priceLow(self): return self._priceLow @priceLow.setter def priceLow(self, value): self._priceLow = value @property def priceHigh(self): return self._priceHigh @priceHigh.setter def priceHigh(self, value): self._priceHigh = value @property def volumeLow(self): return self._volumeLow @volumeLow.setter def volumeLow(self, value): self._volumeLow = value @property def volumeHigh(self): return self._volumeHigh @volumeHigh.setter def volumeHigh(self, value): self._volumeHigh = value @property def 
translation(self): return self._translation @translation.setter def translation(self, value): self._translation = value @property def industries(self): return self._industries @industries.setter def industries(self, value): self._industries = value def __translate(self, statement): """Translate a statement to a list of expressions. For example, [0, ["weekly", "+di(13)", 0, 13], ["", "above"], ["weekly", "-di(13)", 0, 13], null]. The first value of the list is type: 1 - 'is above/below', 2 - 'is in between', 3 - 'crossed above/below' 4 - 'gained' 4.1 - 'dropped' 5 - 'increase' 5.1 - 'decrease' 6 - 'reach high/low' 7 - 'top' 8 - 'bottom' 99 - 'form' (candlestick pattern) """ sceenerRegex = re.compile(IS_ABOVE_BELOW_BETWEEN, re.IGNORECASE | re.VERBOSE) mo = sceenerRegex.search(statement) if mo is not None: logger.debug(f"{mo.group('indicator')}, {mo.group('more_less')}, {mo.group('above_below')}, {mo.group('above_below_indicator')}, {mo.group('above_below_value')}, \ {mo.group('between')}, {mo.group('between_indicator1')}, {mo.group('between_value1')}, {mo.group('between_indicator2')}, {mo.group('between_value2')}, {mo.group('duration')}") indicator = mo.group('indicator').strip().lower() value1 = getIndicatorComponents(indicator) duration = mo.group('duration') if duration is not None: duration = getOffset(duration.strip(), value1[0]) value1[-1] += duration if mo.group('above_below') is not None: type = 1 comparator = [None if mo.group('more_less') is None else mo.group('more_less').strip(), mo.group('above_below')] value2 = mo.group('above_below_value') if value2 is not None: if duration is not None or statement.rstrip().endswith(value2): return [type, value1, comparator, value2, duration] value2 = mo.group('above_below_indicator') if value2 is not None: value2 = getIndicatorComponents(value2.strip().lower()) if duration is not None: if value1[0] == value2[0]: value2[-1] += duration else: value2[-1] += getOffset(mo.group('duration'), value2[0]) if duration is not 
None or statement.rstrip().endswith(mo.group('above_below_indicator')): return [type, value1, comparator, value2, duration] if mo.group('between') is not None: type = 2 between_value1 = mo.group('between_value1') between_indicator1 = mo.group('between_indicator1') if between_indicator1 is not None: between_indicator1 = mo.group('between_indicator1').strip().lower() between_value1 = getIndicatorComponents(between_indicator1) if duration is not None: if value1[0] == between_value1[0]: between_value1[-1] += duration else: between_value1[-1] += getOffset(mo.group('duration'), between_value1[0]) between_value2 = mo.group('between_value2') if between_value2 is not None: noduration = statement.rstrip().endswith(between_value2) between_indicator2 = mo.group('between_indicator2') if between_indicator2 is not None: noduration = statement.rstrip().endswith(between_indicator2) between_value2 = getIndicatorComponents(between_indicator2.strip().lower()) if duration is not None: if value1[0] == between_value2[0]: between_value2[-1] += duration else: between_value2[-1] += getOffset(mo.group('duration'), between_value2[0]) if between_value1 is not None and between_value2 is not None: if duration is not None or noduration: return [type, value1, between_value1, between_value2, duration] sceenerRegex = re.compile(CROSSED_ABOVE_BELOW, re.IGNORECASE | re.VERBOSE) mo = sceenerRegex.search(statement) if mo is not None: type = 3 logger.debug(f"{mo.group('indicator')}, {mo.group('above_below')}, {mo.group('above_below_indicator')}, {mo.group('above_below_value')}, {mo.group('duration')}") indicator = mo.group('indicator').strip().lower() value1 = getIndicatorComponents(indicator) duration = mo.group('duration') if duration is not None: duration = getOffset(duration.strip(), value1[0]) value1[-1] += duration comparator = mo.group('above_below') value2 = mo.group('above_below_value') if value2 is not None: if duration is not None or statement.rstrip().endswith(value2): return [type, value1, 
comparator, value2, duration] value2 = mo.group('above_below_indicator') if value2 is not None: value2 = getIndicatorComponents(value2.strip().lower()) if duration is not None: if value1[0] == value2[0]: value2[-1] += duration else: value2[-1] += getOffset(mo.group('duration'), value2[0]) if duration is not None or statement.rstrip().endswith(mo.group('above_below_indicator')): return [type, value1, comparator, value2, duration] sceenerRegex = re.compile(DROPPED_GAINED, re.IGNORECASE | re.VERBOSE) mo = sceenerRegex.search(statement) if mo is not None: logger.debug(f"{mo.group('indicator')}, {mo.group('verb')}, {mo.group('more_less')}, {mo.group('duration')}") if mo.group('verb').lower() == 'gained': type = 4 else: type = 4.1 indicator = mo.group('indicator').strip().lower() value1 = getIndicatorComponents(indicator) duration = mo.group('duration') if duration is not None: duration = getOffset(duration.strip(), value1[0]) value1[-1] += duration comparator = mo.group('more_less') if duration is not None or statement.rstrip().endswith(comparator): return [type, value1, comparator, duration] sceenerRegex = re.compile(INCREASING_DECREASING, re.IGNORECASE | re.VERBOSE) mo = sceenerRegex.search(statement) if mo is not None: logger.debug(f"{mo.group('indicator')}, {mo.group('verb')}, {mo.group('duration')}") if mo.group('verb').lower() == 'increasing': type = 5 else: type = 5.1 indicator = mo.group('indicator').strip().lower() value1 = getIndicatorComponents(indicator) duration = mo.group('duration') duration = getOffset(duration.strip(), value1[0]) value1[-1] += duration return [type, value1, duration] sceenerRegex = re.compile(REACHED_HIGH_LOW, re.IGNORECASE | re.VERBOSE) mo = sceenerRegex.search(statement) if mo is not None: type = 6 logger.debug(f"{mo.group('indicator')}, {mo.group('high_low')}, {mo.group('duration')}") indicator = mo.group('indicator').strip().lower() value1 = getIndicatorComponents(indicator) duration = mo.group('duration') if duration is not None: 
duration = getOffset(duration.strip(), value1[0]) value1[-1] += duration value2 = mo.group('high_low') if duration is not None or statement.rstrip().endswith(value2): return [type, value1, value2, duration] sceenerRegex = re.compile(TOP_BOTTOM, re.IGNORECASE | re.VERBOSE) mo = sceenerRegex.search(statement) if mo is not None: logger.debug(f"{mo.group('top_botom')}, {mo.group('number')}, {mo.group('indicator')}") if mo.group('top_botom').lower() == 'top': type = 7 else: type = 8 if mo.group('indicator') is None: value1 = IBDRS else: indicator = mo.group('indicator').strip().lower() value1 = getIndicatorComponents(indicator) return [type, mo.group('number'), value1] sceenerRegex = re.compile(FORMED, re.IGNORECASE | re.VERBOSE) mo = sceenerRegex.search(statement) if mo is not None: type = 99 logger.debug(f"{mo.group('timeframe')}, {mo.group('cspattern')}, {mo.group('duration')}") timeframe = mo.group('timeframe') if timeframe is None: timeframe = 'daily' else: timeframe = timeframe.lower() cspattern = mo.group('cspattern').strip().lower() cspattern = ' '.join(cspattern.split()) #remove extra whitespace duration = mo.group('duration') if duration is not None: duration = getOffset(duration.strip(), timeframe) if duration is not None or statement.rstrip().lower().endswith('formed'): return [type, timeframe, cspattern, duration] errorMessage = f'"{statement}" is unrecognizable, please check against the acceptable syntax, make sure the candlestick pattern name or indicator name is spelled correctly, and required parameters are included.' 
# (http://screenerapp.aifinancials.net/screenerSyntax) logger.error(errorMessage) raise Exception(errorMessage) def __separate(self, expression): expression = expression.replace('\n', ' ').strip() if ('top' in expression.lower() or 'bottom' in expression.lower()) and len(expression) > 50: errorMessage = 'top/bottom expression should be used alone' logger.error(errorMessage) raise Exception(errorMessage) tokens = re.split(r' and | or |[*]', expression, re.IGNORECASE | re.VERBOSE); statements = [] for token in tokens: token = token.strip().replace('[', '').replace(']', '') if len(token) > 0: statements.append(token) return statements @staticmethod def populateIndicators(value, indicators, dataframe): np.seterr(all='warn') name = value[0] + ' ' + value[1] if name not in indicators.keys(): if value[1] in ['open', 'high', 'low', 'close', 'volume', 'range']: if value[1] == 'range': x = "dataframe['" + value[0] + "']['high'] - dataframe['" + value[0] + "']['low']" else: x = "dataframe['" + value[0] + "']['" + value[1] + "']" #with warnings.catch_warnings(): # warnings.filterwarnings('error') try: indicators[name] = eval(x) except: indicators[name] = None else: i = value[1].find('(') key = value[1][:i] parameters = value[1][i+1:] if key in TA_MAPPING.keys(): mapped = TA_MAPPING[key] x = mapped[0] + '(' for j in range(1, len(mapped)-1): x += "dataframe['" + value[0] + "']['" + mapped[j] + "']," if len(parameters) > 1: if 'macd' in key: #swap 1st and 2nd arguments for MACD to conform to the usual order of parameters parameters = parameters[:-1].split(',') if key == 'macd': x += parameters[1] + ',' + parameters[0] + ')' else: x += parameters[1] + ',' + parameters[0] + ',' + parameters[2] + ')' elif key == 'median bollinger band': #the function needs just 1 parameter parameters = parameters[:-1].split(',') x += parameters[0] + ')' else: x += parameters else: x = x[:-1] + ')' #with warnings.catch_warnings(): # warnings.filterwarnings('error') try: indicators[name] = eval(x) 
#eval_nb(x) except: indicators[name] = None else: logger.warning(f'{value[1]} is undefined in ta_mapping') return @staticmethod def getResults(symbol, timeframes, translation): #start = timer() if len(translation) == 1 and list(translation.values())[0][0] in [7, 8] and type(list(translation.values())[0][2]) is str: #IBDRS ibdRelativeStrength = 0 query = f"SELECT ibdRelativeStrength FROM symbols WHERE ticker = '{symbol}'" with contextlib.closing(utils.engine.raw_connection()) as conn: cursor = conn.cursor() cursor.execute(query) row = cursor.fetchone() cursor.close() if row is not None and row[0] is not None: ibdRelativeStrength = row[0] return {symbol: ibdRelativeStrength} dataframe = {} for timeframe, maxPeriod in timeframes.items(): tablename = timeframe + '_quotes' datapoints = maxPeriod * 2 + 50 #500 #query = "SELECT formatted_date, open, high, low, close, adjclose, volume FROM '{0}' WHERE symbol = '{1}' ORDER BY formatted_date DESC LIMIT 500".format(tablename, symbol[0]) query = f"SELECT formatted_date as date, open*adjclose/close as open, high*adjclose/close as high, low*adjclose/close as low, adjclose as close, volume FROM {tablename} WHERE symbol = '{symbol}' ORDER BY formatted_date DESC LIMIT {datapoints}" query = f"SELECT * FROM ({query}) as quotes ORDER BY date ASC" with contextlib.closing(utils.engine.raw_connection()) as conn: df = pd.read_sql_query(query, conn, index_col='date') #df.info(verbose=True) if (df.empty or df.size < 3) and timeframe == 'daily': return None dataframe[timeframe] = df.round(4) #end = timer() #logger.debug(f'retrieved data in {str(100*(end-start))} ms') #logger.debug('got dataframe') if len(translation) == 1 and list(translation.values())[0][0] in [7, 8]: indicators = {} v = list(translation.values())[0] MyScreener.populateIndicators(v[2], indicators, dataframe) name = v[2][0] + ' ' + v[2][1] if indicators[name] is None or len(indicators[name]) <= v[2][2]: return {symbol: 0} else: return {symbol: getIndicatorValue(indicators, 
v[2], 1)} #start = timer() indicators = {} for k, v in translation.items(): if v[0] != 99: MyScreener.populateIndicators(v[1], indicators, dataframe) if len(v) > 3: if v[0] == 2 and type(v[2]) is list: MyScreener.populateIndicators(v[2], indicators, dataframe) if type(v[3]) is list: MyScreener.populateIndicators(v[3], indicators, dataframe) #logger.debug(indicators) #end = timer() #logger.debug(f'populated indicators in {str(100*(end-start))} ms') cp_mapping = dict((k.lower(), v) for k,v in utils.get_cp_mapping().items()) #logger.debug(cp_mapping) results = {} for k, v in translation.items(): results[k] = None try: if v[0] == 1: #'is above/below' if v[4] is None: v[4] = 1 for i in range(1, v[4]+1): name = v[1][0] + ' ' + v[1][1] if indicators[name] is None or len(indicators[name]) <= i: results[k] = False break value1 = getIndicatorValue(indicators, v[1], i) if type(v[3]) is list: index = getIndex(i, v[1][0], v[3][0]) name = v[3][0] + ' ' + v[3][1] if indicators[name] is None or len(indicators[name]) <= index: results[k] = False break value2 = getIndicatorValue(indicators, v[3], index) else: value2 = float(v[3]) if v[2][0] is None: if v[2][1] == 'above': result = (value1 >= value2) else: result = (value1 <= value2) else: mo = re.search(r'\d+\.?\d*', v[2][0]) extra = float(mo.group()) if 'more' in v[2][0]: if v[2][1] == 'above': if '%' in v[2][0]: result = (value1 >= (value2 + abs(value2) * extra/100)) else: result = (value1 >= (value2 + extra)) else: if '%' in v[2][0]: result = (value1 <= (value2 - abs(value2) * extra/100)) else: result = (value1 <= (value2 - extra)) else: if v[2][1] == 'above': if '%' in v[2][0]: result = (value1 >= value2 and value1 < (value2 + abs(value2) * extra/100)) else: result = (value1 >= value2 and value1 < (value2 + extra)) else: if '%' in v[2][0]: result = (value1 <= value2 and value1 > (value2 - abs(value2) * extra/100)) else: result = (value1 <= value2 and value1 > (value2 - extra)) if not result: results[k] = False break if 
results[k] is None: results[k] = True if v[0] == 2: #'is in between' if v[4] is None: v[4] = 1 for i in range(1, v[4]+1): name = v[1][0] + ' ' + v[1][1] if indicators[name] is None or len(indicators[name]) <= i: results[k] = False break value = getIndicatorValue(indicators, v[1], i) if type(v[2]) is list: index = getIndex(i, v[1][0], v[2][0]) name = v[2][0] + ' ' + v[2][1] if indicators[name] is None or len(indicators[name]) <= index: results[k] = False break value1 = getIndicatorValue(indicators, v[2], index) else: value1 = float(v[2]) if type(v[3]) is list: index = getIndex(i, v[1][0], v[3][0]) name = v[3][0] + ' ' + v[3][1] if indicators[name] is None or len(indicators[name]) <= index: results[k] = False break value2 = getIndicatorValue(indicators, v[3], index) else: value2 = float(v[3]) if value1 > value2: result = (value >= value2 and value <= value1) else: result = (value >= value1 and value <= value2) if not result: results[k] = False break if results[k] is None: results[k] = True if v[0] == 3: #'crossed above/below' if v[4] is None: v[4] = 1 for i in range(1, v[4]+1): name = v[1][0] + ' ' + v[1][1] if indicators[name] is None or len(indicators[name]) <= i: results[k] = False break value1 = getIndicatorValue(indicators, v[1], i) value1_1 = getIndicatorValue(indicators, v[1], i+1) if type(v[3]) is list: index = getIndex(i, v[1][0], v[3][0]) name = v[3][0] + ' ' + v[3][1] if indicators[name] is None or len(indicators[name]) <= index: results[k] = False break value2 = getIndicatorValue(indicators, v[3], index) value2_1 = getIndicatorValue(indicators, v[3], index+1) else: value2 = float(v[3]) value2_1 = value2 if v[2] == 'above': result = (value1 >= value2 and value1_1 <= value2_1) else: result = (value1 <= value2 and value1_1 >= value2_1) if result: results[k] = True break if results[k] is None: results[k] = False if v[0] in [4, 4.1]: #['gained', 'dropped'] name = v[1][0] + ' ' + v[1][1] if indicators[name] is None or len(indicators[name]) < 3: results[k] = 
False else: if v[3] is None: v[3] = 1 value1 = getIndicatorValue(indicators, v[1], 1) value2 = getIndicatorValue(indicators, v[1], 1+v[3]) mo = re.search(r'\d+\.?\d*', v[2]) extra = float(mo.group()) if 'more' in v[2]: if v[0] == 4: if '%' in v[2]: result = (value1 >= (value2 + abs(value2) * extra/100)) else: result = (value1 >= (value2 + extra)) else: if '%' in v[2]: result = (value1 <= (value2 - abs(value2) * extra/100)) else: result = (value1 <= (value2 - extra)) else: if v[0] == 4: if '%' in v[2]: result = (value1 >= value2 and value1 < (value2 + abs(value2) * extra/100)) else: result = (value1 >= value2 and value1 < (value2 + extra)) else: if '%' in v[2]: result = (value1 <= value2 and value1 > (value2 - abs(value2) * extra/100)) else: result = (value1 <= value2 and value1 > (value2 - extra)) results[k] = (False if result is None else result) if v[0] in [5, 5.1]: #['increasing', 'decreasing'] name = v[1][0] + ' ' + v[1][1] for i in range(1, v[2]+1): if indicators[name] is None or len(indicators[name]) <= i: results[k] = False break value1 = getIndicatorValue(indicators, v[1], i) value2 = getIndicatorValue(indicators, v[1], i+1) if v[0] == 5: result = (value1 >= value2) else: result = (value1 <= value2) if not result: results[k] = False break if results[k] is None: results[k] = True if v[0] == 6: #'reached high/low' if v[3] is None: v[3] = 1 period = getOffset(v[2].strip().lower(), v[1][0]) name = v[1][0] + ' ' + v[1][1] for i in range(1, v[3]+1): if indicators[name] is None or len(indicators[name]) <= i+period: results[k] = False break value1 = getIndicatorValue(indicators, v[1], i) value2 = [getIndicatorValue(indicators, v[1], i+j) for j in range(period)] if 'high' in v[2]: result = (value1 >= max(value2)) else: result = (value1 <= min(value2)) if result: results[k] = True break if results[k] is None: results[k] = False if v[0] == 99: #formed Candlestick Pattern name = v[2] if 'candlestick pattern' in name: isPatternFound = False sortedByRankMap = 
sorted(cp_mapping.items(), key=lambda x: x[1][2]) if name == 'bullish candlestick pattern': cs_patterns = (kc.lower() for kc,vc in sortedByRankMap if len(vc[0]) > 0 and vc[1] > 0) elif name == 'bearish candlestick pattern': cs_patterns = (kc.lower() for kc,vc in sortedByRankMap if len(vc[0]) > 0 and vc[1] < 0) else: cs_patterns = (kc.lower() for kc,vc in sortedByRankMap if len(vc[0]) > 0 and vc[1] == 0) for cs_pattern in cs_patterns: #sorted by performance rank isPatternFound = isCandlestickPatternFound(cs_pattern, v[3], dataframe[v[1]], cp_mapping) if isPatternFound: break results[k] = isPatternFound else: results[k] = isCandlestickPatternFound(name, v[3], dataframe[v[1]], cp_mapping) #logger.debug(k + ' = ' + str(results[k])) except Exception as e: logger.error(f'{symbol[0]}: {traceback.format_exc()}') return None return results @staticmethod def sceener(symbol, expression, timeframes, translation): utils.engine.dispose() result = False results = MyScreener.getResults(symbol, timeframes, translation) logger.debug(results) if len(translation) == 1 and list(translation.values())[0][0] in [7, 8]: return results if results is not None: #logger.debug('expression: ' + expression) newexpression = expression for k, v in results.items(): newexpression = newexpression.replace(k, str(v)) newexpression = newexpression.replace('[', '(').replace(']', ')').replace('\r', ' ').replace('\n', ' ') #logger.debug('newexpression: ' + newexpression) try: result = eval(newexpression) except Exception as e: logger.error(f'{newexpression}: {traceback.format_exc()}') return None logger.debug(f'{symbol}: {str(result)}') return symbol if result else None @staticmethod def calculateIndicator(timeframe, function, dataframe): np.seterr(all='warn') if function.lower() in ['open', 'high', 'low', 'close', 'volume']: return (function, dataframe[timeframe][function]) i = function.find('(') name = function[:i] parameters = function[i+1:] result = None for k, v in TA_MAPPING.items(): if name.lower() 
== k: x = v[0] + '(' for j in range(1, len(v)-1): x += "dataframe['" + timeframe + "']['" + v[j] + "']," if len(parameters) > 1: key = name.lower() if 'macd' in key: #swap 1st and 2nd arguments for MACD to conform to the usual order of parameters parameters = parameters[:-1].split(',') if key == 'macd': x += parameters[1] + ',' + parameters[0] + ')' else: x += parameters[1] + ',' + parameters[0] + ',' + parameters[2] + ')' else: x += parameters else: x = x[:-1] + ')' #print(x) try: result = eval(x) except Exception as e: logger.error(f'{x}: {traceback.format_exc()}') result = dataframe[timeframe]['close'] result.values[:] = 0 return (k + function[i:], result) errorMessage = f'{name} is undefined in ta_mapping' logger.error(errorMessage) raise Exception(errorMessage) def checkExpression(self, expression): logger.debug('expression: ' + expression) #start = timer() statements = self.__separate(expression) if len(statements) > 0: translation = {} for statement in statements: logger.debug('statement: ' + statement) translation[statement] = self.__translate(statement) logger.debug('translated: ' + str(translation[statement])) #end = timer() #logger.debug(f'translation done in {str(100*(end-start))} ms') timeframes = self.getTimeframes(translation) symbol = 'SPY' #['FSZ-DBA.TO', 204] #this is a test case for exception #start = timer() result = MyScreener.sceener(symbol, expression, timeframes, translation) #end = timer() #logger.debug(f'got result in {str(100*(end-start))} ms') #if len(translation) == 1 and list(translation.values())[0][0] in [7, 8]: # logger.debug(result) #else: # result = result is not None # logger.debug(f'result: {str(result)}') return translation def getTimeframes(self, translationMap): timeframes = {} for translation in translationMap.values(): if translation[0] == 99: duration = translation[-1] if duration is None: duration = 1 if translation[1] not in timeframes: timeframes[translation[1]] = duration elif duration > timeframes[translation[1]]: 
timeframes[translation[1]] = duration elif translation[0] in [7, 8]: if translation[2][0] not in timeframes: timeframes[translation[2][0]] = translation[2][-1] elif translation[2][-1] > timeframes[translation[2][0]]: timeframes[translation[2][0]] = translation[2][-1] else: if translation[1][0] not in timeframes: timeframes[translation[1][0]] = translation[1][-1] elif translation[1][-1] > timeframes[translation[1][0]]: timeframes[translation[1][0]] = translation[1][-1] if len(translation) > 3: if translation[0] == 2 and type(translation[2]) is list: if translation[2][0] not in timeframes: timeframes[translation[2][0]] = translation[2][-1] elif translation[2][-1] > timeframes[translation[2][0]]: timeframes[translation[2][0]] = translation[2][-1] if type(translation[3]) is list: if translation[3][0] not in timeframes: timeframes[translation[3][0]] = translation[3][-1] elif translation[3][-1] > timeframes[translation[3][0]]: timeframes[translation[3][0]] = translation[3][-1] return timeframes def __getAllSymbols(self): price = None if self._priceType is not None and (self._priceLow is not None or self._priceHigh is not None): if self._priceType == 0: price = 'lastDayPrice' elif self._priceType == 1: price = 'avg30DayPrice' elif self._priceType == 2: price = 'avg60DayPrice' elif self._priceType == 3: price = 'avg90DayPrice' volume = None if self._volumeType is not None and (self._volumeLow is not None or self._volumeHigh is not None): if self._volumeType == 0: volume = 'lastDayVolume' elif self._volumeType == 1: volume = 'avg30DayVolume' elif self._volumeType == 2: volume = 'avg60DayVolume' elif self._volumeType == 3: volume = 'avg90DayVolume' lastDate = (datetime.today() - timedelta(days=4)).strftime(utils.date_format) #take into account weekend and holidays query = f"SELECT ticker FROM symbols WHERE active=1 and lastDate >= '{lastDate}'" #only consider symbols that are active and have price up to date if self._symbols is not None: query += " and ticker in (" + ', 
'.join(["'%s'" %symbol for symbol in self._symbols]) + ")" if price is not None: if self._priceLow is not None: query += " and " + price + ">=" + str(self._priceLow) if self._priceHigh is not None: query += " and " + price + "<=" + str(self._priceHigh) if volume is not None: if self._volumeLow is not None: query += " and " + volume + ">=" + str(self._volumeLow) if self._volumeHigh is not None: query += " and " + volume + "<=" + str(self._volumeHigh) if self._industries is not None: query += " and industry in (" + ', '.join(["'%s'" %industry for industry in self._industries.split()]) + ")" #logger.info(query) with contextlib.closing(utils.engine.raw_connection()) as conn: cursor = conn.cursor() cursor.execute(query) rows = cursor.fetchall() cursor.close() return [row[0] for row in rows] def getMatchingSymbols(self): logger.info('screener_id = ' + str(self._id)) if self._translation is None or len(self._translation) == 0: if self._expression is None or len(self._expression) == 0: logger.info('missing expression') return logger.info('new expression to translate: ' + self._expression) statements = self.__separate(self._expression) if len(statements) == 0: logger.info('no valid statement') return translation = {} for statement in statements: translation[statement] = self.__translate(statement) logger.debug('translation: ' + str(translation)) if len(translation) == 0: logger.info('no valid translation') return replaceTranslation(self._id, translation) self._translation = translation matchingSymbols = [] symbols = self.__getAllSymbols() logger.info(f'#symbols: {str(len(symbols))}') if len(symbols) == 0: return matchingSymbols isTop = False isBottom = False translationValue = None if len(self._translation) == 1: translationValue = list(self._translation.values())[0] if translationValue[0] == 7: isTop = True if translationValue[0] == 8: isBottom = True if (isTop or isBottom) and type(translationValue[2]) is str: #IBDRS query = "SELECT ticker FROM symbols WHERE active=1 and 
ticker in ({}) order by ibdRelativeStrength {} limit {}" \ .format(','.join([f"'{symbol}'" for symbol in symbols]), 'desc' if isTop else 'asc', translationValue[1]) with contextlib.closing(utils.engine.raw_connection()) as conn: cursor = conn.cursor() cursor.execute(query) rows = cursor.fetchall() cursor.close() matchingSymbols = [row[0] for row in rows] logger.info(f'#matchingSymbols: {str(len(matchingSymbols))}') return matchingSymbols timeframes = self.getTimeframes(self._translation) """ #do in single process for symbol in symbols: result = MyScreener.sceener(symbol, self._expression, timeframes, self._translation) if result is not None: matchingSymbols.append(result) """ #do with multiprocessing if __name__ == '__main__': parameters = [(symbol, self._expression, timeframes, self._translation) for symbol in symbols] processes = mp.cpu_count() #this process is mainly cpu bound with mp.Pool(processes=processes) as pool: results = pool.starmap(MyScreener.sceener, parameters) if isTop: results = dict((key,d[key]) for d in results for key in d) matchingSymbols = heapq.nlargest(int(translationValue[1]), results, key=results.get) elif isBottom: results = dict((key,d[key]) for d in results for key in d) matchingSymbols = heapq.nsmallest(int(translationValue[1]), results, key=results.get) else: for result in results: if result is not None: matchingSymbols.append(result) logger.info(f'#matchingSymbols: {str(len(matchingSymbols))}') return matchingSymbols def isBlank(myString): if myString and myString.strip(): #myString is not None AND myString is not empty or blank return False #myString is None OR myString is empty or blank return True def getOffset(text, timeframe): if len(text) == 0: return 0 mo = re.search(r'\d+', text) offset = int(mo.group()) if timeframe == 'daily': if 'week' in text: offset *= 5 elif 'month' in text: offset *= 20 if timeframe == 'weekly': if 'day' in text: offset //= 5 elif 'month' in text: offset *= 4 if timeframe == 'monthly': if 'day' in 
text: offset //= 20 elif 'week' in text: offset //= 4 return offset def getMaxPeriod(indicator): maxPeriod = 1 i = indicator.find('(') if i > 0: parameters = indicator[i+1:] if len(parameters) > 1: parameters = parameters[:-1].split(',') for parameter in parameters: period = int(float(parameter)) if period > maxPeriod: maxPeriod = period return maxPeriod def getIndicatorComponents(indicatorString): if not ('min' in indicatorString or 'max' in indicatorString or 'avg' in indicatorString): functionType = 0 matches = PLAIN_INDICATOR_RE.search(indicatorString) timeframe = matches.group('timeframe') if timeframe is None: timeframe = 'daily' else: timeframe = timeframe.lower() indicator = matches.group('indicator') #remove extra whitespace in indicator name_parameters = indicator.split('(') if len(name_parameters) < 2: indicator = ' '.join(indicator.split()) else: indicator = ' '.join(name_parameters[0].strip().split()) + '(' + ''.join(name_parameters[1].strip().split()) offset = matches.group('offset') if offset is None: offset = 0 else: offset = getOffset(offset.strip(), timeframe) maxPeriod = getMaxPeriod(indicator) + offset return [timeframe, indicator, offset, functionType, 0, maxPeriod] else: if 'min' in indicatorString: functionType = 1 elif 'max' in indicatorString: functionType = 2 else: functionType = 3 matches = AGGREGATE_INDICATOR_RE.search(indicatorString) timeframe = matches.group('timeframe') if timeframe is None: timeframe = 'daily' else: timeframe = timeframe.strip() indicator = matches.group('indicator') #remove extra whitespace in indicator name_parameters = indicator.split('(') if len(name_parameters) < 2: indicator = ' '.join(indicator.split()) else: indicator = ' '.join(name_parameters[0].strip().split()) + '(' + ''.join(name_parameters[1].strip().split()) functionRange = int(matches.group('range')) offset = matches.group('offset') if offset is None: offset = 0 else: offset = getOffset(offset.strip(), timeframe) maxPeriod = getMaxPeriod(indicator) + 
functionRange + offset return [timeframe, indicator, offset, functionType, functionRange, maxPeriod] def getIndex(baseIndex, baseTimeframe, timeframe): index = baseIndex if timeframe != baseTimeframe: #timeframe is different from the base, cast longer to shorter one, the other way doesn't make much sense and is ignored if baseTimeframe == 'daily': if timeframe == 'weekly': index = (baseIndex-1) // 5 elif timeframe == 'monthly': index = (baseIndex-1) // 20 if baseTimeframe == 'weekly': if timeframe == 'monthly': index = (baseIndex-1) // 4 return index def getIndicatorValue(indicators, indicatorComponents, index): name = indicatorComponents[0] + ' ' + indicatorComponents[1] if indicatorComponents[3] < 1: #plain indicator value = indicators[name].iloc[-index - indicatorComponents[2]] else: if indicatorComponents[3] == 1: #min function value = indicators[name].iloc[-index-indicatorComponents[4]-indicatorComponents[2] : -index-indicatorComponents[2]].agg('min') elif indicatorComponents[3] == 2: #max function value = indicators[name].iloc[-index-indicatorComponents[4]-indicatorComponents[2] : -index-indicatorComponents[2]].agg('max') elif indicatorComponents[3] == 3: #mean function value = indicators[name].iloc[-index-indicatorComponents[4]-indicatorComponents[2] : -index-indicatorComponents[2]].agg('mean') else: raise Exception(f'Unknown function type {indicatorComponents[3]}') return value def replaceTranslation(screener_id, translationMap): with contextlib.closing(utils.engine.raw_connection()) as conn: cursor = conn.cursor() query = f"DELETE FROM screenertranslation WHERE screener_id = {screener_id}" cursor.execute(query) newrows = [] for statement, translation in translationMap.items(): row = (screener_id, statement, json.dumps(translation)) newrows.append(row) if len(newrows) > 0: query = "INSERT INTO screenertranslation (screener_id, statement, translation) VALUES (%s, %s, %s)" cursor.executemany(query, newrows) conn.commit() cursor.close() def 
isCandlestickPatternFound(name, duration, df, cp_mapping): result = None if cp_mapping[name] is None: result = False else: if duration is None: duration = 1 value = getattr(talib, cp_mapping[name][0])(df['open'], df['high'], df['low'], df['close']) #logger.debug(name + ': ' + str(value)) sign_of_value = cp_mapping[name][1] for i in range(1, duration+1): if sign_of_value > 0 and value.iloc[-i] > 0: result = True break elif sign_of_value < 0 and value.iloc[-i] < 0: result = True break elif sign_of_value == 0 and value.iloc[-i] != 0: result = True break if result is None: result = False return result def runScreeners(region=None, intraday=False): #if intraday: # logging.config.fileConfig("logging_app.cfg") # logger = applogging.getLogger(os.path.basename(__file__)) logger.info('runScreeners - start') myScreeners = [] with contextlib.closing(utils.engine.raw_connection()) as conn: cursor = conn.cursor() query = "SELECT id, expression, priceType, priceLow, priceHigh, volumeType, volumeLow, volumeHigh, exchanges, watchlists, industries, lastUpdate FROM screener" if region is not None: query += " WHERE region = '{}'".format(region) query += " ORDER BY id" cursor.execute(query) screeners = cursor.fetchall() for screener in screeners: if intraday: #run newly created or updated screeners only if screener[0] >= 6: #always include defaultScreeners to copy results from when user created screeners from sample records query = f"SELECT lastUpdate FROM screenerresult WHERE screener_id = {screener[0]}" cursor.execute(query) result = cursor.fetchone() if result is not None and result[0] > screener[-1]: #print(f'screener {screener[0]} skipped') continue #print(f'screener {screener[0]} is going to be run') myScreener = MyScreener() watchlists = screener[9] if isBlank(watchlists): #watchlists take precedence to exchanges myScreener.exchanges = screener[8] else: query = f"SELECT symbols FROM watchlist where id in ({watchlists.replace(' ',',')})" cursor.execute(query) rows = 
cursor.fetchall() symbols = set() for row in rows: symbols.update(row[0].split(' ')) myScreener.symbols = symbols if not isBlank(myScreener.exchanges) or myScreener.symbols is not None: myScreener.id = screener[0] myScreener.expression = screener[1] myScreener.priceType = screener[2] myScreener.priceLow = screener[3] myScreener.priceHigh = screener[4] myScreener.volumeType = screener[5] myScreener.volumeLow = screener[6] myScreener.volumeHigh = screener[7] myScreener.industries = screener[10] query = f"SELECT statement, translation FROM screenertranslation where screener_id = {myScreener.id}" cursor.execute(query) rows = cursor.fetchall() translation = {} for row in rows: translation[row[0]] = json.loads(row[1]) myScreener.translation = translation myScreeners.append(myScreener) cursor.close() defaultScreeners = [] for myScreener in myScreeners: message = None query_result = None if myScreener.id < 6: defaultScreeners.append(myScreener) if intraday: continue #don't run defaultScreeners intraday #query_result = f"SELECT result FROM screenerresult WHERE screener_id = {myScreener.id}" else: for ds in defaultScreeners: if (myScreener.expression == ds.expression and myScreener.exchanges == ds.exchanges and myScreener.industries == ds.industries and myScreener.priceType == ds.priceType and myScreener.priceLow == ds.priceLow and myScreener.priceHigh == ds.priceHigh and myScreener.volumeType == ds.volumeType and myScreener.volumeLow == ds.volumeLow and myScreener.volumeHigh == ds.volumeHigh): query_result = f"SELECT result FROM screenerresult WHERE screener_id = {ds.id}" break; if query_result is not None: #copy result from defaultScreeners when criteria totally match with contextlib.closing(utils.engine.raw_connection()) as conn: cursor = conn.cursor() cursor.execute(query_result) result = cursor.fetchone() cursor.close() if result is not None: message = result[0] if message is None: if myScreener.symbols is None and not isBlank(myScreener.exchanges): query = f"SELECT 
ticker FROM symbols WHERE active=1 and exchange_id in ({myScreener.exchanges.replace(' ',',')})" with contextlib.closing(utils.engine.raw_connection()) as conn: cursor = conn.cursor() cursor.execute(query) rows = cursor.fetchall() cursor.close() myScreener.symbols = [row[0] for row in rows] if len(myScreener.symbols) == 0: continue matchingSymbols = myScreener.getMatchingSymbols() utils.engine.dispose() if len(matchingSymbols) > 0: message = 'Matching symbols: ' + ' '.join(matchingSymbols) else: message = 'No matching symbols' #logger.info(f'screener_id = {myScreener.id}, message = {message}') with contextlib.closing(utils.engine.raw_connection()) as conn: cursor = conn.cursor() query = f"SELECT user_id, name FROM screener WHERE id = {myScreener.id}" cursor.execute(query) screener = cursor.fetchone() screener_name = '' email = None if screener is not None: user_id = screener[0] screener_name = screener[1] if user_id != 1: #send email to non system user query = f"SELECT email FROM user WHERE id = {user_id} and isVerified = 1" cursor.execute(query) result = cursor.fetchone() if result is not None: email = result[0] #query = "UPDATE screener SET result = %s, resultTimestamp = %s WHERE id = %s" query = "INSERT INTO screenerresult (screener_id, result) VALUES (%s, %s) ON DUPLICATE KEY UPDATE result=%s, lastUpdate=UTC_TIMESTAMP()" cursor.execute(query, (myScreener.id, message, message)) conn.commit() cursor.close() if email is not None: subject = f"Result of screener [{screener_name}]" message += utils.mail_signature utils.sendMail(email, subject, message, logger) logger.info('runScreeners - end') def testScreener(id, symbols=None): myScreener = MyScreener() with contextlib.closing(utils.engine.raw_connection()) as conn: cursor = conn.cursor() query = f"SELECT id, expression, priceType, priceLow, priceHigh, volumeType, volumeLow, volumeHigh, exchanges, watchlists, industries FROM screener WHERE id = {id}" cursor.execute(query) screener = cursor.fetchone() if symbols is 
not None: myScreener.symbols = symbols else: watchlists = screener[9] if isBlank(watchlists): #watchlists take precedence to exchanges myScreener.exchanges = screener[8] else: query = f"SELECT symbols FROM watchlist where id in ({watchlists.replace(' ',',')})" cursor.execute(query) rows = cursor.fetchall() symbols = set() for row in rows: symbols.update(row[0].split(' ')) myScreener.symbols = symbols if myScreener.symbols is not None or not isBlank(myScreener.exchanges): myScreener.id = screener[0] myScreener.expression = screener[1] myScreener.priceType = screener[2] myScreener.priceLow = screener[3] myScreener.priceHigh = screener[4] myScreener.volumeType = screener[5] myScreener.volumeLow = screener[6] myScreener.volumeHigh = screener[7] myScreener.industries = screener[10] query = f"SELECT statement, translation FROM screenertranslation where screener_id = {myScreener.id}" cursor.execute(query) rows = cursor.fetchall() translation = {} for row in rows: translation[row[0]] = json.loads(row[1]) myScreener.translation = translation if myScreener.symbols is None and not isBlank(myScreener.exchanges): query = f"SELECT ticker FROM symbols WHERE active=1 and exchange_id in ({myScreener.exchanges.replace(' ',',')})" cursor.execute(query) rows = cursor.fetchall() myScreener.symbols = [row[0] for row in rows] cursor.close() if len(myScreener.symbols) > 0: matchingSymbols = myScreener.getMatchingSymbols() if len(matchingSymbols) > 0: print(id, symbols, 'Matching symbols: ' + ' '.join(matchingSymbols)) else: print(id, symbols, 'No matching symbols') def main(): region = None intraday = False if len(sys.argv) >= 2: try: opts, args = getopt.getopt(sys.argv[1:], "r:i") except getopt.GetoptError: print(f'Usage: {os.path.basename(__file__)} [-r|-i] [<region>|<intraday>]') sys.exit(2) for opt, arg in opts: if opt in ("-r", "--region"): region = arg elif opt in ("-i", "--intraday"): intraday = True if region is not None: region = utils.regions.get(int(region)) if region is None: 
region = 'Americas' runScreeners(region, intraday) if __name__ == '__main__': main() """ filter1 = 'Close 3 days ago has been more than 15% above weekly MA(50) 1 month ago for the last 2 weeks' filter1_1 = 'Volume MA(90) is above 100000.0' filter1_2 = 'Close is from 1.0 to 1999.9' filter2 = '[EMA(10) 2 days ago crossed above 50 or EMA(10) 2 days ago crossed above EMA(50) within the last 5 days]' filter3 = 'weekly EMA(10) 1 week ago dropped more than 30% over the last 1 month' filter4 = '[close 1 day ago is below EMA(23) or close 1 day ago is below EMA(30)]' filter5 = 'EMA(10) 1 week ago reached a new 10 weeks high within the last 6 days' expression = filter1 + '\nand ' + filter1_1 + '\nand ' + filter1_2+ '\nand ' + filter2 + '\nand ' + filter3 + '\nand ' + filter4 + '\nand ' + filter5 with contextlib.closing(utils.engine.raw_connection()) as conn: cursor = conn.cursor() query = "SELECT expression FROM screener WHERE id = 1" cursor.execute(query) row = cursor.fetchone() cursor.close() expression = row[0] #expression = 'Median Bollinger Band (20.0, 2.5) has been increasing for 20 days' #'bottom 20 IBD Relative Strength' #expression = 'Bullish candlestick pattern formed within the last 3 days and close is below EMA(10) and close is above 5 and RSI(7) is below 45 and MA(50) is above MA(200) and MA(50) is above MA(50) 50 days ago' #expression = 'avg(volume, 22) is from volume ma(10) 1 week ago to 9999999999 and MACD(12, 26,9) has crossed above MAX ( MACD Signal ( 12, 26, 9 ), 10) within the last 5 days and Close is more than 5% above MA(10) for the last 10 days and Bullish Harami Cross formed within the last 2 days' #expression = 'Volume MA ( 20 ) is above 100000 and MA(60) is above 20 and Range is above MIN ( Range, 6 ) 1 day ago and High is below High 1 day ago and Low is above Low 1 day ago and MIN(CCI(10),5) is below -100 and Aroon Up(63) is above Aroon Down(63)' myScreener = MyScreener() myScreener.checkExpression(expression) """ #testScreener(1, ['AEM'])
47.665254
308
0.503497
43,734
0.647969
0
0
22,808
0.337926
0
0
14,446
0.214034
523acdc06c5b11e96565a7004e5899f7f30e9391
1,075
py
Python
VideoReaders.py
aegissystems/yolact
29ee13f41a83b62a8946a86cb01e49d8fce0676c
[ "MIT" ]
1
2022-03-25T11:15:44.000Z
2022-03-25T11:15:44.000Z
VideoReaders.py
aegissystems/yolact
29ee13f41a83b62a8946a86cb01e49d8fce0676c
[ "MIT" ]
null
null
null
VideoReaders.py
aegissystems/yolact
29ee13f41a83b62a8946a86cb01e49d8fce0676c
[ "MIT" ]
null
null
null
import cv2 class VideoReader(object): ''' Class docstring for VideoReader(): Provides a generator for video frames. Returns a numpy array in BGR format. ''' def __init__(self, file_name): self.file_name = file_name try: # OpenCV parses an integer to read a webcam. Supplying '0' will use webcam. self.file_name = int(file_name) except ValueError: pass self.read = cv2.VideoCapture(self.file_name) def __iter__(self): self.cap = cv2.VideoCapture(self.file_name) if not self.cap.isOpened(): raise IOError('Video {} cannot be opened'.format(self.file_name)) return self def __next__(self): was_read, img = self.cap.read() if not was_read: raise StopIteration return img def properties(self): self.w, self.h, self.count = self.read.get(cv2.CAP_PROP_FRAME_WIDTH), self.read.get(cv2.CAP_PROP_FRAME_HEIGHT), self.read.get(cv2.CAP_PROP_FRAME_COUNT) return int(self.count), (int(self.h), int(self.w))
34.677419
159
0.63907
1,063
0.988837
0
0
0
0
0
0
234
0.217674
523c83b7a7286c3f75b98ba50343d40590b2e580
4,041
py
Python
OrderBookContainer.py
sturex/pyOrderbookTrading
2020fe3760dbbd7c32570d000a44fc7b229101c7
[ "MIT" ]
19
2019-11-07T13:55:38.000Z
2022-03-12T22:34:50.000Z
OrderBookContainer.py
nomad5am/pyOrderbookTrading
e902165231b8b986062c7c3e4657d888c5f8511c
[ "MIT" ]
6
2019-10-26T04:29:03.000Z
2022-02-10T01:38:39.000Z
OrderBookContainer.py
nomad5am/pyOrderbookTrading
e902165231b8b986062c7c3e4657d888c5f8511c
[ "MIT" ]
8
2019-11-07T13:55:39.000Z
2021-11-11T15:24:45.000Z
import os import json from contextlib import suppress from OrderBook import * from Signal import Signal class OrderBookContainer: def __init__(self, path_to_file): self.order_books = [] self.trades = [] self.cur_directory = os.path.dirname(path_to_file) self.f_name = os.path.split(path_to_file)[1] with open(path_to_file, 'r') as infile: for line in infile: ob = json.loads(line) self.order_books.append(OrderBook(ob)) def create_training_dataset(self): if not self.order_books: return output_dir = os.path.join(self.cur_directory, 'Datasets') with suppress(OSError): os.mkdir(output_dir) dataset_file_path = os.path.splitext(os.path.join(output_dir, self.f_name))[0] + '.ds' best_prices = self.order_books[0].best_prices mid_price = (best_prices['buy_price'] + best_prices['sell_price']) / 2 with open(dataset_file_path, 'w') as json_file: for idx, ob in enumerate(self.order_books[0:-1]): next_best_prices = self.order_books[idx + 1].best_prices next_mid_price = (next_best_prices['buy_price'] + next_best_prices['sell_price']) / 2 if mid_price != next_mid_price: direction = 0 if mid_price > next_mid_price else 1 json.dump({'volumes': ob.volumes, 'direction': direction}, json_file) json_file.write('\n') mid_price = next_mid_price def _open_position(self, best_prices, signal): self.trades.append({}) self.trades[-1]['direction'] = signal self.trades[-1]['open_time'] = best_prices['time']; if signal == Signal.BUY: self.trades[-1]['open_price'] = best_prices['buy_price']; elif signal == Signal.SELL: self.trades[-1]['open_price'] = best_prices['sell_price']; def _close_position(self, best_prices): self.trades[-1]['close_time'] = best_prices['time']; if self.trades[-1]['direction'] == Signal.BUY: self.trades[-1]['close_price'] = best_prices['sell_price']; elif self.trades[-1]['direction'] == Signal.SELL: self.trades[-1]['close_price'] = best_prices['buy_price']; def _reverse_position(self, best_prices, signal): self._close_position(best_prices) self._open_position(best_prices, 
signal) def backtest(self, generator, threshold): self.trades = [] for ob in self.order_books[0:-1]: best_prices = ob.best_prices signal = generator(ob.volumes, threshold) if not self.trades and signal != Signal.WAIT: self._open_position(best_prices, signal) elif signal != self.trades[-1]['direction'] and signal != Signal.WAIT: self._reverse_position(best_prices, signal) if not self.trades: best_prices = self.order_books[-1].best_prices self._close_position(best_prices) return self.trades def backtest_n(self, generator, ffnn, threshold): self.trades = [] for ob in self.order_books[0:-1]: best_prices = ob.best_prices signal = generator(ffnn, ob.volumes, threshold) if not self.trades and signal != Signal.WAIT: self._open_position(best_prices, signal) elif signal != self.trades[-1]['direction'] and signal != Signal.WAIT: self._reverse_position(best_prices, signal) if not self.trades: best_prices = self.order_books[-1].best_prices self._close_position(best_prices) return self.trades
37.073394
102
0.565702
3,924
0.971047
0
0
0
0
0
0
277
0.068547
523c8dafc13dd0a3971afa922c463435db38e743
979
py
Python
leetcode/0753_cracking_the_safe.py
jacquerie/leetcode
a05e6b832eb0e0740aaff7b2eb3109038ad404bf
[ "MIT" ]
3
2018-05-10T09:56:49.000Z
2020-11-07T18:09:42.000Z
leetcode/0753_cracking_the_safe.py
jacquerie/leetcode
a05e6b832eb0e0740aaff7b2eb3109038ad404bf
[ "MIT" ]
null
null
null
leetcode/0753_cracking_the_safe.py
jacquerie/leetcode
a05e6b832eb0e0740aaff7b2eb3109038ad404bf
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- class Solution: def crackSafe(self, n, k): if k == 1: return '0' * n s = self.deBrujin(n, k) return s + s[:n - 1] def deBrujin(self, n, k): """See: https://en.wikipedia.org/wiki/De_Bruijn_sequence#Algorithm""" def _deBrujin(t, p): if t > n: if n % p == 0: sequence.extend(a[1:p + 1]) else: a[t] = a[t - p] _deBrujin(t + 1, p) for j in range(a[t - p] + 1, k): a[t] = j _deBrujin(t + 1, t) a = [0] * k * n sequence = [] _deBrujin(1, 1) return ''.join(str(el) for el in sequence) if __name__ == '__main__': solution = Solution() assert '01' == solution.crackSafe(1, 2) assert '00110' == solution.crackSafe(2, 2) assert '0' == solution.crackSafe(1, 1) assert '00' == solution.crackSafe(2, 1)
26.459459
77
0.447395
718
0.733401
0
0
0
0
0
0
125
0.127681
523e0d5b10bb021e5fa2c2938ba1171583eee803
94
py
Python
estimators/apps.py
bearroast/django-estimators
5dd72694dab6725335214543a59104c4de504037
[ "MIT" ]
46
2016-09-13T06:33:30.000Z
2022-01-08T00:55:37.000Z
estimators/apps.py
bearroast/django-estimators
5dd72694dab6725335214543a59104c4de504037
[ "MIT" ]
14
2016-09-10T04:56:30.000Z
2017-11-28T04:12:43.000Z
estimators/apps.py
bearroast/django-estimators
5dd72694dab6725335214543a59104c4de504037
[ "MIT" ]
19
2016-09-20T23:53:26.000Z
2022-01-08T00:55:39.000Z
from django.apps import AppConfig class EstimatorConfig(AppConfig): name = 'estimators'
15.666667
33
0.765957
57
0.606383
0
0
0
0
0
0
12
0.12766
523e14b2179a0494e84de917951c121e0f704e85
4,507
py
Python
pwny/transfer.py
EntySec/Pwny
d6a8a5edee171dcd3bf3ea1ec49b0ffb03d38531
[ "MIT" ]
7
2021-11-12T23:14:19.000Z
2022-01-16T21:27:42.000Z
pwny/transfer.py
enty8080/Pwny
0723ab9fc03b812e21e4b7cef885ec13e84364ee
[ "MIT" ]
null
null
null
pwny/transfer.py
enty8080/Pwny
0723ab9fc03b812e21e4b7cef885ec13e84364ee
[ "MIT" ]
7
2021-11-10T16:52:57.000Z
2022-02-09T17:11:57.000Z
#!/usr/bin/env python3 # # MIT License # # Copyright (c) 2020-2022 EntySec # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
# import os import json from hatsploit.core.cli.badges import Badges from pex.fs import FS from pex.string import String class Transfer(Badges, FS, String): def pull(self, channel, remote_file, local_path): request = json.dumps({ 'cmd': "download", 'args': remote_file, 'token': '' }) data = channel.send_command(request) if data == 'file': exists, is_dir = self.exists(local_path) if exists: if is_dir: local_path = local_path + '/' + os.path.split(remote_file)[1] self.print_process(f"Downloading {remote_file}...") token = self.random_string(8) channel.send_command(token, False) with open(local_path, 'wb') as f: while True: chunk = channel.read(1024) if token.encode() in chunk: token_index = chunk.index(token.encode()) token_size = len(token) f.write(chunk[:token_index]) break f.write(chunk) self.print_success(f"Saved to {local_path}!") return True elif data == 'directory': self.print_error(f"Remote file: {remote_file}: is a directory!") elif data == 'incorrect': self.print_error(f"Remote file: {remote_file}: does not exist!") else: self.print_error("Implementation error: download: not implemented!") return False def push(self, channel, local_file, remote_path): if self.exists(local_file): entry_token = self.random_string(8) request = json.dumps({ 'cmd': "upload", 'args': remote_path, 'token': entry_token }) data = channel.send_command(request) if data == 'directory': remote_path = remote_path + '/' + os.path.split(local_file)[1] channel.send_command(remote_path, False) elif data != 'file': self.print_error("Implementation error: upload: not implemented!") return False self.print_process(f"Uploading {local_file}...") token = self.random_string(8) status = channel.send_command(token) if status == 'success': with open(local_file, 'rb') as f: data = f.read() max_size = 1024 size = len(data) num_parts = int(size / max_size) + 1 for i in range(0, num_parts): current = i * max_size block = data[current:current + max_size] if block: channel.send(block) status = 
channel.send_command(token) if status == entry_token: self.print_success(f"Saved to {remote_path}!") return True self.print_error(f"Failed to save to {remote_path}!") else: self.print_error(f"Remote directory: {os.path.split(remote_path)[0]}: does not exist!") return False
34.143939
103
0.567783
3,246
0.720213
0
0
0
0
0
0
1,638
0.363435
523e7d0d568c08c8105bcd658164afdaec967f10
7,146
py
Python
clorm/util/wrapper.py
potassco/clorm
e46c50a052f37a083f3884a9c2a79c7c5412bf54
[ "MIT" ]
21
2020-01-07T15:55:54.000Z
2022-02-13T13:07:49.000Z
clorm/util/wrapper.py
potassco/clorm
e46c50a052f37a083f3884a9c2a79c7c5412bf54
[ "MIT" ]
66
2020-01-07T16:08:08.000Z
2022-03-31T07:51:35.000Z
clorm/util/wrapper.py
potassco/clorm
e46c50a052f37a083f3884a9c2a79c7c5412bf54
[ "MIT" ]
5
2020-07-06T17:36:28.000Z
2021-11-01T09:32:05.000Z
'''Functions and classes to wrap existing classes. Provides a wrapper metaclass and also a function that returns a wrapped class. The function is more flexible as a metaclass has multiple inheritence limitations. A wrapper metaclass for building wrapper objects. It is instantiated by specifying a class to be to be wrapped as a parent class with WrapperMetaClass as the metaclass. This creates a wrapped/proxy base class for that type of object. Note: this subverts the subclass mechanism as it does not actually create a subclass of the wrapped class. Instead it is simply used to create the forwarding of the member functions and attributes, while the wrapped class is replaced with object as the parent. Note: if a constructor is provided for the wrapper then it should call init_wrapper manually. It also sets up '_wrapped' and '_wrapped_cls' attributes so these cannot be attributes of the wrapped class. This metaclass is to be used for wrapping clingo.Control, clingo.SolveHandle, and clingo.Model objects. 
Note: some ideas and code have been copied from: https://code.activestate.com/recipes/496741-object-proxying/ ''' import functools import inspect # ------------------------------------------------------------------------------ # Make proxy member functions and properties # ------------------------------------------------------------------------------ def _make_wrapper_function(fn): @functools.wraps(fn) def wrapper(self, *args, **kwargs): func=getattr(self._wrapped,fn.__name__) return func(*args, **kwargs) return wrapper def _make_wrapper_property(name, get_only=True): def getter(self): return getattr(self._wrapped,name) def setter(self,x): return self._wrapped.__setattr__(name,x) return property(getter,setter) def _check_wrapper_object(wrapper,strict=False): ActualType = type(wrapper._wrapped) WrappedType = wrapper._wrapped_cls if issubclass(ActualType,WrappedType): return if strict: raise TypeError(("Invalid proxied object {} not of expected type " "{}").format(wrapper._wrapped,WrappedType)) # Constructor for every Predicate sub-class def init_wrapper(wrapper, *args, **kwargs): Wrapped = wrapper._wrapped_cls if "wrapped_" in kwargs: if len(args) != 0 and len(kwargs) != 1: raise ValueError(("Invalid initialisation: the 'wrapped_' argument " "cannot be combined with other arguments")) wrapper._wrapped = kwargs["wrapped_"] _check_wrapper_object(wrapper,strict=False) else: wrapper._wrapped = Wrapped(*args,**kwargs) # ------------------------------------------------------------------------------ # The wrapper metaclass # ------------------------------------------------------------------------------ class WrapperMetaClass(type): def __new__(meta, name, bases, dct): if len(bases) != 1: raise TypeError("ProxyMetaClass requires exactly one parent class") Wrapped = bases[0] bases = (object,) ignore=["__init__", "__new__", "__dict__", "__weakref__", "__setattr__", "__getattr__"] if "_wrapped_cls" in dct: raise TypeError(("ProxyMetaClass cannot proxy a class with a " 
"\"_wrapped_cls\" attribute: {}").format(PrClass)) dct["_wrapped_cls"] = Wrapped # Mirror the attributes of the proxied class for key,value in Wrapped.__dict__.items(): if key in ignore: continue if key in dct: continue if callable(value): dct[key]=_make_wrapper_function(value) else: dct[key]=_make_wrapper_property(key) # Create the init function if none is provided if "__init__" not in dct: dct["__init__"] = init_wrapper return super(WrapperMetaClass, meta).__new__(meta, name, bases, dct) #------------------------------------------------------------------------------ # Alternative wrapper implementation that doesn't use a metaclass. The metaclass # version is a problem when wrapping a class that already has a metaclass. This # version takes a class to be wrapped and an optional override class. In then # creates an wrapper class that has all the properties and member functions of # the override class as well as the wrapped class (with the override class # overriding any function/property that is common to both). 
# ------------------------------------------------------------------------------ def make_class_wrapper(inputclass, override=None): def getattrdoc(cls, key): if not cls: return None try: attr = getattr(cls,key) if isinstance(attr,property) or callable(attr): return attr.__doc__ return None except: return None Wrapped = inputclass name = inputclass.__name__ w_ignore=set(["__init__", "__new__", "__del__","__weakref__", "__setattr__", "__getattr__", "__module__", "__name__", "__dict__", "__abstractmethods__", "__orig_bases__", "__parameters__", "_abc_impl"]) o_ignore=set(["__module__", "__new__", "__dict__", "__weakref__", "__name__"]) dct = {} if override: for key,value in override.__dict__.items(): if key in o_ignore: continue if key == "__doc__" and not value: continue dct[key] = value if "_wrapped_cls" in dct: raise TypeError(("The overrides cannot contain a " "\"_wrapped_cls\" attribute: {}").format(dct)) dct["_wrapped_cls"] = Wrapped # Mirror the attributes of the proxied class for key,value in Wrapped.__dict__.items(): if key in w_ignore: continue if key in dct: continue if key == "__doc__" and key not in dct: dct[key]=value elif callable(value): dct[key]=_make_wrapper_function(value) else: dct[key]=_make_wrapper_property(key) # Create a basic init function if none is provided if "__init__" not in dct: dct["__init__"] = init_wrapper WrapperClass = type(name,(object,),dct) # print("\n{}".format(name)) # if override: # print ("OVERRIDE: {}".format(override.__dict__.keys())) # print ("WRAPPED: {}\n".format(inputclass.__dict__.keys())) # print("NAME: {} : {}".format(WrapperClass.__name__, WrapperClass.__dict__.keys())) # Now go through and add docstrings if necessary for key in dir(WrapperClass): attr = getattr(WrapperClass, key) if inspect.isclass(attr): continue if callable(attr) and attr.__doc__: continue doc1 = getattrdoc(override,key) doc2 = getattrdoc(inputclass,key) if doc1: attr.__doc__ = doc1 elif doc2: attr.__doc__ = doc2 return WrapperClass 
#------------------------------------------------------------------------------ # main #------------------------------------------------------------------------------ if __name__ == "__main__": raise RuntimeError('Cannot run modules')
39.7
90
0.606913
1,111
0.155472
0
0
145
0.020291
0
0
3,613
0.505598
523ea1126520b1080283acf8904ed417d13b271d
1,558
py
Python
hotel.py
youmitsu/tdr-reservation-clawler
e0c2f806b7ce897ff5d4bad5f1d768cf4a43e4bf
[ "MIT" ]
null
null
null
hotel.py
youmitsu/tdr-reservation-clawler
e0c2f806b7ce897ff5d4bad5f1d768cf4a43e4bf
[ "MIT" ]
null
null
null
hotel.py
youmitsu/tdr-reservation-clawler
e0c2f806b7ce897ff5d4bad5f1d768cf4a43e4bf
[ "MIT" ]
null
null
null
from selenium import webdriver from selenium.webdriver.common.action_chains import ActionChains import time import os import json import sys from lib import setupDriver, setupMode, access, stopUntilLoading, stopUntilSpinnerLoading from loginModule import loginSection from notification import notifySystem, notifySlackForHotel import sectionHotel loginCompleted = False waitingCompleted = False isAllCompleted = False mode = setupMode() driver = setupDriver() while not isAllCompleted: try: ### ログインセクション ### if(not loginCompleted): loginCompleted = loginSection(driver) if(mode == 'step'): onEndLoginSection = input('LoginSection completed. Are you ready to start next step?: (y or n)') if (onEndLoginSection != 'y'): break while(True): #### 待機セクション #### if(not waitingCompleted): waitingCompleted = sectionHotel.waitingSection(driver) # if(mode == 'step'): # onEndWaitingSection = input('onEndWaitingSection completed. Are you ready to start next step?: (y or n)') # if (onEndWaitingSection != 'y'): # break #### 購入セクション #### isAllCompleted = sectionHotel.reservationSection(driver) onEndReservationSection = input('onEndReservationSection completed. Do you want to retry from wating section?: (y or n)') if (onEndReservationSection == 'y'): waitingCompleted = False isAllCompleted = False continue else: break except: print('Error retrying...') time.sleep(5) time.sleep(1000)
27.333333
127
0.689345
0
0
0
0
0
0
0
0
462
0.28803
523f0a8ff5e73981f7f55509044d942b0dbc3daa
862
py
Python
traceback_test.py
tor4z/python_test
6b18110b4e82ad00a065b03d0ee8f7f331b2f874
[ "Unlicense" ]
null
null
null
traceback_test.py
tor4z/python_test
6b18110b4e82ad00a065b03d0ee8f7f331b2f874
[ "Unlicense" ]
null
null
null
traceback_test.py
tor4z/python_test
6b18110b4e82ad00a065b03d0ee8f7f331b2f874
[ "Unlicense" ]
null
null
null
import traceback class A: def __init__(self): pass def tb(self): es = traceback.extract_stack() print(es) fs = es[-2] print(fs.name) print(fs.locals) def another_function(): lumberstack(A()) lumberstack(A()) def lumberstack(a): a.tb() another_function() """ [<FrameSummary file traceback_test.py, line 23 in <module>>, <FrameSummary file traceback_test.py, line 16 in another_function>, <FrameSummary file traceback_test.py, line 21 in lumberstack>, <FrameSummary file traceback_test.py, line 9 in tb>] lumberstack None [<FrameSummary file traceback_test.py, line 23 in <module>>, <FrameSummary file traceback_test.py, line 17 in another_function>, <FrameSummary file traceback_test.py, line 21 in lumberstack>, <FrameSummary file traceback_test.py, line 9 in tb>] lumberstack None """
22.102564
67
0.700696
189
0.219258
0
0
0
0
0
0
532
0.617169
524047780d99a0102c1623f0dbd8a969e54512fc
4,577
py
Python
dev/cpp/cpptarget.py
bowlofstew/client
0d5ae42aaf9863e3871828b6df06170aad17c560
[ "MIT" ]
40
2015-04-15T09:40:23.000Z
2022-02-11T11:07:24.000Z
dev/cpp/cpptarget.py
bowlofstew/client
0d5ae42aaf9863e3871828b6df06170aad17c560
[ "MIT" ]
19
2015-04-15T18:34:53.000Z
2018-11-17T00:11:05.000Z
dev/cpp/cpptarget.py
bowlofstew/client
0d5ae42aaf9863e3871828b6df06170aad17c560
[ "MIT" ]
22
2015-04-15T09:45:46.000Z
2020-09-29T17:04:19.000Z
import os from biicode.common.model.brl.block_cell_name import BlockCellName from biicode.common.model.bii_type import BiiType def _binary_name(name): return os.path.splitext(name.replace("/", "_"))[0] class CPPTarget(object): def __init__(self): self.files = set() # The source files in this target self.dep_targets = set() # set of BlockNames, to which this target depends self.system = set() # These are the included system headers (stdio.h, math.h...) self.include_paths = {} # Initially {Order#: BlockNamePath}. At the end [FullPaths] @property def dep_names(self): return sorted([_binary_name(d) for d in self.dep_targets]) class CPPLibTarget(CPPTarget): template = """ # LIBRARY {library_name} ################################## # with interface {library_name}_interface # Source code files of the library SET(BII_LIB_SRC {files}) # STATIC by default if empty, or SHARED SET(BII_LIB_TYPE {type}) # Dependencies to other libraries (user2_block2, user3_blockX) SET(BII_LIB_DEPS {library_name}_interface {deps}) # System included headers SET(BII_LIB_SYSTEM_HEADERS {system}) # Required include paths SET(BII_LIB_INCLUDE_PATHS {paths}) """ def __init__(self, block_name): CPPTarget.__init__(self) self.name = _binary_name(block_name) self.type = "" # By default, libs are static def dumps(self): content = CPPLibTarget.template.format(library_name=self.name, files="\n\t\t\t".join(sorted(self.files)), type=self.type, deps=" ".join(self.dep_names), system=" ".join(sorted(self.system)), paths="\n\t\t\t\t\t".join(self.include_paths)) return content class CPPExeTarget(CPPTarget): template = """ # EXECUTABLE {exe_name} ################################## SET(BII_{exe_name}_SRC {files}) SET(BII_{exe_name}_DEPS {block_interface} {deps}) # System included headers SET(BII_{exe_name}_SYSTEM_HEADERS {system}) # Required include paths SET(BII_{exe_name}_INCLUDE_PATHS {paths}) """ def __init__(self, main): CPPTarget.__init__(self) assert isinstance(main, BlockCellName) assert not 
BiiType.isCppHeader(main.extension) self.main = main self.files.add(main.cell_name) self.name = _binary_name(main) self.block_interface = _binary_name(main.block_name) + "_interface" self.simple_name = _binary_name(main.cell_name) def dumps(self): content = CPPExeTarget.template.format(block_interface=self.block_interface, exe_name=self.simple_name, files="\n\t\t\t".join(sorted(self.files)), deps=" ".join(self.dep_names), system=" ".join(sorted(self.system)), paths="\n\t\t\t\t\t".join(self.include_paths)) return content class CPPBlockTargets(object): """ All the targets defined in a given block: - 1 Lib - N Exes - There is always an Interface Lib per block, but no parametrization required here """ def __init__(self, block_name): self.block_name = block_name self.is_dep = False # To indicate if lives in deps or blocks folder self.data = set() self.lib = CPPLibTarget(block_name) self.exes = [] # Of CPPExeTargets self.tests = set() # Of CPPExeTargets @property def filename(self): return "bii_%s_vars.cmake" % _binary_name(self.block_name) def dumps(self): exe_list = """# Executables to be created SET(BII_BLOCK_EXES {executables}) SET(BII_BLOCK_TESTS {tests}) """ vars_content = ["# Automatically generated file, do not edit\n" "SET(BII_IS_DEP %s)\n" % self.is_dep] vars_content.append(self.lib.dumps()) exes = [t.simple_name for t in self.exes] tests = [t.simple_name for t in self.tests] exes_list = exe_list.format(executables="\n\t\t\t".join(sorted(exes)), tests="\n\t\t\t".join(sorted(tests))) vars_content.append(exes_list) for exe in self.exes: content = exe.dumps() vars_content.append(content) return "\n".join(vars_content)
36.91129
93
0.592528
4,357
0.951934
0
0
201
0.043915
0
0
1,527
0.333625
52408c78b60c7bc5958da33b916643304a15847c
20,554
py
Python
monk/tf_keras_1/finetune/level_11_optimizers_main.py
Sanskar329/monk_v1
51a497a925ec1fb2c8fef1d51245ea7040a5a65a
[ "Apache-2.0" ]
7
2020-07-26T08:37:29.000Z
2020-10-30T10:23:11.000Z
monk/tf_keras_1/finetune/level_11_optimizers_main.py
mursalfk/monk_v1
62f34a52f242772186ffff7e56764e958fbcd920
[ "Apache-2.0" ]
null
null
null
monk/tf_keras_1/finetune/level_11_optimizers_main.py
mursalfk/monk_v1
62f34a52f242772186ffff7e56764e958fbcd920
[ "Apache-2.0" ]
null
null
null
from tf_keras_1.finetune.imports import * from system.imports import * from tf_keras_1.finetune.level_10_schedulers_main import prototype_schedulers class prototype_optimizers(prototype_schedulers): ''' Main class for all optimizers in expert mode Args: verbose (int): Set verbosity levels 0 - Print Nothing 1 - Print desired details ''' @accepts("self", verbose=int, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def __init__(self, verbose=1): super().__init__(verbose=verbose); ############################################################################################################################################### @warning_checks(None, ["lt", 1], momentum=["lt", 1.5], weight_decay=["lt", 0.01], momentum_dampening_rate=None, clipnorm=None, clipvalue=None, post_trace=False) @error_checks(None, ["gt", 0], momentum=["gte", 0], weight_decay=["gte", 0], momentum_dampening_rate=None, clipnorm=None, clipvalue=None, post_trace=False) @accepts("self", [int, float], momentum=[int, float], weight_decay=[int, float], momentum_dampening_rate=[int, float], clipnorm=[int, float], clipvalue=[int, float], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def optimizer_sgd(self, learning_rate, momentum=0, weight_decay=0, momentum_dampening_rate=0, clipnorm=0.0, clipvalue=0.0): ''' Select stochastic gradient descent optimizer Args: learning_rate (float): Initial base learning rate momentum (float): Momentum value for driving the weights towards minima weight_decay (float): Value for regularizing weights post every update momentum_dampening_rate (float): Reduction rate for momentum clipnorm (float): Gradient clipping factor clipvalue (float): Value for clipping Returns: None ''' self.system_dict = sgd(self.system_dict, learning_rate, momentum=momentum, weight_decay=weight_decay, momentum_dampening_rate=momentum_dampening_rate, clipnorm=clipnorm, clipvalue=clipvalue); self.custom_print("Optimizer"); self.custom_print(" Name: 
{}".format(self.system_dict["hyper-parameters"]["optimizer"]["name"])); self.custom_print(" Learning rate: {}".format(self.system_dict["hyper-parameters"]["learning_rate"])); self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["params"])); self.custom_print(""); ConstraintWarning("ArgumentWarning: clipnorm and clipvalue are active only for keras in current version of Monk"); self.custom_print(""); ConstraintWarning("ArgumentWarning: momentum_dampening_rate is active only for pytorch in current version of Monk"); self.custom_print(""); ############################################################################################################################################### ############################################################################################################################################### @warning_checks(None, ["lt", 1], momentum=["lt", 1.5], weight_decay=["lt", 0.01], momentum_dampening_rate=None, clipnorm=None, clipvalue=None, post_trace=False) @error_checks(None, ["gt", 0], momentum=["gte", 0], weight_decay=["gte", 0], momentum_dampening_rate=None, clipnorm=None, clipvalue=None, post_trace=False) @accepts("self", [int, float], momentum=[int, float], weight_decay=[int, float], momentum_dampening_rate=[int, float], clipnorm=[int, float], clipvalue=[int, float], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def optimizer_nesterov_sgd(self, learning_rate, momentum=0, weight_decay=0, momentum_dampening_rate=0, clipnorm=0.0, clipvalue=0.0): ''' Select stochastic gradient descent optimizer with nesterov acceleration Args: learning_rate (float): Initial base learning rate momentum (float): Momentum value for driving the weights towards minima weight_decay (float): Value for regularizing weights post every update momentum_dampening_rate (float): Reduction rate for momentum clipnorm (float): Gradient clipping factor clipvalue (float): Value for clipping Returns: None ''' self.system_dict 
= nesterov_sgd(self.system_dict, learning_rate, momentum=momentum, weight_decay=weight_decay, momentum_dampening_rate=momentum_dampening_rate, clipnorm=clipnorm, clipvalue=clipvalue); self.custom_print("Optimizer"); self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["name"])); self.custom_print(" Learning rate: {}".format(self.system_dict["hyper-parameters"]["learning_rate"])); self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["params"])); self.custom_print(""); ConstraintWarning("ArgumentWarning: clipnorm and clipvalue are active only for keras in current version of Monk"); self.custom_print(""); ConstraintWarning("ArgumentWarning: momentum_dampening_rate is active only for pytorch in current version of Monk"); self.custom_print(""); ############################################################################################################################################### ############################################################################################################################################### @warning_checks(None, ["lt", 1], decay_rate=["lt", 1], epsilon=["lt", 0.001], weight_decay=["lt", 0.01], clipnorm=None, clipvalue=None, post_trace=None) @error_checks(None, ["gt", 0], decay_rate=["gt", 0], epsilon=["gte", 0], weight_decay=["gte", 0], clipnorm=None, clipvalue=None, post_trace=False) @accepts("self", [int, float], decay_rate=[int, float], epsilon=[int, float], weight_decay=[int, float], clipnorm=[int, float], clipvalue=[int, float], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def optimizer_rmsprop(self, learning_rate, decay_rate=0.99, epsilon=1e-08, weight_decay=0, clipnorm=0.0, clipvalue=0.0): ''' Select root mean score prop optimizer Args: learning_rate (float): Initial base learning rate decay_rate (float): A decay factor of moving average over past squared gradient. 
epsilon (float): A value to avoid division by zero weight_decay (float): Value for regularizing weights post every update clipnorm (float): Gradient clipping factor clipvalue (float): Value for clipping Returns: None ''' self.system_dict = rmsprop(self.system_dict , learning_rate, decay_rate=decay_rate, epsilon=epsilon, weight_decay=weight_decay, clipnorm=clipnorm, clipvalue=clipvalue); self.custom_print("Optimizer"); self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["name"])); self.custom_print(" Learning rate: {}".format(self.system_dict["hyper-parameters"]["learning_rate"])); self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["params"])); self.custom_print(""); ConstraintWarning("ArgumentWarning: clipnorm and clipvalue are active only for keras in current version of Monk"); self.custom_print(""); ############################################################################################################################################### ############################################################################################################################################### @warning_checks(None, ["lt, 1"], beta1=["lt", 1], beta2=["lt", 1], epsilon=["lt", 0.001], weight_decay=["lt", 0.01], amsgrad=None, clipnorm=None, clipvalue=None, post_trace=False) @error_checks(None, ["gt", 0], beta1=["gte", 0], beta2=["gte", 0], epssilon=["gte", 0], weight_decay=["gte", 0], amsgrad=None, clipnorm=None, clipvalue=None, post_trace=False) @accepts("self", [int, float], beta1=[int, float], beta2=[int, float], epsilon=[int, float], weight_decay=[int, float], amsgrad=bool, clipnorm=[int, float], clipvalue=[int, float], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def optimizer_adam(self, learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, weight_decay=0, amsgrad=False, clipnorm=0.0, clipvalue=0.0): ''' Select ADAM optimizer Args: learning_rate (float): Initial base learning rate 
beta1 (float): Exponential decay rate for first momentum estimates beta2 (float): Exponential decay rate for first second estimates weight_decay (float): Value for regularizing weights post every update amsgrad (bool): If True, AMSGrad variant of this algorithm is used epsilon (float): A value to avoid division by zero clipnorm (float): Gradient clipping factor clipvalue (float): Value for clipping Returns: None ''' self.system_dict = adam(self.system_dict, learning_rate, beta1=beta1, beta2=beta2, epsilon=epsilon, weight_decay=weight_decay, amsgrad=amsgrad, clipnorm=clipnorm, clipvalue=clipvalue); self.custom_print("Optimizer"); self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["name"])); self.custom_print(" Learning rate: {}".format(self.system_dict["hyper-parameters"]["learning_rate"])); self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["params"])); self.custom_print(""); ConstraintWarning("ArgumentWarning: clipnorm and clipvalue are active only for keras in current version of Monk"); self.custom_print(""); ConstraintWarning("ArgumentWarning: amsgrad is active only for keras and pytorch in current version of Monk"); self.custom_print(""); ############################################################################################################################################### ############################################################################################################################################### @warning_checks(None, ["lt, 1"], beta1=["lt", 1], beta2=["lt", 1], epsilon=["lt", 0.001], weight_decay=["lt", 0.01], amsgrad=None, momentum_decay=None, clipnorm=None, clipvalue=None, post_trace=False) @error_checks(None, ["gt", 0], beta1=["gte", 0], beta2=["gte", 0], epssilon=["gte", 0], weight_decay=["gte", 0], amsgrad=None, momentum_decay=None, clipnorm=None, clipvalue=None, post_trace=False) @accepts("self", [int, float], beta1=[int, float], beta2=[int, float], 
epsilon=[int, float], weight_decay=[int, float], amsgrad=bool, momentum_decay=[int, float], clipnorm=[int, float], clipvalue=[int, float], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def optimizer_nesterov_adam(self, learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, weight_decay=0, amsgrad=False, momentum_decay=0.004, clipnorm=0.0, clipvalue=0.0): ''' Select ADAM optimizer with nesterov momentum acceleration Args: learning_rate (float): Initial base learning rate beta1 (float): Exponential decay rate for first momentum estimates beta2 (float): Exponential decay rate for first second estimates weight_decay (float): Value for regularizing weights post every update amsgrad (bool): If True, AMSGrad variant of this algorithm is used epsilon (float): A value to avoid division by zero clipnorm (float): Gradient clipping factor clipvalue (float): Value for clipping Returns: None ''' self.system_dict = nesterov_adam(self.system_dict, learning_rate, beta1=beta1, beta2=beta2, epsilon=epsilon, weight_decay=weight_decay, amsgrad=amsgrad, momentum_decay=momentum_decay, clipnorm=clipnorm, clipvalue=clipvalue); self.custom_print("Optimizer"); self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["name"])); self.custom_print(" Learning rate: {}".format(self.system_dict["hyper-parameters"]["learning_rate"])); self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["params"])); self.custom_print(""); ConstraintWarning("OptimizerWarning: nesterov adam is active only for keras and gluon in current version of Monk"); self.custom_print(""); ConstraintWarning("ArgumentWarning: amsgrad is inactive in current version of Monk"); self.custom_print(""); ############################################################################################################################################### 
############################################################################################################################################### @warning_checks(None, ["lt", 1], beta1=["lt", 1], beta2=["lt", 1], epsilon=["lt", 0.001], weight_decay=["lt", 0.01], clipnorm=None, clipvalue=None, post_trace=False) @error_checks(None, ["gt", 0], beta1=["gte", 0], beta2=["gte", 0], epsilon=["gte", 0], weight_decay=["gte", 0], clipnorm=None, clipvalue=None, post_trace=False) @accepts("self", [int, float], beta1=[int, float], beta2=[int, float], epsilon=[int, float], weight_decay=[int, float], clipnorm=[int, float], clipvalue=[int, float], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def optimizer_adamax(self, learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, weight_decay=0, clipnorm=0.0, clipvalue=0.0): ''' Select Adamax optimizer Args: learning_rate (float): Initial base learning rate beta1 (float): Exponential decay rate for first momentum estimates beta2 (float): Exponential decay rate for first second estimates weight_decay (float): Value for regularizing weights post every update epsilon (float): A value to avoid division by zero clipnorm (float): Gradient clipping factor clipvalue (float): Value for clipping Returns: None ''' self.system_dict = adamax(self.system_dict, learning_rate, beta1=beta1, beta2=beta2, epsilon=epsilon, weight_decay=weight_decay, clipnorm=clipnorm, clipvalue=clipvalue); self.custom_print("Optimizer"); self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["name"])); self.custom_print(" Learning rate: {}".format(self.system_dict["hyper-parameters"]["learning_rate"])); self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["params"])); self.custom_print(""); ConstraintWarning("ArgumentWarning: clipnorm and clipvalue are active only for keras in current version of Monk"); self.custom_print(""); 
############################################################################################################################################### ############################################################################################################################################### @warning_checks(None, ["lt", 1], rho=["lt", 1], epsilon=["lt", 0.001], weight_decay=["lt", 0.01], clipnorm=None, clipvalue=None, post_trace=False) @error_checks(None, ["gt", 0], rho=["gt", 0], epsilon=["gte", 0], weight_decay=["gte", 0], clipnorm=None, clipvalue=None, post_trace=False) @accepts("self", [int, float], rho=[int, float], epsilon=[int, float], weight_decay=[int, float], clipnorm=[int, float], clipvalue=[int, float], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def optimizer_adadelta(self, learning_rate, rho=0.9, epsilon=1e-06, weight_decay=0, clipnorm=0.0, clipvalue=0.0): ''' Select Adadelta optimizer Args: learning_rate (float): Initial base learning rate rho (float): Exponential decay rate for momentum estimates weight_decay (float): Value for regularizing weights post every update epsilon (float): A value to avoid division by zero clipnorm (float): Gradient clipping factor clipvalue (float): Value for clipping Returns: None ''' self.system_dict = adadelta(self.system_dict, learning_rate, rho=rho, epsilon=epsilon, weight_decay=weight_decay, clipnorm=clipnorm, clipvalue=clipvalue); self.custom_print("Optimizer"); self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["name"])); self.custom_print(" Learning rate: {}".format(self.system_dict["hyper-parameters"]["learning_rate"])); self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["params"])); self.custom_print(""); ConstraintWarning("ArgumentWarning: clipnorm and clipvalue are active only for keras in current version of Monk"); self.custom_print(""); 
############################################################################################################################################### ############################################################################################################################################### @warning_checks(None, ["lt", 1], learning_rate_decay=None, weight_decay=["lt", 0.01], epsilon=None, clipnorm=None, clipvalue=None, post_trace=False) @error_checks(None, ["gt", 0], learning_rate_decay=None, weight_decay=["gte", 0], epsilon=None, clipnorm=None, clipvalue=None, post_trace=False) @accepts("self", [int, float], learning_rate_decay=[int, float], weight_decay=[int, float], epsilon=[int, float], clipnorm=[int, float], clipvalue=[int, float], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def optimizer_adagrad(self, learning_rate, learning_rate_decay=0, weight_decay=0, epsilon=1e-08, clipnorm=0.0, clipvalue=0.0): ''' Select Adagrad optimizer Args: learning_rate (float): Initial base learning rate learning_rate_decay (float): Learning rate decay factor weight_decay (float): Value for regularizing weights post every update epsilon (float): A value to avoid division by zero clipnorm (float): Gradient clipping factor clipvalue (float): Value for clipping Returns: None ''' self.system_dict = adagrad(self.system_dict, learning_rate, learning_rate_decay=learning_rate_decay, weight_decay=weight_decay, epsilon=epsilon, clipnorm=clipnorm, clipvalue=clipvalue); self.custom_print("Optimizer"); self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["name"])); self.custom_print(" Learning rate: {}".format(self.system_dict["hyper-parameters"]["learning_rate"])); self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["params"])); self.custom_print(""); ConstraintWarning("ArgumentWarning: clipnorm and clipvalue are active only for keras in current version of Monk"); self.custom_print(""); 
ConstraintWarning("ArgumentWarning: learning_rate_decay is active only for pytorch in current version of Monk"); self.custom_print(""); ###############################################################################################################################################
58.062147
151
0.579352
20,377
0.991389
0
0
17,685
0.860416
0
0
10,405
0.506227
524200af22407131ff0b4d610415254327069f0f
1,078
py
Python
test/com/facebook/buck/core/module/impl/test_app.py
Unknoob/buck
2dfc734354b326f2f66896dde7746a11965d5a13
[ "Apache-2.0" ]
8,027
2015-01-02T05:31:44.000Z
2022-03-31T07:08:09.000Z
test/com/facebook/buck/core/module/impl/test_app.py
Unknoob/buck
2dfc734354b326f2f66896dde7746a11965d5a13
[ "Apache-2.0" ]
2,355
2015-01-01T15:30:53.000Z
2022-03-30T20:21:16.000Z
test/com/facebook/buck/core/module/impl/test_app.py
Unknoob/buck
2dfc734354b326f2f66896dde7746a11965d5a13
[ "Apache-2.0" ]
1,280
2015-01-09T03:29:04.000Z
2022-03-30T15:14:14.000Z
# Copyright (c) Facebook, Inc. and its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import subprocess import unittest class TestApp(unittest.TestCase): """ This is a Python test that allows to do testing of arbitrary applications The main purpose of using this approach is to provide an ability to run tests on Windows (which doesn't support sh_test). The command is passed to this test using `CMD` environment variable. """ def test_app(self): self.assertEquals(0, subprocess.call(os.environ["CMD"].split(" ")))
33.6875
92
0.737477
433
0.40167
0
0
0
0
0
0
886
0.821892
524335c52fc4efb7aa6390e9a66eced8d1cd6c0e
1,488
py
Python
models/FastNeuralStyleTransferModel.py
taivu1998/GANime
a1d1569a1797f3fc50159475de2e3d47697abfed
[ "MIT" ]
24
2020-03-20T05:43:16.000Z
2022-03-23T22:09:35.000Z
models/FastNeuralStyleTransferModel.py
bobyang9/GANime
c4e98274cc8ecddda0d6273c5d2670a8d356648f
[ "MIT" ]
null
null
null
models/FastNeuralStyleTransferModel.py
bobyang9/GANime
c4e98274cc8ecddda0d6273c5d2670a8d356648f
[ "MIT" ]
2
2020-05-24T23:07:08.000Z
2021-04-02T11:33:35.000Z
''' This program implements a Fast Neural Style Transfer model. References: https://www.tensorflow.org/tutorials/generative/style_transfer ''' from __future__ import absolute_import, division, print_function, unicode_literals import tensorflow as tf import tensorflow_hub as hub import os, sys import time import numpy as np from BaseModel import BaseModel from utils.data_pipeline import * class FastNeuralStyleTransfer(BaseModel): ''' A Fast Neural Style Transfer model. ''' def __init__(self): ''' Initializes the class. ''' super().__init__() def build_model(self): ''' Builds network architectures. ''' hub_module_path = 'https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/1' self.hub_module = hub.load(hub_module_path) def fit(self, content_image, style_image, output_path = 'stylized_image_fast.png'): ''' Trains the model. ''' start = time.time() self.stylized_image = self.hub_module(tf.constant(content_image), tf.constant(style_image))[0] self.save_output(output_path) end = time.time() print("Total time: {:.1f}".format(end - start)) def predict(self): ''' Generates an output image from an input. ''' return self.stylized_image def save_output(self, img_path): ''' Saves the output image. ''' output = tensor_to_image(self.stylized_image) output.save(img_path)
29.76
102
0.672043
1,084
0.728495
0
0
0
0
0
0
477
0.320565
5243ca3160b48deb0f80a4363230359abdd1c1a8
322
py
Python
editor/urls.py
AndersonBY/p5py
f002caf94df800e29173f78931f5db90003cf4ae
[ "MIT" ]
15
2019-12-13T04:25:23.000Z
2021-11-21T06:32:25.000Z
editor/urls.py
AndersonHJB/p5py_update
95b6bdf5353e70443b3e7444e6a698c0fb96aa2a
[ "MIT" ]
8
2020-01-07T22:30:55.000Z
2021-08-20T00:32:42.000Z
editor/urls.py
AndersonHJB/p5py_update
95b6bdf5353e70443b3e7444e6a698c0fb96aa2a
[ "MIT" ]
4
2020-01-03T19:18:40.000Z
2021-06-26T14:10:55.000Z
# -*- coding: utf-8 -*- # @Author: Anderson # @Date: 2019-04-25 00:30:09 # @Last Modified by: ander # @Last Modified time: 2019-12-07 01:14:16 from django.urls import path from . import views urlpatterns = [ path("", views.editor, name="editor"), path("upload_code", views.upload_code, name="upload_code") ]
23
62
0.658385
0
0
0
0
0
0
0
0
178
0.552795
524438036b4836d23308da7e1f4538ef603e1930
47
py
Python
Data Science Bootcamp for Beginners/4.py
yuto-moriizumi/Python
2de2903179f187c3c7105e8cf2f9600dded21f25
[ "MIT" ]
null
null
null
Data Science Bootcamp for Beginners/4.py
yuto-moriizumi/Python
2de2903179f187c3c7105e8cf2f9600dded21f25
[ "MIT" ]
null
null
null
Data Science Bootcamp for Beginners/4.py
yuto-moriizumi/Python
2de2903179f187c3c7105e8cf2f9600dded21f25
[ "MIT" ]
null
null
null
s = "Rats live on no evil star" print(s[::-1])
15.666667
31
0.595745
0
0
0
0
0
0
0
0
27
0.574468
52469f8ecb67d529ba2fdbe487fcda4b777dab68
4,168
py
Python
models/get_train_stats.py
olavosamp/semiauto-video-annotation
b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd
[ "MIT" ]
null
null
null
models/get_train_stats.py
olavosamp/semiauto-video-annotation
b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd
[ "MIT" ]
20
2019-07-15T21:49:29.000Z
2020-01-09T14:35:03.000Z
models/get_train_stats.py
olavosamp/semiauto-video-annotation
b1a46f9c0ad3bdcedab76b4cd730747ee2afd2fd
[ "MIT" ]
null
null
null
import numpy as np import torchvision.datasets as datasets from pathlib import Path import libs.dirs as dirs import libs.utils as utils import libs.dataset_utils as dutils import models.utils as mutils import libs.commons as commons from libs.vis_functions import plot_confusion_matrix def wrapper_train(epochs, model_path, history_path, dataset_path): seed = None device_id = 0 numImgBatch = 256 use_weights = True # ImageNet statistics dataTransforms = mutils.resnet_transforms(commons.IMAGENET_MEAN, commons.IMAGENET_STD) # Load Dataset objects for train and val sets from folder sets = ['train', 'val'] imageDataset = {} for phase in sets: f = dataset_path / phase imageDataset[phase] = datasets.ImageFolder(str(f), transform=dataTransforms[phase], is_valid_file=utils.check_empty_file) history, _ = mutils.train_network(dataset_path, dataTransforms, epochs=epochs, batch_size=numImgBatch, model_path=model_path, history_path=history_path, seed=seed, weighted_loss=use_weights, device_id=device_id) # Get best epoch results bestValIndex = np.argmin(history['loss-val']) bestValLoss = history['loss-val'][bestValIndex] bestValAcc = history['acc-val'][bestValIndex] confMat = history['conf-val'][bestValIndex] return bestValLoss, bestValAcc, confMat if __name__ == "__main__": numEvals = 5 net_type = dutils.get_input_network_type(commons.network_types) val_type = dutils.get_input_network_type(commons.val_types, message="validation set") rede = int(input("\nEnter net number.\n")) numEpochs = 25 # Dataset root folder datasetPath = Path(dirs.dataset) / "{}_dataset_rede_{}_val_{}".format(net_type, rede, val_type) datasetName = datasetPath.stem modelFolder = Path(dirs.saved_models) / \ "{}_{}_epochs".format(datasetName, numEpochs) historyFolder = Path(dirs.saved_models) / \ "history_{}_{}_epochs".format(datasetName, numEpochs) filePath = Path(dirs.results) / \ "log_evaluation_{}_{}_epochs.txt".format(datasetName, numEpochs) confMatPath = Path(dirs.results) / \ 
"confusion_matrix_{}.pdf".format(datasetName) valLoss = [] valAcc = [] print() # Run function many times and save best results for i in range(numEvals): print("\nStarting run number {}/{}.\n".format(i+1, numEvals)) modelPath = modelFolder / "model_run_{}.pt".format(i) historyPath = historyFolder / "history_run_{}.pickle".format(i) roundValLoss, roundValAcc, confMat = wrapper_train(numEpochs, modelPath, historyPath, datasetPath) valLoss.append(roundValLoss) classAcc = mutils.compute_class_acc(confMat) avgAcc = np.mean(classAcc) valAcc.append(roundValAcc) print("Debug\nAvg acc: {:.3f}".format(avgAcc)) print("other acc: {:.3f}\n".format(roundValAcc)) # Save best confusion matrix if np.argmin(valLoss) == i: bestConfMat = confMat printString = "" printString += "\nFinished training {} evaluation runs for dataset\n{}\n".format(numEvals, datasetPath) printString += "\nResulting statistics:\n\ Val Loss:\n\ Mean: {:.3f}\n\ Std : {:.3f}\n\ Val Avg Acc:\n\ Mean: {:.5f}\n\ Std {:.5f}\n".format(np.mean(valLoss), np.std(valLoss), np.mean(valAcc), np.std(valAcc)) print(printString) with open(filePath, mode='w') as f: f.write(printString) title = "Confusion Matrix "+str(datasetName) plot_confusion_matrix(confMat, title=title, normalize=True, show=False, save_path=confMatPath) # print("Conf matrix:") # print(confMat)
37.890909
107
0.608205
0
0
0
0
0
0
0
0
817
0.196017
524714f34c51c5c8dec83ff3feb40db05c434676
6,851
py
Python
pre_processing.py
ziyuanli17/Vessel-Segmentation
9954050b6f30a8da370ee8948083a2ab38b3580b
[ "MIT" ]
null
null
null
pre_processing.py
ziyuanli17/Vessel-Segmentation
9954050b6f30a8da370ee8948083a2ab38b3580b
[ "MIT" ]
null
null
null
pre_processing.py
ziyuanli17/Vessel-Segmentation
9954050b6f30a8da370ee8948083a2ab38b3580b
[ "MIT" ]
null
null
null
import cv2 as cv import sys import numpy as np import tifffile as ti import argparse import itertools max_lowThreshold = 100 window_name = 'Edge Map' title_trackbar = 'Min Threshold:' ratio = 3 kernel_size = 3 def CannyThreshold(val): low_threshold = val #img_blur = cv.blur(src_gray, (3,3)) detected_edges = cv.Canny(src_gray, low_threshold, low_threshold*ratio, kernel_size) mask = detected_edges != 0 dst = src * (mask[:,:,None].astype(src.dtype)) cv.imshow(window_name, dst) # Sort grey image colors by frequency of appearance def freq_sort(l): flat_list = [] for sublist in l: for item in sublist: flat_list.append(item) frequencies = {} for item in flat_list: if item in frequencies: frequencies[item] += 1 else: frequencies[item] = 1 return sorted(frequencies.items(), key=lambda x: x[1], reverse=True) # Remove colors of selection ranked by frequency def gray_filter(img, p_map, start, end): # Slice the color range p_map = p_map[start:end] # Break down the dic selected_colors = [] for p in p_map: selected_colors.append(p[0]) # Replace out-off-range colors with black r_len = len(img) c_len = len(img[0]) for i in range(r_len): for j in range(c_len): if img[i][j] not in selected_colors: img[i][j] = 0 return img # Remove disconnected noises def de_noise(img, kernel_size=1, criteria=4, iterations=4, remove_all=False): cur = 0 r_len = len(img) c_len = len(img[0]) while cur < iterations: cur += 1 for i in range(r_len): for j in range(c_len): # If the iterated pixel is already black if img[i][j] == 0: continue try: # X, Y = np.mgrid[j:j+kernel_size, i:i+kernel_size] # print(np.vstack((X.ravel(), Y.ravel()))) # exit(1) # Put adjacent pixels with given kernel size into the list p_list = [] indices = [p for p in itertools.product(range(kernel_size, -kernel_size-1, -1), repeat=2) if p != (0,0)] for idx in indices: p_list.append(img[i+idx[0]][j+idx[1]]) # Remove the pixel if number of adjacent black pixels are greater than the preset value if p_list.count(0) > criteria: img[i][j] = 
0 if remove_all: for idx in indices: img[i+idx[0]][j+idx[1]] = 0 except IndexError: pass return img if __name__ == '__main__': src = cv.imread(cv.samples.findFile("input.tif")) img = cv.cvtColor(src, cv.COLOR_BGR2HSV) img_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) cv.imshow('original', img_gray) freq_dic = freq_sort(img_gray) filtered_img = gray_filter(img_gray, freq_dic, 10, -80) cv.imshow('filtered', filtered_img) ti.imwrite("filtered.tif", np.array([[filtered_img] * 90], np.uint8)) # de_noise_img = de_noise(filtered_img, 1, 4, 4) # de_noise_img = de_noise(de_noise_img, 2, 18, 1) de_noise_img = de_noise(filtered_img, 1, 5, 4) ti.imwrite("de_noise_img.tif", np.array([[de_noise_img] * 90], np.uint8)) eroded = cv.dilate(de_noise_img, np.ones((2, 2), np.uint8), iterations=1) dilated = cv.dilate(eroded, np.ones((2, 2), np.uint8), iterations=1) med_blur = cv.medianBlur(de_noise_img, 3) cv.imshow('dilated', dilated) cv.imshow('de-noised-more-aggressive', de_noise_img) cv.imshow('med_blur', med_blur) cv.waitKey() # img_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) # print(img_gray) # if img is None: # sys.exit("Could not read the image.") # # # rows, cols, channels = img.shape # dst = img.copy() # a = 2.5 # b = 380 # for i in range(rows): # for j in range(cols): # for c in range(3): # color = img[i, j][c]*a+b # if color > 255: # 防止像素值越界(0~255) # dst[i, j][c] = 255 # elif color < 0: # 防止像素值越界(0~255) # dst[i, j][c] = 0 # # blur_img = cv.GaussianBlur(img, ksize=(5, 5), sigmaX=1, sigmaY=1) # gaussian_gray = cv.GaussianBlur(img_gray, ksize=(5, 5), sigmaX=1, sigmaY=1) # ti.imwrite("Gaussian_blur.tif", np.array([[gaussian_gray]*90], np.uint8)) # # med_blur_img = cv.medianBlur(img_gray, 3) # ti.imwrite("med_blur.tif", np.array([[med_blur_img]*90], np.uint8)) # # ret, threshold = cv.threshold(blur_img, 85, 255, cv.THRESH_TOZERO_INV) # ret_gray, threshold_gray = cv.threshold(gaussian_gray, 85, 255, cv.THRESH_TOZERO_INV) # # kernel = np.ones((2, 2), np.uint8) # erosion = 
cv.erode(threshold, kernel, iterations=2) # erosion_gray = cv.erode(threshold_gray, kernel, iterations=2) # ti.imwrite("erosion.tif", np.array([[erosion_gray]*90], np.uint8)) # # dilation = cv.dilate(erosion, kernel, iterations=2) # dilation_gray = cv.dilate(threshold_gray, kernel, iterations=2) # ti.imwrite("dilation.tif", np.array([[dilation_gray]*90], np.uint8)) # # lower_grey = np.array([0, 0, 11]) # upper_grey = np.array([0, 0, 60]) # mask = cv.inRange(erosion, lower_grey, upper_grey) # mask = cv.fastNlMeansDenoising(mask, None, 5) # res = cv.bitwise_and(erosion, erosion, mask=mask) # res_gray = cv.cvtColor(res, cv.COLOR_BGR2GRAY) # ti.imwrite("filtered.tif", np.array([[res_gray]*90], np.uint8)) # # # gray = cv.cvtColor(res, cv.COLOR_BGR2GRAY) # # grad_x = cv.Sobel(gray, -1, 1, 0, ksize=5) # # grad_y = cv.Sobel(gray, -1, 0, 1, ksize=5) # # grad = cv.addWeighted(grad_x, 1, grad_y, 1, 0) # # # src = cv.GaussianBlur(src, (3, 3), 0) # # src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY) # # cv.namedWindow(window_name) # # cv.createTrackbar(title_trackbar, window_name, 0, max_lowThreshold, CannyThreshold) # # CannyThreshold(0) # # cv.waitKey() # # cv.imshow("src", img) # cv.imshow("blur", blur_img) # cv.imshow("threshold", threshold) # # cv.imshow("erosion", erosion) # cv.imshow("dilation", dilation) # # cv.imshow('mask', mask) # cv.imshow('filtered', res) # # # cv.imshow("grad", grad) # cv.imshow("blur", blur_img) # # k = cv.waitKey(0) # if k == ord("s"): # cv.imwrite("starry_night.png", erosion)
35.314433
125
0.564297
0
0
0
0
0
0
0
0
3,417
0.496152
5247c15213add12bf9a354a74ea71714c77c8312
1,179
py
Python
tests/hpp/tests/test_generation_utils.py
javisenberg/addonpayments-Python-SDK
8c7b60fd4d245dd588d9f230c17ffde4e8ed33ac
[ "MIT" ]
2
2018-04-11T13:53:38.000Z
2018-12-09T13:10:18.000Z
tests/hpp/tests/test_generation_utils.py
javisenberg/addonpayments-Python-SDK
8c7b60fd4d245dd588d9f230c17ffde4e8ed33ac
[ "MIT" ]
2
2019-03-28T12:49:16.000Z
2019-03-28T12:52:09.000Z
tests/hpp/tests/test_generation_utils.py
javisenberg/addonpayments-Python-SDK
8c7b60fd4d245dd588d9f230c17ffde4e8ed33ac
[ "MIT" ]
8
2017-07-10T13:32:23.000Z
2021-08-23T10:55:52.000Z
# -*- encoding: utf-8 -*- from __future__ import absolute_import, unicode_literals import re from addonpayments.utils import GenerationUtils class TestGenerationUtils: def test_generate_hash(self): """ Test Hash generation success case. """ test_string = '20120926112654.thestore.ORD453-11.00.Successful.3737468273643.79347' secret = 'mysecret' expected_result = '368df010076481d47a21e777871012b62b976339' result = GenerationUtils.generate_hash(test_string, secret) assert expected_result == result def test_generate_timestamp(self): """ Test timestamp generation. Hard to test this in a meaningful way. Checking length and valid characters. """ result = GenerationUtils().generate_timestamp() match = re.match(r'([0-9]{14})', result) assert match def test_generate_order_id(self): """ Test order Id generation. Hard to test this in a meaningful way. Checking length and valid characters. """ result = GenerationUtils().generate_order_id() match = re.match(r'[A-Za-z0-9-_]{32}', result) assert match
32.75
111
0.667515
1,032
0.875318
0
0
0
0
0
0
491
0.416455
5249be72adb028ad77088154fc0b99be75fc8ca2
5,276
py
Python
tcellmatch/models/layers/layer_conv.py
theislab/tcellmatch
ddd344e44147f97f35d6a4e7c3c7677981fd177e
[ "BSD-3-Clause" ]
25
2019-08-14T22:39:40.000Z
2022-03-02T15:42:35.000Z
tcellmatch/models/layers/layer_conv.py
theislab/tcellmatch
ddd344e44147f97f35d6a4e7c3c7677981fd177e
[ "BSD-3-Clause" ]
2
2021-07-13T23:40:14.000Z
2021-12-18T10:08:37.000Z
tcellmatch/models/layers/layer_conv.py
theislab/tcellmatch
ddd344e44147f97f35d6a4e7c3c7677981fd177e
[ "BSD-3-Clause" ]
4
2020-02-21T20:43:41.000Z
2022-03-21T14:38:58.000Z
import tensorflow as tf from typing import Union, Tuple class LayerConv(tf.keras.layers.Layer): """ A layer class that implements sequence convolution. Instances of this class can be used as layers in the context of tensorflow Models. This layer implements convolution and pooling. Uses the following sequence: convolution -> batch normalisation -> activation -> drop-out -> pooling TODO read a bit into whether this is the best order. """ sublayer_conv: tf.keras.layers.Conv1D sublayer_batchnorm: tf.keras.layers.BatchNormalization sublayer_act: tf.keras.layers.Activation sublayer_dropout: tf.keras.layers.Dropout sublayer_pool: tf.keras.layers.MaxPool1D def __init__( self, activation: str, filter_width: int, filters: int, stride: int, pool_size: int, pool_stride: int, batch_norm: bool = True, dropout: float = 0.0, input_shape: Union[Tuple, None] = None, trainable: bool = True, dtype=tf.float32 ): """ Note: Addition of batch normalisation results in non-trainable weights in this layer. :param activation: Activation function. Refer to documentation of tf.keras.layers.Conv2D :param filter_width: Number of neurons per filter. Refer to documentation of tf.keras.layers.Conv2D :param filters: Number of filters / output channels. Refer to documentation of tf.keras.layers.Conv2D :param stride: Stride size for convolution on sequence. Refer to documentation of tf.keras.layers.Conv2D :param pool_size: Size of max-pooling, ie. number of output nodes to pool over. Refer to documentation of tf.keras.layers.MaxPool2D:pool_size :param pool_stride: Stride of max-pooling. Refer to documentation of tf.keras.layers.MaxPool2D:strides :param batch_norm: Whether to perform batch normalization. :param dropout: Dropout rate to use during training. 
:param input_shape: :param trainable: :param dtype: """ tf.keras.layers.Layer.__init__(self=self, trainable=trainable, dtype=dtype) self.activation = activation self.filter_width = filter_width self.filters = filters self.stride = stride self.pool_size = pool_size self.pool_stride = pool_stride self.batch_norm = batch_norm self.dropout = dropout self.input_shapes = input_shape self.fwd_pass = [] def build(self, input_shape): """ Initialise layers. Allows for delayed evaluation of input shapes. """ self.sublayer_conv = tf.keras.layers.Conv1D( filters=self.filters, kernel_size=self.filter_width, activation='linear', strides=self.stride if self.stride is not None else None, padding='same', data_format='channels_last', use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, dtype=self.dtype ) if self.batch_norm: self.sublayer_batchnorm = tf.keras.layers.BatchNormalization( momentum=0.99, epsilon=0.001, center=True, scale=True, beta_initializer='zeros', gamma_initializer='ones', moving_mean_initializer='zeros', moving_variance_initializer='ones', beta_regularizer=None, gamma_regularizer=None, beta_constraint=None, gamma_constraint=None, renorm=False, renorm_clipping=None, renorm_momentum=0.99, fused=None, trainable=True, virtual_batch_size=None, adjustment=None, dtype=self.dtype ) self.sublayer_act = tf.keras.layers.Activation(self.activation) if self.dropout > 0: self.sublayer_dropout = tf.keras.layers.Dropout(rate=self.dropout) if self.pool_size is not None: self.sublayer_pool = tf.keras.layers.MaxPool1D( pool_size=self.pool_size, strides=self.pool_stride if self.pool_stride is not None else None, padding='same' ) def call(self, x, training=True, **kwargs): """ Forward pass through layer. :param x: input tensor :param training: Whether forward pass is in context of training or prediction: Use drop-out only during training. 
:return: output tensor """ x = self.sublayer_conv(x) if self.batch_norm: x = self.sublayer_batchnorm(x, training=training) x = self.sublayer_act(x) if self.dropout > 0 and training: x = self.sublayer_dropout(x) if self.pool_size is not None: x = self.sublayer_pool(x) return x
38.510949
112
0.608795
5,217
0.988817
0
0
0
0
0
0
1,807
0.342494
524c410c76a43bc48198d2c0eb692d37c913c273
1,207
py
Python
form-ex2/main.py
acandreani/ads_web_exercicios
a97ee7ebd0dba9e308b8e2d2318e577903f83f72
[ "MIT" ]
1
2019-03-13T14:33:28.000Z
2019-03-13T14:33:28.000Z
form-ex2/main.py
acandreani/ads_web_exercicios
a97ee7ebd0dba9e308b8e2d2318e577903f83f72
[ "MIT" ]
1
2021-06-23T20:56:49.000Z
2021-06-23T20:56:49.000Z
form-ex2/main.py
acandreani/ads_web_exercicios
a97ee7ebd0dba9e308b8e2d2318e577903f83f72
[ "MIT" ]
1
2019-04-24T13:10:58.000Z
2019-04-24T13:10:58.000Z
from flask import Flask, render_template, request app = Flask(__name__) bd={"usuario":"alexandre.c.andreani@gmail.com","senha":"12345"} def usuario_existe(usuario): return usuario == bd["usuario"] def verifica_senha(usuario,senha): return usuario == bd["usuario"] and senha==bd["senha"] @app.route("/") def student(): return render_template("aluno.html") @app.route("/login") def login(): return render_template("login.html") @app.route("/loginresult",methods=['POST']) def login_result(): if request.method == "POST": result = request.form print("result") print(result) if usuario_existe(result["email"]): if verifica_senha(result["email"],result["senha"]): return render_template("loginresult.html") else: return render_template("loginresult_senha_incorreta.html") else: return render_template("loginresult_usuario_incorreto.html") @app.route("/result",methods=['POST']) def result(): if request.method == "POST": result = request.form print("result") print(result) return render_template("result.html") if __name__== "__main__": app.run(host="0.0.0.0",debug= True)
21.553571
64
0.659486
0
0
0
0
791
0.655344
0
0
319
0.264292
524ce4f455e80acda735bd673cc6b8d4d9fa0738
2,883
py
Python
raspberry/camera/get_number_faces_opencv.py
Dangaran/home_station_project
890b342e79e3dd493a8f418ed9283f0d444e5073
[ "CC0-1.0" ]
null
null
null
raspberry/camera/get_number_faces_opencv.py
Dangaran/home_station_project
890b342e79e3dd493a8f418ed9283f0d444e5073
[ "CC0-1.0" ]
null
null
null
raspberry/camera/get_number_faces_opencv.py
Dangaran/home_station_project
890b342e79e3dd493a8f418ed9283f0d444e5073
[ "CC0-1.0" ]
null
null
null
import picamera from time import sleep from time import time import os import numpy as np import cv2 import imutils import argparse import face_recognition from camera.check_rectangle_overlap import check_rectangle_overlap # https://picamera.readthedocs.io/en/release-1.0/api.html def get_number_faces(): time_now = int(time()) # take picture camera = picamera.PiCamera() camera.resolution = (1024, 768) camera.start_preview() sleep(3) camera.capture('./camera/images/{}.jpg'.format(time_now)) camera.stop_preview() print('picture taken') # human detector with opencv HOGCV = cv2.HOGDescriptor() HOGCV.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector()) # read image and use the model image = cv2.imread('./camera/images/{}.jpg'.format(time_now)) image = imutils.resize(image, width = min(800, image.shape[1])) bounding_box_cordinates, weights = HOGCV.detectMultiScale(image, winStride = (4, 4), padding = (8, 8)) # change coordinates to list and recognize person if the Confidence Value is higher than 0.60 people_count = 0 people_coord = [] for item in range(len(bounding_box_cordinates)): if weights[item][0] > 0.70: people_coord.append(list(bounding_box_cordinates[item])) people_count += 1 cv2.waitKey(0) cv2.destroyAllWindows() # count number of faces in picture with face_recognition face_locations = face_recognition.face_locations(image) num_faces = len(face_locations) face_coord = [list(item) for item in face_locations] # compare opencv and face_recognition results. If face is within the rectangle from opencv substract one face since, the face belongs to the same person. 
for person in people_coord: for face in face_coord: if check_rectangle_overlap(person, face): num_faces -= 1 people_from_both_libraries = people_count + num_faces print('opencv has recogniced {0} people and face_recognition {1} faces'.format(people_count, num_faces)) # save picture only has faces on it pic_name = '' if people_from_both_libraries: pic_name = '{0}_{1}_people.jpg'.format(time_now, people_from_both_libraries) # draw retangles to compare results # opencv coordinates for person in people_coord: cv2.rectangle(image, (person[0], person[1]), (person[0]+person[2],person[1]+person[3]), (0,255,0), 2) # face_recognition coordinates for item in face_coord: cv2.rectangle(image, (item[3], item[2]), (item[1],item[0]), (0,255,0), 2) cv2.imwrite('./camera/images/{}'.format(pic_name), image) os.remove('./camera/images/{}.jpg'.format(time_now)) else: os.remove('./camera/images/{}.jpg'.format(time_now)) return people_from_both_libraries, pic_name
36.493671
157
0.687825
0
0
0
0
0
0
0
0
769
0.266736
525064b1ed219ef71b739ece262ba4a5a5c4ba31
609
py
Python
ejercicios_basicos/poo/poo10/test_figura_geo.py
JuanDuran85/ejemplos_python
47aa49c65384ab89654f362f3da6cd2b0ef386e5
[ "Apache-2.0" ]
null
null
null
ejercicios_basicos/poo/poo10/test_figura_geo.py
JuanDuran85/ejemplos_python
47aa49c65384ab89654f362f3da6cd2b0ef386e5
[ "Apache-2.0" ]
null
null
null
ejercicios_basicos/poo/poo10/test_figura_geo.py
JuanDuran85/ejemplos_python
47aa49c65384ab89654f362f3da6cd2b0ef386e5
[ "Apache-2.0" ]
null
null
null
from Cuadrado import Cuadrado from Rectangulo import Rectangulo print("Creacion objeto Cuadrado".center(50, "-")) cuadrado1 = Cuadrado(lado=10, color='azul') print(cuadrado1) print(cuadrado1.color) print(cuadrado1.ancho) print(cuadrado1.alto) print(cuadrado1.area()) # trabajando con el metodo MRO - Method Resolution Order print(Cuadrado.__mro__) print(Cuadrado.mro()) print("Creacion Objeto Rectangulo".center(50, "-")) reactangulo1 = Rectangulo(base=10, altura=20,color='verde') print(reactangulo1) print(reactangulo1.color) print(reactangulo1.ancho) print(reactangulo1.alto) print(reactangulo1.area())
26.478261
59
0.784893
0
0
0
0
0
0
0
0
129
0.211823
52518f3004b12037b13338b1f21cd61da818f5e7
1,399
py
Python
Gems/AWSCore/Code/Tools/ResourceMappingTool/multithread/worker.py
aaarsene/o3de
37e3b0226958974defd14dd6d808e8557dcd7345
[ "Apache-2.0", "MIT" ]
1
2021-07-20T12:39:24.000Z
2021-07-20T12:39:24.000Z
Gems/AWSCore/Code/Tools/ResourceMappingTool/multithread/worker.py
aaarsene/o3de
37e3b0226958974defd14dd6d808e8557dcd7345
[ "Apache-2.0", "MIT" ]
null
null
null
Gems/AWSCore/Code/Tools/ResourceMappingTool/multithread/worker.py
aaarsene/o3de
37e3b0226958974defd14dd6d808e8557dcd7345
[ "Apache-2.0", "MIT" ]
1
2021-07-20T11:07:25.000Z
2021-07-20T11:07:25.000Z
""" Copyright (c) Contributors to the Open 3D Engine Project. For complete copyright and license terms please see the LICENSE at the root of this distribution. SPDX-License-Identifier: Apache-2.0 OR MIT """ import sys import traceback from PySide2.QtCore import (QObject, QRunnable, Signal, Slot) class WorkerSignals(QObject): """ Defines the signals available from a running worker thread """ finished: Signal = Signal() error: Signal = Signal(tuple) result: Signal = Signal(object) class FunctionWorker(QRunnable): """ Custom worker, which is inheriting from QRunnable to handle worker thread setup, signals and wrap-up. """ def __init__(self, function: any, *args: str, **kwargs: int) -> None: super(FunctionWorker, self).__init__() self.function = function self.args = args self.kwargs = kwargs self.signals: WorkerSignals = WorkerSignals() @Slot() def run(self) -> None: try: result: object = self.function(*self.args, **self.kwargs) except: # catch all exceptions for this generic worker traceback.print_exc() exctype, value = sys.exc_info()[:2] self.signals.error.emit((exctype, value, traceback.format_exc())) else: self.signals.result.emit(result) finally: self.signals.finished.emit()
31.088889
155
0.64975
1,094
0.781987
0
0
458
0.327377
0
0
448
0.320229
5253378a89fcd0af689b45bb751a44315a891df5
1,252
py
Python
loopchain/blockchain/blocks/v0_5/block_builder.py
windies21/loopchain
6e96c8a7e006747af04187155678f2fae59e1389
[ "Apache-2.0" ]
105
2018-04-03T05:29:08.000Z
2022-01-28T17:33:20.000Z
loopchain/blockchain/blocks/v0_5/block_builder.py
laurenceyoon/loopchain
e87032779be4715c135c2c91d2757d9c63bf4e31
[ "Apache-2.0" ]
135
2018-09-04T07:11:02.000Z
2021-12-15T06:25:47.000Z
loopchain/blockchain/blocks/v0_5/block_builder.py
laurenceyoon/loopchain
e87032779be4715c135c2c91d2757d9c63bf4e31
[ "Apache-2.0" ]
46
2018-05-07T09:12:07.000Z
2022-02-23T09:58:37.000Z
# Copyright 2018-current ICON Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """block builder for version 0.5 block""" from loopchain.blockchain.blocks import BlockProverType from loopchain.blockchain.blocks.v0_4 import BlockBuilder from loopchain.blockchain.blocks.v0_5 import BlockHeader, BlockBody, BlockProver from loopchain.blockchain.types import Hash32 class BlockBuilder(BlockBuilder): version = BlockHeader.version BlockHeaderClass = BlockHeader BlockBodyClass = BlockBody def _build_transactions_hash(self): if not self.transactions: return Hash32.empty() block_prover = BlockProver(self.transactions.keys(), BlockProverType.Transaction) return block_prover.get_proof_root()
37.939394
89
0.769968
378
0.301917
0
0
0
0
0
0
615
0.491214
5254529e53215160bd673270bfca00a24d873a10
788
py
Python
calculadora.py
rheimon1/Calculadora
70e32d3f1e35df4f33b24a3a14255e48864d653d
[ "MIT" ]
null
null
null
calculadora.py
rheimon1/Calculadora
70e32d3f1e35df4f33b24a3a14255e48864d653d
[ "MIT" ]
null
null
null
calculadora.py
rheimon1/Calculadora
70e32d3f1e35df4f33b24a3a14255e48864d653d
[ "MIT" ]
null
null
null
def media_suicida (mediaProvas, mediaTrabalhos): if mediaProvas >= 5 and mediaTrabalhos >= 5: return mediaProvas * 0.6 + mediaTrabalhos * 0.4 return min(mediaProvas, mediaTrabalhos) notasProva = [] notasTrabalho = [] quantidadeProva = int(input("Quantidade de provas: ")) for item in range(quantidadeProva): n = float(input("Nota da prova {}: ".format(item + 1))) notasProva.append(n) quantidadeTrabalho = int(input("Quantidade de trabalhos: ")) for item in range(quantidadeTrabalho): n = float(input("Nota do trabalho {}: ".format(item + 1))) notasTrabalho.append((n)) mediaProva = sum(notasProva) / quantidadeProva mediaTrabalho = sum(notasTrabalho) / quantidadeTrabalho print("Media final = {}".format(media_suicida(mediaProva, mediaTrabalho)))
30.307692
74
0.713198
0
0
0
0
0
0
0
0
112
0.142132
52545d8fefcc154ae114cece82d9ea67a6e8c4b1
1,832
py
Python
lstm-lm/encoder.py
helloMLWo/daga
88c7a1776ff36bd1abe1026103454e23ec77b552
[ "MIT" ]
46
2021-02-21T23:19:14.000Z
2022-03-30T09:40:48.000Z
lstm-lm/encoder.py
helloMLWo/daga
88c7a1776ff36bd1abe1026103454e23ec77b552
[ "MIT" ]
11
2021-06-08T11:58:24.000Z
2022-03-31T10:07:42.000Z
lstm-lm/encoder.py
helloMLWo/daga
88c7a1776ff36bd1abe1026103454e23ec77b552
[ "MIT" ]
6
2021-03-29T03:53:12.000Z
2022-03-01T03:17:06.000Z
"""Encoder module""" from __future__ import division import torch import torch.nn as nn from torch.nn.utils.rnn import pack_padded_sequence as pack from torch.nn.utils.rnn import pad_packed_sequence as unpack class LSTMEncoder(nn.Module): """LSTM encoder""" def __init__( self, hidden_size, num_layers, bidirectional, embeddings, padding_idx, dropout=0.0, ): super(LSTMEncoder, self).__init__() num_directions = 2 if bidirectional else 1 assert hidden_size % num_directions == 0 hidden_size = hidden_size // num_directions self.embeddings = embeddings self.rnn = nn.LSTM( input_size=embeddings.embedding_dim, hidden_size=hidden_size, num_layers=num_layers, dropout=dropout if num_layers > 1 else 0, bidirectional=bidirectional, ) self.padding_idx = padding_idx self.bidirectional = bidirectional def _fix_enc_hidden(self, hidden): # The encoder hidden is (layers*directions) x batch x dim. # We need to convert it to layers x batch x (directions*dim). if self.bidirectional: hidden = torch.cat( [hidden[0 : hidden.size(0) : 2], hidden[1 : hidden.size(0) : 2]], 2 ) return hidden def forward(self, sents): emb = self.embeddings(sents) lengths = sents.ne(self.padding_idx).sum(0) lengths = lengths.view(-1).tolist() packed_emb = pack(emb, lengths) memory_bank, enc_final = self.rnn(packed_emb) memory_bank = unpack(memory_bank)[0] memory_bank = memory_bank.contiguous() enc_final = tuple([self._fix_enc_hidden(enc_hid) for enc_hid in enc_final]) return memory_bank, enc_final
32.714286
83
0.628821
1,619
0.883734
0
0
0
0
0
0
157
0.085699
52567cbfeeb66ea4c851b3e38405f2786a1b6cc1
4,239
py
Python
StataLovers/StataLovers/main_func.py
Kolpashnikova/StataLoversSource
34f8f6113680ae0952004ce6fffb1efcb828eca9
[ "MIT" ]
null
null
null
StataLovers/StataLovers/main_func.py
Kolpashnikova/StataLoversSource
34f8f6113680ae0952004ce6fffb1efcb828eca9
[ "MIT" ]
null
null
null
StataLovers/StataLovers/main_func.py
Kolpashnikova/StataLoversSource
34f8f6113680ae0952004ce6fffb1efcb828eca9
[ "MIT" ]
null
null
null
import pandas as pd def summarize(vars, df): ''' this function prints out descriptive statistics in the similar way that Stata function sum does. Args: pandas column of a df Output: None (print out) ''' num = max([len(i) for i in vars]) if num < 13: num = 13 print("{} | Obs Mean Std. Dev. Min Max ".format('Variable'.rjust(num))) print("{}-+----------------------------------------------------------".format('-'*num)) for var in vars: temp = df[var].describe() print("{} |{}{}{}{}{}".format(var.rjust(num), round(temp['count'], 1).astype(str).rjust(11), \ round(temp['mean'], 3).astype(str).rjust(12), \ round(temp['std'], 3).astype(str).rjust(13), \ round(temp['min'], 3).astype(str).rjust(11), \ round(temp['max'], 3).astype(str).rjust(11) \ )) def tab(var1, var2=None): ''' This is a function that gives an output similar to the tab function in Stata. ARGs: it takes columns of a dataframe as arguments. ''' if var2 is None: print("{}| Freq. Percent Cum.".format(var1.name.rjust(12))) print("------------+-----------------------------------") temp = pd.DataFrame(var1.value_counts()) temp.reset_index(inplace=True) temp = temp.sort_values(by="index").reset_index(drop=True) tots = temp[temp.columns[1]].sum() temp['percent']=100*(temp[temp.columns[1]]/tots) temp['cum_percent'] = 100*(temp[temp.columns[1]].cumsum()/tots) for y in range(0, len(temp)): print("{}|{}{}{}".format(round(temp.loc[y, temp.columns[0]], 3).astype(str).rjust(12), \ round(temp.loc[y, temp.columns[1]], 3).astype(str).rjust(11), \ round(temp.loc[y, 'percent'], 3).astype(str).rjust(12), \ round(temp.loc[y, 'cum_percent'], 3).astype(str).rjust(12))) print("------------+-----------------------------------") print(" Total |{} 100.00".format(tots.astype(str).rjust(11))) else: assert len(var1)==len(var2), "Columns are not of the same length, check if they belong to the same dataframe" dict1 = {var1.name: var1, var2.name: var2} df1 = pd.DataFrame(dict1, columns=[var1.name, var2.name]) number = len(var2.value_counts()) 
temp1= pd.DataFrame(var1.value_counts()) temp1.reset_index(inplace=True) temp1 = temp1.sort_values(by="index").reset_index(drop=True) temp2= pd.DataFrame(var2.value_counts()) temp2.reset_index(inplace=True) temp2 = temp2.sort_values(by="index").reset_index(drop=True) print(" |{}".format(var2.name.rjust(round(len(var2.value_counts())*12/2)))) string1 = str('{} |'.format(var1.name.rjust(12))) for i in range(number): string1+='{}'.format(temp2.loc[i, 'index'].astype(str).rjust(12)) string1+= str('| Total') print(string1) print('-------------+------------------------+----------') for j in range(len(temp1)): string2 = str('{} |'.format(round(temp1.loc[j, 'index'], 3).astype(str).rjust(12))) num2 = 0 for i in range(number): num1 = df1.loc[(df1[var1.name]==temp1.loc[j, 'index']) \ & (df1[var2.name]==temp2.loc[i, 'index']), \ var1.name].count() string2+='{}'.format(num1.astype(str).rjust(12)) num2 +=num1 string2 += '|{}'.format(num2.astype(str).rjust(10)) print(string2) print('-------------+------------------------+----------') string3 = str(' Total |') for i in range(number): string3 +='{}'.format(temp2.loc[i, var2.name].astype(str).rjust(12)) string3 +='|{}'.format(var2.count().astype(str).rjust(10)) print(string3)
51.072289
117
0.476056
0
0
0
0
0
0
0
0
1,051
0.247936