hexsha
stringlengths
40
40
size
int64
5
2.06M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
3
248
max_stars_repo_name
stringlengths
5
125
max_stars_repo_head_hexsha
stringlengths
40
78
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
191k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
3
248
max_issues_repo_name
stringlengths
5
125
max_issues_repo_head_hexsha
stringlengths
40
78
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
67k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
3
248
max_forks_repo_name
stringlengths
5
125
max_forks_repo_head_hexsha
stringlengths
40
78
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
5
2.06M
avg_line_length
float64
1
1.02M
max_line_length
int64
3
1.03M
alphanum_fraction
float64
0
1
count_classes
int64
0
1.6M
score_classes
float64
0
1
count_generators
int64
0
651k
score_generators
float64
0
1
count_decorators
int64
0
990k
score_decorators
float64
0
1
count_async_functions
int64
0
235k
score_async_functions
float64
0
1
count_documentation
int64
0
1.04M
score_documentation
float64
0
1
aca6a6e0486d3884a6f02c4b628910863d9b5d95
834
py
Python
venv/Lib/site-packages/plotnine/geoms/geom_col.py
EkremBayar/bayar
aad1a32044da671d0b4f11908416044753360b39
[ "MIT" ]
null
null
null
venv/Lib/site-packages/plotnine/geoms/geom_col.py
EkremBayar/bayar
aad1a32044da671d0b4f11908416044753360b39
[ "MIT" ]
1
2020-10-02T21:43:06.000Z
2020-10-15T22:52:39.000Z
venv/Lib/site-packages/plotnine/geoms/geom_col.py
EkremBayar/bayar
aad1a32044da671d0b4f11908416044753360b39
[ "MIT" ]
null
null
null
from ..doctools import document from .geom_bar import geom_bar @document class geom_col(geom_bar): """ Bar plot with base on the x-axis This is an alternate version of :class:`geom_bar` that maps the height of bars to an existing variable in your data. If you want the height of the bar to represent a count of cases, use :class:`geom_bar`. {usage} Parameters ---------- {common_parameters} width : float, (default: None) Bar width. If :py:`None`, the width is set to `90%` of the resolution of the data. See Also -------- plotnine.geoms.geom_bar """ REQUIRED_AES = {'x', 'y'} NON_MISSING_AES = {'xmin', 'xmax', 'ymin', 'ymax'} DEFAULT_PARAMS = {'stat': 'identity', 'position': 'stack', 'na_rm': False, 'width': None}
26.0625
65
0.605516
758
0.908873
0
0
768
0.920863
0
0
604
0.724221
aca742d4a6a7d5b4d70457cb7408186ab91efbca
117
py
Python
my_app/admin.py
gh-8/FullSend-List
52544d1413b413eb9f646fb38613ca9865e5a88b
[ "MIT" ]
1
2020-08-06T06:32:32.000Z
2020-08-06T06:32:32.000Z
book_app/admin.py
Dhrutiman/my_book
412200f185cd760f3c3c182cf61321f05f59d920
[ "MIT" ]
4
2020-06-05T21:40:29.000Z
2021-06-02T00:54:34.000Z
book_app/admin.py
Dhrutiman/my_book
412200f185cd760f3c3c182cf61321f05f59d920
[ "MIT" ]
null
null
null
from django.contrib import admin from .models import Search # Register your models here. admin.site.register(Search)
23.4
32
0.811966
0
0
0
0
0
0
0
0
28
0.239316
aca90fb089b9626c7c5db9aee7d6d8d8f14631cd
1,155
py
Python
utils.py
SeoulTech-HCIRLab/ChannelAug
2701c86836150f86dfdf3ab4f57485f262c88b8f
[ "Apache-2.0" ]
3
2020-06-30T06:29:35.000Z
2021-03-02T14:18:55.000Z
utils.py
titania7777/ChannelAug
03dcd4aa6bdb8b2a38d5057b55672d8a862e4e11
[ "Apache-2.0" ]
null
null
null
utils.py
titania7777/ChannelAug
03dcd4aa6bdb8b2a38d5057b55672d8a862e4e11
[ "Apache-2.0" ]
null
null
null
# Max-Heinrich Laves # Institute of Mechatronic Systems # Leibniz Universität Hannover, Germany # 2019 # Code From https://github.com/mlaves/bayesian-temperature-scaling import torch __all__ = ['accuracy', 'kl_loss', 'nentr', 'xavier_normal_init'] def accuracy(input, target): _, max_indices = torch.max(input.data, 1) acc = (max_indices == target).sum().float() / max_indices.size(0) return acc.item() def kl_loss(logits): return -torch.nn.functional.log_softmax(logits, dim=1).mean() def nentr(p, base=None): """ Calculates entropy of p to the base b. If base is None, the natural logarithm is used. :param p: batches of class label probability distributions (softmax output) :param base: base b :return: """ eps = torch.tensor([1e-16], device=p.device) if base: base = torch.tensor([base], device=p.device, dtype=torch.float32) return (p.mul(p.add(eps).log().div(base.log()))).sum(dim=1).abs() else: return (p.mul(p.add(eps).log())).sum(dim=1).abs() def xavier_normal_init(m): if isinstance(m, torch.nn.Conv2d): torch.nn.init.xavier_normal_(m.weight.data)
30.394737
90
0.670996
0
0
0
0
0
0
0
0
431
0.372837
aca93614dee7aef5c0226f502f8bd32011883de9
8,927
py
Python
typotools.py
peczony/chgksuite
654e75df7dd4f1c7676f942e8a595c1437dfecfa
[ "MIT" ]
3
2017-03-11T11:24:43.000Z
2020-06-11T17:41:40.000Z
typotools.py
peczony/chgksuite
654e75df7dd4f1c7676f942e8a595c1437dfecfa
[ "MIT" ]
null
null
null
typotools.py
peczony/chgksuite
654e75df7dd4f1c7676f942e8a595c1437dfecfa
[ "MIT" ]
null
null
null
#!usr/bin/env python #! -*- coding: utf-8 -*- from __future__ import unicode_literals import re import traceback import urllib import pprint OPENING_QUOTES = set(['«', '„', '“']) CLOSING_QUOTES = set(['»', '“', '”']) QUOTES = OPENING_QUOTES | CLOSING_QUOTES | set(['"', "'"]) WHITESPACE = set([' ', ' ', '\n']) PUNCTUATION = set([',', '.', ':', ';', '?', '!']) OPENING_BRACKETS = ['[', '(', '{'] CLOSING_BRACKETS = [']', ')', '}'] BRACKETS = set(OPENING_BRACKETS) | set(CLOSING_BRACKETS) LOWERCASE_RUSSIAN = set(list('абвгдеёжзийклмнопрстуфхцчшщъыьэюя')) UPPERCASE_RUSSIAN = set(list('АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ')) POTENTIAL_ACCENTS = set(list('АОУЫЭЯЕЮИ')) BAD_BEGINNINGS = set(['Мак', 'мак', "О'", 'о’', 'О’', "о'"]) re_bad_wsp_start = re.compile(r'^[{}]+'.format(''.join(WHITESPACE))) re_bad_wsp_end = re.compile(r'[{}]+$'.format(''.join(WHITESPACE))) re_url = re.compile(r"""((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]""" """|[a-z0-9.\-]+[.‌​][a-z]{2,4}/)(?:[^\s()<>]+|(([^\s()<>]+|(([^\s()<>]+)))*))+""" """(?:(([^\s()<>]+|(‌​([^\s()<>]+)))*)|[^\s`!()[]{};:'".,<>?«»“”‘’]))""", re.DOTALL) re_percent = re.compile(r"(%[0-9a-fA-F]{2})+") def remove_excessive_whitespace(s): s = re_bad_wsp_start.sub('', s) s = re_bad_wsp_end.sub('', s) s = re.sub(r'\s+\n\s+', '\n', s) return s def convert_quotes(text): """ Convert quotes in *text* into HTML curly quote entities. >>> print(convert_quotes('"Isn\\'t this fun?"')) &#8220;Isn&#8217;t this fun?&#8221; """ punct_class = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]""" # Special case if the very first character is a quote # followed by punctuation at a non-word-break. 
Close the quotes by brute # force: text = re.sub(r"""^"(?=%s\\B)""" % (punct_class,), '«', text) # Special case for double sets of quotes, e.g.: # <p>He said, "'Quoted' words in a larger quote."</p> text = re.sub(r""""'(?=\w)""", '««', text) text = re.sub(r"""'"(?=\w)""", '««', text) # Special case for decade abbreviations (the '80s): text = re.sub(r"""\b'(?=\d{2}s)""", '’', text) close_class = r'[^\ \t\r\n\[\{\(\-]' dec_dashes = '–|—' # Get most opening single quotes: opening_single_quotes_regex = re.compile(r""" ( \s | # a whitespace char, or &nbsp;|  | # a non-breaking space entity, or -- | # dashes, or &[mn]dash; | # named dash entities %s | # or decimal entities &\#x201[34]; # or hex ) ' # the quote (?=\w) # followed by a word character """ % (dec_dashes,), re.VERBOSE) text = opening_single_quotes_regex.sub(r'\1«', text) closing_single_quotes_regex = re.compile(r""" (%s) ' (?!\s | s\b | \d) """ % (close_class,), re.VERBOSE) text = closing_single_quotes_regex.sub(r'\1»', text) closing_single_quotes_regex = re.compile(r""" (%s) ' (\s | s\b) """ % (close_class,), re.VERBOSE) text = closing_single_quotes_regex.sub(r'\1»\2', text) # Get most opening double quotes: opening_double_quotes_regex = re.compile(r""" ( \s | # a whitespace char, or &nbsp; | # a non-breaking space entity, or -- | # dashes, or &[mn]dash; | # named dash entities %s | # or decimal entities &\#x201[34]; # or hex ) " # the quote (?=\w) # followed by a word character """ % (dec_dashes,), re.VERBOSE) text = opening_double_quotes_regex.sub(r'\1«', text) # Double closing quotes: closing_double_quotes_regex = re.compile(r""" #(%s)? 
# character that indicates the quote should be closing " (?=\s) """ % (close_class,), re.VERBOSE) try: text = closing_double_quotes_regex.sub('»', text) except: print(repr(traceback.format_exc())) closing_double_quotes_regex = re.compile(r""" (%s) # character that indicates the quote should be closing " """ % (close_class,), re.VERBOSE) text = closing_double_quotes_regex.sub(r'\1»', text) # Any remaining quotes should be opening ones. text = re.sub('"', '«', text) return text def get_next_opening_quote_character(s, index): i = index + 1 while i < len(s): if s[i] in OPENING_QUOTES: return s[i], i i += 1 return '', '' def get_next_quote_character(s, index): i = index + 1 while i < len(s): if s[i] in QUOTES: return s[i], i i += 1 return '', '' def get_next_closing_quote_character(s, index): i = index + 1 while i < len(s): if s[i] in CLOSING_QUOTES: return s[i], i i += 1 return '', '' def get_quotes_right(s): # s = re.sub(r'(?<=[{}{}{}])["\']'.format(''.join(WHITESPACE), # ''.join(CLOSING_QUOTES), ''.join(PUNCTUATION)), '«', s) # s = re.sub(r'["\'](?=[{}{}{}])'.format(''.join(WHITESPACE), # ''.join(OPENING_QUOTES), ''.join(PUNCTUATION)), '»', s) s = re.sub(r'“','"',s) s = re.sub(r'[{}]'.format(''.join(OPENING_QUOTES)), '«', s) s = re.sub(r'[{}]'.format(''.join(CLOSING_QUOTES)), '»', s) s = convert_quotes(s) # alternate quotes i = 0 s = list(s) if get_next_quote_character(s, -1)[0]: s[get_next_quote_character(s, -1)[1]] = '«' while i < len(s): if (s[i] == '«' and get_next_quote_character(s, i)[0] == '«'): s[get_next_quote_character(s, i)[1]] = '„' i += 1 s = s[::-1] if get_next_quote_character(s, -1)[0]: s[get_next_quote_character(s, -1)[1]] = '»' i = 0 while i < len(s): if (s[i] == '»' and get_next_quote_character(s, i)[0] == '»'): s[get_next_quote_character(s, i)[1]] = '“' i += 1 s = s[::-1] s = ''.join(s) return s def get_dashes_right(s): s = re.sub(r'(?<=\s)-+(?=\s)','—',s) s = re.sub(r'(?<=\d)-(?<=\d)','–',s) s = re.sub(r'-(?=\d)','−',s) return s def 
detect_accent(s): for word in re.split(r'[^{}{}]+'.format( ''.join(LOWERCASE_RUSSIAN),''.join(UPPERCASE_RUSSIAN)),s): if word.upper() != word and len(word) > 1: try: i = 1 word_new = word while i < len(word_new): if (word_new[i] in POTENTIAL_ACCENTS and word_new[:i] not in BAD_BEGINNINGS): word_new = word_new[:i] + '`' + word_new[i].lower() + word_new[i+1:] i += 1 if word != word_new: s = (s[:s.index(word)] + word_new + s[s.index(word)+len(word):]) except: print(repr(word)) return s def percent_decode(s): grs = sorted([match.group(0) for match in re_percent.finditer(s)], key=len, reverse=True) for gr in grs: try: s = s.replace(gr,urllib.unquote(gr.encode('utf8')).decode('utf8')) except: pass return s def recursive_typography(s): if isinstance(s, basestring): s = typography(s) return s elif isinstance(s, list): new_s = [] for element in s: new_s.append(recursive_typography(element)) return new_s def typography(s): s = remove_excessive_whitespace(s) s = get_quotes_right(s) s = get_dashes_right(s) s = detect_accent(s) s = percent_decode(s) return s def matching_bracket(s): assert s in OPENING_BRACKETS or s in CLOSING_BRACKETS if s in OPENING_BRACKETS: return CLOSING_BRACKETS[OPENING_BRACKETS.index(s)] return OPENING_BRACKETS[CLOSING_BRACKETS.index(s)] def find_matching_closing_bracket(s, index): s = list(s) i = index assert s[i] in OPENING_BRACKETS ob = s[i] cb = matching_bracket(ob) counter = 0 while i < len(s): if s[i] == ob: counter += 1 if s[i] == cb: counter -= 1 if counter == 0: return i i += 1 return None def find_matching_opening_bracket(s, index): s = list(s) i = index assert s[i] in CLOSING_BRACKETS cb = s[i] ob = matching_bracket(cb) counter = 0 if i < 0: i = len(s) - abs(i) while i < len(s) and i >= 0: if s[i] == cb: counter += 1 if s[i] == ob: counter -= 1 if counter == 0: return i i -= 1 return None
31.768683
92
0.498264
0
0
0
0
0
0
0
0
3,184
0.350429
aca9c4f8c8d50f2255b23ff003cb2be5b15e7571
1,029
py
Python
src/test_evader.py
zacespinosa/homicidal_chauffeur
ad9186171e822d0021f22d5b16a37f76bad531af
[ "MIT" ]
null
null
null
src/test_evader.py
zacespinosa/homicidal_chauffeur
ad9186171e822d0021f22d5b16a37f76bad531af
[ "MIT" ]
null
null
null
src/test_evader.py
zacespinosa/homicidal_chauffeur
ad9186171e822d0021f22d5b16a37f76bad531af
[ "MIT" ]
null
null
null
import random as random import numpy as np from dynamics import Simulator, Pursuer, Evader def test_evader(): num_d_states = 25 num_phi_states = 20 num_phi_d_states = 20 num_actions = 10 num_states = num_d_states*num_phi_states*num_phi_d_states num_epochs = 1000 p = Pursuer() e = Evader(num_d_states, num_phi_states, num_phi_d_states, num_actions, np.array([10,10]), learning='Q-learning', load_q=True) s = Simulator(p, e, num_d_states, num_phi_states, num_phi_d_states, num_actions, verbose=True) while s.restarts < 1: # execute optimal pursuer strategy while training evader a_p = p.optimal_strategy(e.pos, p.pos) # a_e = e.optimal_strategy(e.pos, p.pos, p.R_p) # execute Q Learning policy for evader state = e.s a_e = e.qLearningPolicy(state) p_info, e_info = s.simulate(a_p, a_e, discrete_p_action=False, discrete_e_action=True) if s.end_game: s.restart_game() new_state = e_info[0] r_e = e_info[1] print("Evader captured: ", s.num_captures, "/", s.restarts, " times.") test_evader()
28.583333
127
0.738581
0
0
0
0
0
0
0
0
184
0.178814
acaa7e107370a525398d8d212fa14ab906e106e9
1,892
py
Python
tests/testing_drive.py
alexzanderr/_core-dev
831f69dad524e450c4243b1dd88f26de80e1d444
[ "MIT" ]
null
null
null
tests/testing_drive.py
alexzanderr/_core-dev
831f69dad524e450c4243b1dd88f26de80e1d444
[ "MIT" ]
null
null
null
tests/testing_drive.py
alexzanderr/_core-dev
831f69dad524e450c4243b1dd88f26de80e1d444
[ "MIT" ]
null
null
null
import unittest from core.drive import copy from core.aesthetics import * class TestingDrivepy(unittest.TestCase): def test_copy_function(self): source_tests = [ r"D:\Alexzander__\programming\python\Python2Executable", r"D:\Alexzander__\programming\python\byzantion", r"D:\Alexzander__\programming\python\BizidayNews", r"D:\Alexzander__\programming\python\bitcoin", r"D:\Alexzander__\programming\python\core", r"", r"", ] destination_tests = [ r"D:\Alexzander__\programming\python\testing_copy_func", r"D:\Alexzander__\programming\python\testing_copy_func", r"D:\Alexzander__\programming\python\testing_copy_func", r"D:\Alexzander__\programming\python\testing_copy_func", r"D:\Alexzander__\programming\python\testing_copy_func", r"", r"", ] for index, (source, destination) in enumerate( zip(source_tests, destination_tests), start=1 ): if source != r"" and destination != r"": try: result = self.assertEqual( copy( source, destination, open_destination_when_done=False, __print=False), True ) if result is None: print(f"Test #{index} {green_bold('passed')}.") except BaseException as exception: print(red_bold(type(exception))) print(red_bold(exception)) print(f"Test #{index} DIDNT pass!") if __name__ == '__main__': unittest.main()
35.037037
72
0.509514
1,756
0.928118
0
0
0
0
0
0
609
0.321882
acaad146ce57d3448a856e316aa846e7146bad1e
207
py
Python
app/routes.py
apigram/HospitalWaiterAuthService
9fcff5c215f3ec99658ab2b2d300dd6f511d52fc
[ "Apache-2.0" ]
null
null
null
app/routes.py
apigram/HospitalWaiterAuthService
9fcff5c215f3ec99658ab2b2d300dd6f511d52fc
[ "Apache-2.0" ]
null
null
null
app/routes.py
apigram/HospitalWaiterAuthService
9fcff5c215f3ec99658ab2b2d300dd6f511d52fc
[ "Apache-2.0" ]
null
null
null
from app import app from flask_restful import Api from app.resources.auth import TokenResource api = Api(app) # Token resource api.add_resource(TokenResource, '/authservice/token', endpoint='auth_token')
20.7
76
0.797101
0
0
0
0
0
0
0
0
48
0.231884
acabdd0d2c7145cdd03eb03d939a79b0867a484a
60,084
py
Python
rioxarray/rioxarray.py
jhamman/rioxarray
924df03522b8f53225231a435533f942cc4965e5
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
rioxarray/rioxarray.py
jhamman/rioxarray
924df03522b8f53225231a435533f942cc4965e5
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
rioxarray/rioxarray.py
jhamman/rioxarray
924df03522b8f53225231a435533f942cc4965e5
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# -- coding: utf-8 -- """ This module is an extension for xarray to provide rasterio capabilities to xarray datasets/dataarrays. Credits: The `reproject` functionality was adopted from https://github.com/opendatacube/datacube-core # noqa Source file: - https://github.com/opendatacube/datacube-core/blob/084c84d78cb6e1326c7fbbe79c5b5d0bef37c078/datacube/api/geo_xarray.py # noqa datacube is licensed under the Apache License, Version 2.0: - https://github.com/opendatacube/datacube-core/blob/1d345f08a10a13c316f81100936b0ad8b1a374eb/LICENSE # noqa """ import copy import math from uuid import uuid4 import numpy as np import pyproj import rasterio.warp import xarray from affine import Affine from rasterio.crs import CRS from rasterio.enums import Resampling from rasterio.features import geometry_mask from rasterio.windows import get_data_window from scipy.interpolate import griddata from rioxarray.crs import crs_to_wkt from rioxarray.exceptions import ( DimensionError, DimensionMissingCoordinateError, InvalidDimensionOrder, MissingCRS, NoDataInBounds, OneDimensionalRaster, RioXarrayError, TooManyDimensions, ) FILL_VALUE_NAMES = ("_FillValue", "missing_value", "fill_value", "nodata") UNWANTED_RIO_ATTRS = ("nodatavals", "crs", "is_tiled", "res") DEFAULT_GRID_MAP = "spatial_ref" def affine_to_coords(affine, width, height, x_dim="x", y_dim="y"): """Generate 1d pixel centered coordinates from affine. Based on code from the xarray rasterio backend. Parameters ---------- affine: :obj:`affine.Affine` The affine of the grid. width: int The width of the grid. height: int The height of the grid. x_dim: str, optional The name of the X dimension. Default is 'x'. y_dim: str, optional The name of the Y dimension. Default is 'y'. Returns ------- dict: x and y coordinate arrays. 
""" x_coords, _ = affine * (np.arange(width) + 0.5, np.zeros(width) + 0.5) _, y_coords = affine * (np.zeros(height) + 0.5, np.arange(height) + 0.5) return {y_dim: y_coords, x_dim: x_coords} def _get_grid_map_name(src_data_array): """Get the grid map name of the variable.""" try: return src_data_array.attrs["grid_mapping"] except KeyError: return DEFAULT_GRID_MAP def _generate_attrs(src_data_array, dst_affine, dst_nodata): # add original attributes new_attrs = copy.deepcopy(src_data_array.attrs) # remove all nodata information for unwanted_attr in FILL_VALUE_NAMES + UNWANTED_RIO_ATTRS: new_attrs.pop(unwanted_attr, None) # add nodata information fill_value = ( src_data_array.rio.nodata if src_data_array.rio.nodata is not None else dst_nodata ) if src_data_array.rio.encoded_nodata is None and fill_value is not None: new_attrs["_FillValue"] = fill_value # add raster spatial information new_attrs["transform"] = tuple(dst_affine)[:6] new_attrs["grid_mapping"] = _get_grid_map_name(src_data_array) return new_attrs def add_xy_grid_meta(coords): """Add x,y metadata to coordinates""" # add metadata to x,y coordinates if "x" in coords: x_coord_attrs = dict(coords["x"].attrs) x_coord_attrs["long_name"] = "x coordinate of projection" x_coord_attrs["standard_name"] = "projection_x_coordinate" coords["x"].attrs = x_coord_attrs elif "longitude" in coords: x_coord_attrs = dict(coords["longitude"].attrs) x_coord_attrs["long_name"] = "longitude" x_coord_attrs["standard_name"] = "longitude" coords["longitude"].attrs = x_coord_attrs if "y" in coords: y_coord_attrs = dict(coords["y"].attrs) y_coord_attrs["long_name"] = "y coordinate of projection" y_coord_attrs["standard_name"] = "projection_y_coordinate" coords["y"].attrs = y_coord_attrs elif "latitude" in coords: x_coord_attrs = dict(coords["latitude"].attrs) x_coord_attrs["long_name"] = "latitude" x_coord_attrs["standard_name"] = "latitude" coords["latitude"].attrs = x_coord_attrs return coords def add_spatial_ref(in_ds, dst_crs, 
grid_map_name): in_ds.rio.write_crs( input_crs=dst_crs, grid_mapping_name=grid_map_name, inplace=True ) return in_ds def _add_attrs_proj(new_data_array, src_data_array): """Make sure attributes and projection correct""" # make sure dimension information is preserved if new_data_array.rio._x_dim is None: new_data_array.rio._x_dim = src_data_array.rio.x_dim if new_data_array.rio._y_dim is None: new_data_array.rio._y_dim = src_data_array.rio.y_dim # make sure attributes preserved new_attrs = _generate_attrs( src_data_array, new_data_array.rio.transform(recalc=True), None ) # remove fill value if it already exists in the encoding # this is for data arrays pulling the encoding from a # source data array instead of being generated anew. if "_FillValue" in new_data_array.encoding: new_attrs.pop("_FillValue", None) new_data_array.rio.set_attrs(new_attrs, inplace=True) # make sure projection added add_xy_grid_meta(new_data_array.coords) new_data_array = add_spatial_ref( new_data_array, src_data_array.rio.crs, _get_grid_map_name(src_data_array) ) # make sure encoding added new_data_array.encoding = src_data_array.encoding.copy() return new_data_array def _warp_spatial_coords(data_array, affine, width, height): """get spatial coords in new transform""" new_spatial_coords = affine_to_coords(affine, width, height) return { "x": xarray.IndexVariable("x", new_spatial_coords["x"]), "y": xarray.IndexVariable("y", new_spatial_coords["y"]), } def _get_nonspatial_coords(src_data_array): coords = {} for coord in set(src_data_array.coords) - { src_data_array.rio.x_dim, src_data_array.rio.y_dim, DEFAULT_GRID_MAP, }: if src_data_array[coord].dims: coords[coord] = xarray.IndexVariable( src_data_array[coord].dims, src_data_array[coord].values, src_data_array[coord].attrs, ) else: coords[coord] = xarray.Variable( src_data_array[coord].dims, src_data_array[coord].values, src_data_array[coord].attrs, ) return coords def _make_coords(src_data_array, dst_affine, dst_width, dst_height, dst_crs): 
"""Generate the coordinates of the new projected `xarray.DataArray`""" coords = _get_nonspatial_coords(src_data_array) new_coords = _warp_spatial_coords(src_data_array, dst_affine, dst_width, dst_height) new_coords.update(coords) return add_xy_grid_meta(new_coords) def _make_dst_affine( src_data_array, src_crs, dst_crs, dst_resolution=None, dst_shape=None ): """Determine the affine of the new projected `xarray.DataArray`""" src_bounds = src_data_array.rio.bounds() src_height, src_width = src_data_array.rio.shape dst_height, dst_width = dst_shape if dst_shape is not None else (None, None) resolution_or_width_height = { k: v for k, v in [ ("resolution", dst_resolution), ("dst_height", dst_height), ("dst_width", dst_width), ] if v is not None } dst_affine, dst_width, dst_height = rasterio.warp.calculate_default_transform( src_crs, dst_crs, src_width, src_height, *src_bounds, **resolution_or_width_height, ) return dst_affine, dst_width, dst_height def _write_metatata_to_raster(raster_handle, xarray_dataset, tags): """ Write the metadata stored in the xarray object to raster metadata """ tags = xarray_dataset.attrs if tags is None else {**xarray_dataset.attrs, **tags} # write scales and offsets try: raster_handle.scales = tags["scales"] except KeyError: try: raster_handle.scales = (tags["scale_factor"],) * raster_handle.count except KeyError: pass try: raster_handle.offsets = tags["offsets"] except KeyError: try: raster_handle.offsets = (tags["add_offset"],) * raster_handle.count except KeyError: pass # filter out attributes that should be written in a different location skip_tags = ( UNWANTED_RIO_ATTRS + FILL_VALUE_NAMES + ("transform", "scales", "scale_factor", "add_offset", "offsets") ) # this is for when multiple values are used # in this case, it will be stored in the raster description if not isinstance(tags.get("long_name"), str): skip_tags += ("long_name",) tags = {key: value for key, value in tags.items() if key not in skip_tags} 
raster_handle.update_tags(**tags) # write band name information long_name = xarray_dataset.attrs.get("long_name") if isinstance(long_name, (tuple, list)): if len(long_name) != raster_handle.count: raise RioXarrayError( "Number of names in the 'long_name' attribute does not equal " "the number of bands." ) for iii, band_description in enumerate(long_name): raster_handle.set_band_description(iii + 1, band_description) else: band_description = long_name or xarray_dataset.name if band_description: for iii in range(raster_handle.count): raster_handle.set_band_description(iii + 1, band_description) def _get_data_var_message(obj): """ Get message for named data variables. """ try: return f" Data variable: {obj.name}" if obj.name else "" except AttributeError: return "" class XRasterBase(object): """This is the base class for the GIS extensions for xarray""" def __init__(self, xarray_obj): self._obj = xarray_obj self._x_dim = None self._y_dim = None # Determine the spatial dimensions of the `xarray.DataArray` if "x" in self._obj.dims and "y" in self._obj.dims: self._x_dim = "x" self._y_dim = "y" elif "longitude" in self._obj.dims and "latitude" in self._obj.dims: self._x_dim = "longitude" self._y_dim = "latitude" # properties self._width = None self._height = None self._crs = None @property def crs(self): """:obj:`rasterio.crs.CRS`: Retrieve projection from `xarray.DataArray` or `xarray.Dataset` """ if self._crs is not None: return None if self._crs is False else self._crs try: # look in grid_mapping grid_mapping_coord = self._obj.attrs.get("grid_mapping", DEFAULT_GRID_MAP) try: self.set_crs( pyproj.CRS.from_cf(self._obj.coords[grid_mapping_coord].attrs), inplace=True, ) except pyproj.exceptions.CRSError: pass except KeyError: try: # look in attrs for 'crs' self.set_crs(self._obj.attrs["crs"], inplace=True) except KeyError: self._crs = False return None return self._crs def _get_obj(self, inplace): """ Get the object to modify. 
Parameters ---------- inplace: bool If True, returns self. Returns ------- xarray.Dataset or xarray.DataArray: """ if inplace: return self._obj obj_copy = self._obj.copy(deep=True) # preserve attribute information obj_copy.rio._x_dim = self._x_dim obj_copy.rio._y_dim = self._y_dim obj_copy.rio._width = self._width obj_copy.rio._height = self._height obj_copy.rio._crs = self._crs return obj_copy def set_crs(self, input_crs, inplace=True): """ Set the CRS value for the Dataset/DataArray without modifying the dataset/data array. Parameters ---------- input_crs: object Anything accepted by `rasterio.crs.CRS.from_user_input`. inplace: bool, optional If True, it will write to the existing dataset. Default is False. Returns ------- xarray.Dataset or xarray.DataArray: Dataset with crs attribute. """ crs = CRS.from_user_input(crs_to_wkt(input_crs)) obj = self._get_obj(inplace=inplace) obj.rio._crs = crs return obj def write_crs( self, input_crs=None, grid_mapping_name=DEFAULT_GRID_MAP, inplace=False ): """ Write the CRS to the dataset in a CF compliant manner. Parameters ---------- input_crs: object Anything accepted by `rasterio.crs.CRS.from_user_input`. grid_mapping_name: str, optional Name of the coordinate to store the CRS information in. inplace: bool, optional If True, it will write to the existing dataset. Default is False. Returns ------- xarray.Dataset or xarray.DataArray: Modified dataset with CF compliant CRS information. """ if input_crs is not None: data_obj = self.set_crs(input_crs, inplace=inplace) else: data_obj = self._get_obj(inplace=inplace) # remove old grid maping coordinate if exists try: del data_obj.coords[grid_mapping_name] except KeyError: pass if data_obj.rio.crs is None: raise MissingCRS( "CRS not found. Please set the CRS with 'set_crs()' or 'write_crs()'." 
) # add grid mapping coordinate data_obj.coords[grid_mapping_name] = xarray.Variable((), 0) grid_map_attrs = pyproj.CRS.from_user_input(data_obj.rio.crs).to_cf() # spatial_ref is for compatibility with GDAL crs_wkt = crs_to_wkt(data_obj.rio.crs) grid_map_attrs["spatial_ref"] = crs_wkt grid_map_attrs["crs_wkt"] = crs_wkt data_obj.coords[grid_mapping_name].rio.set_attrs(grid_map_attrs, inplace=True) # add grid mapping attribute to variables if hasattr(data_obj, "data_vars"): for var in data_obj.data_vars: if ( self.x_dim in data_obj[var].dims and self.y_dim in data_obj[var].dims ): data_obj[var].rio.update_attrs( dict(grid_mapping=grid_mapping_name), inplace=True ).rio.set_spatial_dims( x_dim=self.x_dim, y_dim=self.y_dim, inplace=True ) return data_obj.rio.update_attrs( dict(grid_mapping=grid_mapping_name), inplace=True ) def set_attrs(self, new_attrs, inplace=False): """ Set the attributes of the dataset/dataarray and reset rioxarray properties to re-search for them. Parameters ---------- new_attrs: dict A dictionary of new attributes. inplace: bool, optional If True, it will write to the existing dataset. Default is False. Returns ------- xarray.Dataset or xarray.DataArray: Modified dataset with new attributes. """ data_obj = self._get_obj(inplace=inplace) # set the attributes data_obj.attrs = new_attrs # reset rioxarray properties depending # on attributes to be generated data_obj.rio._nodata = None data_obj.rio._crs = None return data_obj def update_attrs(self, new_attrs, inplace=False): """ Update the attributes of the dataset/dataarray and reset rioxarray properties to re-search for them. Parameters ---------- new_attrs: dict A dictionary of new attributes to update with. inplace: bool, optional If True, it will write to the existing dataset. Default is False. Returns ------- xarray.Dataset or xarray.DataArray: Modified dataset with updated attributes. 
""" data_attrs = dict(self._obj.attrs) data_attrs.update(**new_attrs) return self.set_attrs(data_attrs, inplace=inplace) def set_spatial_dims(self, x_dim, y_dim, inplace=True): """ This sets the spatial dimensions of the dataset. Parameters ---------- x_dim: str The name of the x dimension. y_dim: str The name of the y dimension. inplace: bool, optional If True, it will modify the dataframe in place. Otherwise it will return a modified copy. Returns ------- xarray.Dataset or xarray.DataArray: Dataset with spatial dimensions set. """ def set_dims(obj, in_x_dim, in_y_dim): if in_x_dim in obj.dims: obj.rio._x_dim = x_dim else: raise DimensionError( f"x dimension ({x_dim}) not found.{_get_data_var_message(obj)}" ) if y_dim in obj.dims: obj.rio._y_dim = y_dim else: raise DimensionError( f"y dimension ({x_dim}) not found.{_get_data_var_message(obj)}" ) data_obj = self._get_obj(inplace=inplace) set_dims(data_obj, x_dim, y_dim) return data_obj @property def x_dim(self): if self._x_dim is not None: return self._x_dim raise DimensionError( "x dimension not found. 'set_spatial_dims()' can address this." f"{_get_data_var_message(self._obj)}" ) @property def y_dim(self): if self._y_dim is not None: return self._y_dim raise DimensionError( "x dimension not found. 'set_spatial_dims()' can address this." f"{_get_data_var_message(self._obj)}" ) @property def width(self): """int: Returns the width of the dataset (x dimension size)""" if self._width is not None: return self._width self._width = self._obj[self.x_dim].size return self._width @property def height(self): """int: Returns the height of the dataset (y dimension size)""" if self._height is not None: return self._height self._height = self._obj[self.y_dim].size return self._height @property def shape(self): """tuple(int, int): Returns the shape (height, width)""" return (self.height, self.width) def isel_window(self, window): """ Use a rasterio.window.Window to select a subset of the data. 
Parameters ---------- window: :class:`rasterio.window.Window` The window of the dataset to read. Returns ------- :obj:`xarray.Dataset` | :obj:`xarray.DataArray`: The data in the window. """ (row_start, row_stop), (col_start, col_stop) = window.toranges() row_slice = slice(int(math.floor(row_start)), int(math.ceil(row_stop))) col_slice = slice(int(math.floor(col_start)), int(math.ceil(col_stop))) return self._obj.isel( {self.y_dim: row_slice, self.x_dim: col_slice} ).rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True) @xarray.register_dataarray_accessor("rio") class RasterArray(XRasterBase): """This is the GIS extension for :class:`xarray.DataArray`""" def __init__(self, xarray_obj): super(RasterArray, self).__init__(xarray_obj) # properties self._nodata = None self._count = None def set_nodata(self, input_nodata, inplace=True): """ Set the nodata value for the DataArray without modifying the data array. Parameters ---------- input_nodata: object Valid nodata for dtype. inplace: bool, optional If True, it will write to the existing dataset. Default is False. Returns ------- xarray.DataArray: Dataset with nodata attribute set. """ obj = self._get_obj(inplace=inplace) obj.rio._nodata = input_nodata return obj def write_nodata(self, input_nodata, inplace=False): """ Write the nodata to the DataArray in a CF compliant manner. Parameters ---------- input_nodata: object Nodata value for the DataArray. If input_nodata is None, it will remove the _FillValue attribute. inplace: bool, optional If True, it will write to the existing DataArray. Default is False. Returns ------- xarray.DataArray: Modified DataArray with CF compliant nodata information. 
""" data_obj = self._get_obj(inplace=inplace) input_nodata = False if input_nodata is None else input_nodata if input_nodata is not False: data_obj.rio.update_attrs(dict(_FillValue=input_nodata), inplace=True) else: new_vars = dict(data_obj.attrs) new_vars.pop("_FillValue", None) data_obj.rio.set_attrs(new_vars, inplace=True) data_obj.rio.set_nodata(input_nodata, inplace=True) return data_obj @property def encoded_nodata(self): """Return the encoded nodata value for the dataset if encoded.""" return self._obj.encoding.get("_FillValue") @property def nodata(self): """Get the nodata value for the dataset.""" if self._nodata is not None: return None if self._nodata is False else self._nodata if self.encoded_nodata is not None: self._nodata = np.nan else: self._nodata = self._obj.attrs.get( "_FillValue", self._obj.attrs.get( "missing_value", self._obj.attrs.get("fill_value", self._obj.attrs.get("nodata")), ), ) # look in places used by `xarray.open_rasterio` if self._nodata is None: try: self._nodata = self._obj._file_obj.acquire().nodata except AttributeError: try: self._nodata = self._obj.attrs["nodatavals"][0] except (KeyError, IndexError): pass if self._nodata is None: self._nodata = False return None return self._nodata def _cached_transform(self): """ Get the transform from attrs or property. """ try: return Affine(*self._obj.attrs["transform"][:6]) except KeyError: pass return None def resolution(self, recalc=False): """Determine the resolution of the `xarray.DataArray` Parameters ---------- recalc: bool, optional Will force the resolution to be recalculated instead of using the transform attribute. 
""" transform = self._cached_transform() if ( not recalc or self.width == 1 or self.height == 1 ) and transform is not None: resolution_x = transform.a resolution_y = transform.e return resolution_x, resolution_y # if the coordinates of the spatial dimensions are missing # use the cached transform resolution try: left, bottom, right, top = self._internal_bounds() except DimensionMissingCoordinateError: if transform is None: raise resolution_x = transform.a resolution_y = transform.e return resolution_x, resolution_y if self.width == 1 or self.height == 1: raise OneDimensionalRaster( "Only 1 dimenional array found. Cannot calculate the resolution." f"{_get_data_var_message(self._obj)}" ) resolution_x = (right - left) / (self.width - 1) resolution_y = (bottom - top) / (self.height - 1) return resolution_x, resolution_y def _internal_bounds(self): """Determine the internal bounds of the `xarray.DataArray`""" if self.x_dim not in self._obj.coords: raise DimensionMissingCoordinateError(f"{self.x_dim} missing coordinates.") elif self.y_dim not in self._obj.coords: raise DimensionMissingCoordinateError(f"{self.y_dim} missing coordinates.") left = float(self._obj[self.x_dim][0]) right = float(self._obj[self.x_dim][-1]) top = float(self._obj[self.y_dim][0]) bottom = float(self._obj[self.y_dim][-1]) return left, bottom, right, top def _check_dimensions(self): """ This function validates that the dimensions 2D/3D and they are are in the proper order. Returns ------- str or None: Name extra dimension. """ extra_dims = list(set(list(self._obj.dims)) - set([self.x_dim, self.y_dim])) if len(extra_dims) > 1: raise TooManyDimensions( "Only 2D and 3D data arrays supported." f"{_get_data_var_message(self._obj)}" ) elif extra_dims and self._obj.dims != (extra_dims[0], self.y_dim, self.x_dim): raise InvalidDimensionOrder( "Invalid dimension order. Expected order: {0}. 
" "You can use `DataArray.transpose{0}`" " to reorder your dimensions.".format( (extra_dims[0], self.y_dim, self.x_dim) ) + f"{_get_data_var_message(self._obj)}" ) elif not extra_dims and self._obj.dims != (self.y_dim, self.x_dim): raise InvalidDimensionOrder( "Invalid dimension order. Expected order: {0}" "You can use `DataArray.transpose{0}` " "to reorder your dimensions.".format((self.y_dim, self.x_dim)) + f"{_get_data_var_message(self._obj)}" ) return extra_dims[0] if extra_dims else None @property def count(self): if self._count is not None: return self._count extra_dim = self._check_dimensions() self._count = 1 if extra_dim is not None: self._count = self._obj[extra_dim].size return self._count def bounds(self, recalc=False): """Determine the bounds of the `xarray.DataArray` Parameters ---------- recalc: bool, optional Will force the bounds to be recalculated instead of using the transform attribute. Returns ------- left, bottom, right, top: float Outermost coordinates. """ resolution_x, resolution_y = self.resolution(recalc=recalc) try: # attempt to get bounds from xarray coordinate values left, bottom, right, top = self._internal_bounds() left -= resolution_x / 2.0 right += resolution_x / 2.0 top -= resolution_y / 2.0 bottom += resolution_y / 2.0 except DimensionMissingCoordinateError: transform = self._cached_transform() left = transform.c top = transform.f right = left + resolution_x * self.width bottom = top + resolution_y * self.height return left, bottom, right, top def transform_bounds(self, dst_crs, densify_pts=21, recalc=False): """Transform bounds from src_crs to dst_crs. Optionally densifying the edges (to account for nonlinear transformations along these edges) and extracting the outermost bounds. Note: this does not account for the antimeridian. Parameters ---------- dst_crs: str, :obj:`rasterio.crs.CRS`, or dict Target coordinate reference system. 
densify_pts: uint, optional Number of points to add to each edge to account for nonlinear edges produced by the transform process. Large numbers will produce worse performance. Default: 21 (gdal default). recalc: bool, optional Will force the bounds to be recalculated instead of using the transform attribute. Returns ------- left, bottom, right, top: float Outermost coordinates in target coordinate reference system. """ return rasterio.warp.transform_bounds( self.crs, dst_crs, *self.bounds(recalc=recalc), densify_pts=densify_pts ) def transform(self, recalc=False): """Determine the affine of the `xarray.DataArray`""" src_left, _, _, src_top = self.bounds(recalc=recalc) src_resolution_x, src_resolution_y = self.resolution(recalc=recalc) return Affine.translation(src_left, src_top) * Affine.scale( src_resolution_x, src_resolution_y ) def reproject( self, dst_crs, resolution=None, shape=None, transform=None, resampling=Resampling.nearest, ): """ Reproject :class:`xarray.DataArray` objects Powered by `rasterio.warp.reproject` .. note:: Only 2D/3D arrays with dimensions 'x'/'y' are currently supported. Requires either a grid mapping variable with 'spatial_ref' or a 'crs' attribute to be set containing a valid CRS. If using a WKT (e.g. from spatiareference.org), make sure it is an OGC WKT. .. versionadded:: 0.0.27 shape .. versionadded:: 0.0.28 transform Parameters ---------- dst_crs: str OGC WKT string or Proj.4 string. resolution: float or tuple(float, float), optional Size of a destination pixel in destination projection units (e.g. degrees or metres). shape: tuple(int, int), optional Shape of the destination in pixels (dst_height, dst_width). Cannot be used together with resolution. transform, optional The destination transform. resampling: Resampling method, optional See rasterio.warp.reproject for more details. Returns ------- :class:`xarray.DataArray`: A reprojected DataArray. 
""" if resolution is not None and (shape is not None or transform is not None): raise RioXarrayError("resolution cannot be used with shape or transform.") if self.crs is None: raise MissingCRS( "CRS not found. Please set the CRS with 'set_crs()' or 'write_crs()'." f"{_get_data_var_message(self._obj)}" ) src_affine = self.transform(recalc=True) if transform is None: dst_affine, dst_width, dst_height = _make_dst_affine( self._obj, self.crs, dst_crs, resolution, shape ) else: dst_affine = transform if shape is not None: dst_height, dst_width = shape else: dst_height, dst_width = self.shape extra_dim = self._check_dimensions() if extra_dim: dst_data = np.zeros( (self._obj[extra_dim].size, dst_height, dst_width), dtype=self._obj.dtype.type, ) else: dst_data = np.zeros((dst_height, dst_width), dtype=self._obj.dtype.type) try: dst_nodata = self._obj.dtype.type( self.nodata if self.nodata is not None else -9999 ) except ValueError: # if integer, set nodata to -9999 dst_nodata = self._obj.dtype.type(-9999) src_nodata = self._obj.dtype.type( self.nodata if self.nodata is not None else dst_nodata ) rasterio.warp.reproject( source=np.copy(self._obj.load().data), destination=dst_data, src_transform=src_affine, src_crs=self.crs, src_nodata=src_nodata, dst_transform=dst_affine, dst_crs=dst_crs, dst_nodata=dst_nodata, resampling=resampling, ) # add necessary attributes new_attrs = _generate_attrs(self._obj, dst_affine, dst_nodata) # make sure dimensions with coordinates renamed to x,y dst_dims = [] for dim in self._obj.dims: if dim == self.x_dim: dst_dims.append("x") elif dim == self.y_dim: dst_dims.append("y") else: dst_dims.append(dim) xda = xarray.DataArray( name=self._obj.name, data=dst_data, coords=_make_coords(self._obj, dst_affine, dst_width, dst_height, dst_crs), dims=tuple(dst_dims), attrs=new_attrs, ) xda.encoding = self._obj.encoding return add_spatial_ref(xda, dst_crs, DEFAULT_GRID_MAP) def reproject_match(self, match_data_array, resampling=Resampling.nearest): """ 
Reproject a DataArray object to match the resolution, projection, and region of another DataArray. Powered by `rasterio.warp.reproject` .. note:: Only 2D/3D arrays with dimensions 'x'/'y' are currently supported. Requires either a grid mapping variable with 'spatial_ref' or a 'crs' attribute to be set containing a valid CRS. If using a WKT (e.g. from spatiareference.org), make sure it is an OGC WKT. Parameters ---------- match_data_array: :obj:`xarray.DataArray` DataArray of the target resolution and projection. resampling: Resampling method, optional See rasterio.warp.reproject for more details. Returns -------- :obj:`xarray.DataArray` Contains the data from the src_data_array, reprojected to match match_data_array. """ dst_crs = crs_to_wkt(match_data_array.rio.crs) return self.reproject( dst_crs, transform=match_data_array.rio.transform(recalc=True), shape=match_data_array.rio.shape, resampling=resampling, ) def slice_xy(self, minx, miny, maxx, maxy): """Slice the array by x,y bounds. Parameters ---------- minx: float Minimum bound for x coordinate. miny: float Minimum bound for y coordinate. maxx: float Maximum bound for x coordinate. maxy: float Maximum bound for y coordinate. Returns ------- DataArray: A sliced :class:`xarray.DataArray` object. """ left, bottom, right, top = self._internal_bounds() if top > bottom: y_slice = slice(maxy, miny) else: y_slice = slice(miny, maxy) if left > right: x_slice = slice(maxx, minx) else: x_slice = slice(minx, maxx) subset = self._obj.sel( {self.x_dim: x_slice, self.y_dim: y_slice} ).rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True) subset.attrs["transform"] = tuple(self.transform(recalc=True)) return subset def clip_box(self, minx, miny, maxx, maxy, auto_expand=False, auto_expand_limit=3): """Clip the :class:`xarray.DataArray` by a bounding box. Parameters ---------- minx: float Minimum bound for x coordinate. miny: float Minimum bound for y coordinate. maxx: float Maximum bound for x coordinate. 
maxy: float Maximum bound for y coordinate. auto_expand: bool If True, it will expand clip search if only 1D raster found with clip. auto_expand_limit: int maximum number of times the clip will be retried before raising an exception. Returns ------- DataArray: A clipped :class:`xarray.DataArray` object. """ if self.width == 1 or self.height == 1: raise OneDimensionalRaster( "At least one of the raster x,y coordinates has only one point." f"{_get_data_var_message(self._obj)}" ) resolution_x, resolution_y = self.resolution() clip_minx = minx - abs(resolution_x) / 2.0 clip_miny = miny - abs(resolution_y) / 2.0 clip_maxx = maxx + abs(resolution_x) / 2.0 clip_maxy = maxy + abs(resolution_y) / 2.0 cl_array = self.slice_xy(clip_minx, clip_miny, clip_maxx, clip_maxy) if cl_array.rio.width < 1 or cl_array.rio.height < 1: raise NoDataInBounds( f"No data found in bounds.{_get_data_var_message(self._obj)}" ) if cl_array.rio.width == 1 or cl_array.rio.height == 1: if auto_expand and auto_expand < auto_expand_limit: return self.clip_box( clip_minx, clip_miny, clip_maxx, clip_maxy, auto_expand=int(auto_expand) + 1, auto_expand_limit=auto_expand_limit, ) raise OneDimensionalRaster( "At least one of the clipped raster x,y coordinates" " has only one point." f"{_get_data_var_message(self._obj)}" ) # make sure correct attributes preserved & projection added _add_attrs_proj(cl_array, self._obj) return cl_array def clip(self, geometries, crs, all_touched=False, drop=True, invert=False): """ Crops a :class:`xarray.DataArray` by geojson like geometry dicts. Powered by `rasterio.features.geometry_mask`. Parameters ---------- geometries: list A list of geojson geometry dicts. crs: :obj:`rasterio.crs.CRS` The CRS of the input geometries. all_touched : bool, optional If True, all pixels touched by geometries will be burned in. If false, only pixels whose center is within the polygon or that are selected by Bresenham's line algorithm will be burned in. 
drop: bool, optional If True, drop the data outside of the extent of the mask geoemtries Otherwise, it will return the same raster with the data masked. Default is True. invert: boolean, optional If False, pixels that do not overlap shapes will be set as nodata. Otherwise, pixels that overlap the shapes will be set as nodata. False by default. Returns ------- DataArray: A clipped :class:`xarray.DataArray` object. Examples: >>> geometry = ''' {"type": "Polygon", ... "coordinates": [ ... [[-94.07955380199459, 41.69085871273774], ... [-94.06082436942204, 41.69103313774798], ... [-94.06063203899649, 41.67932439500822], ... [-94.07935807746362, 41.679150041277325], ... [-94.07955380199459, 41.69085871273774]]]}''' >>> cropping_geometries = [geojson.loads(geometry)] >>> xds = xarray.open_rasterio('cool_raster.tif') >>> cropped = xds.rio.clip(geometries=cropping_geometries, crs=4326) """ if self.crs is None: raise MissingCRS( "CRS not found. Please set the CRS with 'set_crs()' or 'write_crs()'." 
f"{_get_data_var_message(self._obj)}" ) dst_crs = CRS.from_user_input(crs_to_wkt(crs)) if self.crs != dst_crs: geometries = [ rasterio.warp.transform_geom(dst_crs, self.crs, geometry) for geometry in geometries ] clip_mask_arr = geometry_mask( geometries=geometries, out_shape=(int(self.height), int(self.width)), transform=self.transform(recalc=True), invert=not invert, all_touched=all_touched, ) clip_mask_xray = xarray.DataArray( clip_mask_arr, coords={ self.y_dim: self._obj.coords[self.y_dim], self.x_dim: self._obj.coords[self.x_dim], }, dims=(self.y_dim, self.x_dim), ) cropped_ds = self._obj.where(clip_mask_xray) if drop: cropped_ds.rio.set_spatial_dims( x_dim=self.x_dim, y_dim=self.y_dim, inplace=True ) cropped_ds = cropped_ds.rio.isel_window( get_data_window(np.ma.masked_array(clip_mask_arr, ~clip_mask_arr)) ) if self.nodata is not None and not np.isnan(self.nodata): cropped_ds = cropped_ds.fillna(self.nodata) cropped_ds = cropped_ds.astype(self._obj.dtype) if ( cropped_ds.coords[self.x_dim].size < 1 or cropped_ds.coords[self.y_dim].size < 1 ): raise NoDataInBounds( f"No data found in bounds.{_get_data_var_message(self._obj)}" ) # make sure correct attributes preserved & projection added _add_attrs_proj(cropped_ds, self._obj) return cropped_ds def _interpolate_na(self, src_data, method="nearest"): """ This method uses scipy.interpolate.griddata to interpolate missing data. Parameters ---------- method: {‘linear’, ‘nearest’, ‘cubic’}, optional The method to use for interpolation in `scipy.interpolate.griddata`. Returns ------- :class:`numpy.ndarray`: An interpolated :class:`numpy.ndarray`. 
""" src_data_flat = np.copy(src_data).flatten() try: data_isnan = np.isnan(self.nodata) except TypeError: data_isnan = False if not data_isnan: data_bool = src_data_flat != self.nodata else: data_bool = ~np.isnan(src_data_flat) if not data_bool.any(): return src_data x_coords, y_coords = np.meshgrid( self._obj.coords[self.x_dim].values, self._obj.coords[self.y_dim].values ) return griddata( points=(x_coords.flatten()[data_bool], y_coords.flatten()[data_bool]), values=src_data_flat[data_bool], xi=(x_coords, y_coords), method=method, fill_value=self.nodata, ) def interpolate_na(self, method="nearest"): """ This method uses scipy.interpolate.griddata to interpolate missing data. Parameters ---------- method: {‘linear’, ‘nearest’, ‘cubic’}, optional The method to use for interpolation in `scipy.interpolate.griddata`. Returns ------- :class:`xarray.DataArray`: An interpolated :class:`xarray.DataArray` object. """ extra_dim = self._check_dimensions() if extra_dim: interp_data = [] for _, sub_xds in self._obj.groupby(extra_dim): interp_data.append( self._interpolate_na(sub_xds.load().data, method=method) ) interp_data = np.array(interp_data) else: interp_data = self._interpolate_na(self._obj.load().data, method=method) interp_array = xarray.DataArray( name=self._obj.name, data=interp_data, coords=self._obj.coords, dims=self._obj.dims, attrs=self._obj.attrs, ) interp_array.encoding = self._obj.encoding # make sure correct attributes preserved & projection added _add_attrs_proj(interp_array, self._obj) return interp_array def to_raster( self, raster_path, driver="GTiff", dtype=None, tags=None, windowed=False, recalc_transform=True, **profile_kwargs, ): """ Export the DataArray to a raster file. Parameters ---------- raster_path: str The path to output the raster to. driver: str, optional The name of the GDAL/rasterio driver to use to export the raster. Default is "GTiff". dtype: str, optional The data type to write the raster to. Default is the datasets dtype. 
tags: dict, optional A dictionary of tags to write to the raster. windowed: bool, optional If True, it will write using the windows of the output raster. This only works if the output raster is tiled. As such, if you set this to True, the output raster will be tiled. Default is False. **profile_kwargs Additional keyword arguments to pass into writing the raster. The nodata, transform, crs, count, width, and height attributes are ignored. """ dtype = str(self._obj.dtype) if dtype is None else dtype # get the output profile from the rasterio object # if opened with xarray.open_rasterio() try: out_profile = self._obj._file_obj.acquire().profile except AttributeError: out_profile = {} out_profile.update(profile_kwargs) # filter out the generated attributes out_profile = { key: value for key, value in out_profile.items() if key not in ( "driver", "height", "width", "crs", "transform", "nodata", "count", "dtype", ) } with rasterio.open( raster_path, "w", driver=driver, height=int(self.height), width=int(self.width), count=int(self.count), dtype=dtype, crs=self.crs, transform=self.transform(recalc=recalc_transform), nodata=( self.encoded_nodata if self.encoded_nodata is not None else self.nodata ), **out_profile, ) as dst: _write_metatata_to_raster(dst, self._obj, tags) # write data to raster if windowed: window_iter = dst.block_windows(1) else: window_iter = [(None, None)] for _, window in window_iter: if window is not None: out_data = self.isel_window(window) else: out_data = self._obj if self.encoded_nodata is not None: out_data = out_data.fillna(self.encoded_nodata) data = out_data.astype(dtype).load().data if data.ndim == 2: dst.write(data, 1, window=window) else: dst.write(data, window=window) @xarray.register_dataset_accessor("rio") class RasterDataset(XRasterBase): """This is the GIS extension for :class:`xarray.Dataset`""" @property def vars(self): """list: Returns non-coordinate varibles""" return list(self._obj.data_vars) @property def crs(self): 
""":obj:`rasterio.crs.CRS`: Retrieve projection from `xarray.Dataset` """ if self._crs is not None: return None if self._crs is False else self._crs self._crs = super().crs if self._crs is not None: return self._crs for var in self.vars: crs = self._obj[var].rio.crs if crs is not None: self._crs = crs break else: self._crs = False return None return self._crs def reproject( self, dst_crs, resolution=None, shape=None, transform=None, resampling=Resampling.nearest, ): """ Reproject :class:`xarray.Dataset` objects .. note:: Only 2D/3D arrays with dimensions 'x'/'y' are currently supported. Requires either a grid mapping variable with 'spatial_ref' or a 'crs' attribute to be set containing a valid CRS. If using a WKT (e.g. from spatiareference.org), make sure it is an OGC WKT. .. versionadded:: 0.0.27 shape .. versionadded:: 0.0.28 transform Parameters ---------- dst_crs: str OGC WKT string or Proj.4 string. resolution: float or tuple(float, float), optional Size of a destination pixel in destination projection units (e.g. degrees or metres). shape: tuple(int, int), optional Shape of the destination in pixels (dst_height, dst_width). Cannot be used together with resolution. transform, optional The destination transform. resampling: Resampling method, optional See rasterio.warp.reproject for more details. Returns -------- :class:`xarray.Dataset`: A reprojected Dataset. """ resampled_dataset = xarray.Dataset(attrs=self._obj.attrs) for var in self.vars: resampled_dataset[var] = ( self._obj[var] .rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True) .rio.reproject( dst_crs, resolution=resolution, shape=shape, transform=transform, resampling=resampling, ) ) return resampled_dataset def reproject_match(self, match_data_array, resampling=Resampling.nearest): """ Reproject a Dataset object to match the resolution, projection, and region of another DataArray. .. note:: Only 2D/3D arrays with dimensions 'x'/'y' are currently supported. 
Requires either a grid mapping variable with 'spatial_ref' or a 'crs' attribute to be set containing a valid CRS. If using a WKT (e.g. from spatiareference.org), make sure it is an OGC WKT. Parameters ---------- match_data_array: :obj:`xarray.DataArray` DataArray of the target resolution and projection. resampling: Resampling method, optional See rasterio.warp.reproject for more details. Returns -------- :obj:`xarray.Dataset` Contains the data from the src_data_array, reprojected to match match_data_array. """ resampled_dataset = xarray.Dataset(attrs=self._obj.attrs) for var in self.vars: resampled_dataset[var] = ( self._obj[var] .rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True) .rio.reproject_match(match_data_array, resampling=resampling) ) return resampled_dataset.rio.set_spatial_dims( x_dim=self.x_dim, y_dim=self.y_dim, inplace=True ) def clip_box(self, minx, miny, maxx, maxy, auto_expand=False, auto_expand_limit=3): """Clip the :class:`xarray.Dataset` by a bounding box. .. warning:: Only works if all variables in the dataset have the same coordinates. Parameters ---------- minx: float Minimum bound for x coordinate. miny: float Minimum bound for y coordinate. maxx: float Maximum bound for x coordinate. maxy: float Maximum bound for y coordinate. auto_expand: bool If True, it will expand clip search if only 1D raster found with clip. auto_expand_limit: int maximum number of times the clip will be retried before raising an exception. Returns ------- DataArray: A clipped :class:`xarray.Dataset` object. 
""" clipped_dataset = xarray.Dataset(attrs=self._obj.attrs) for var in self.vars: clipped_dataset[var] = ( self._obj[var] .rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True) .rio.clip_box( minx, miny, maxx, maxy, auto_expand=auto_expand, auto_expand_limit=auto_expand_limit, ) ) return clipped_dataset.rio.set_spatial_dims( x_dim=self.x_dim, y_dim=self.y_dim, inplace=True ) def clip(self, geometries, crs, all_touched=False, drop=True, invert=False): """ Crops a :class:`xarray.Dataset` by geojson like geometry dicts. .. warning:: Only works if all variables in the dataset have the same coordinates. Powered by `rasterio.features.geometry_mask`. Parameters ---------- geometries: list A list of geojson geometry dicts. crs: :obj:`rasterio.crs.CRS` The CRS of the input geometries. all_touched : boolean, optional If True, all pixels touched by geometries will be burned in. If false, only pixels whose center is within the polygon or that are selected by Bresenham's line algorithm will be burned in. drop: bool, optional If True, drop the data outside of the extent of the mask geoemtries Otherwise, it will return the same raster with the data masked. Default is True. invert: boolean, optional If False, pixels that do not overlap shapes will be set as nodata. Otherwise, pixels that overlap the shapes will be set as nodata. False by default. Returns ------- Dataset: A clipped :class:`xarray.Dataset` object. Examples: >>> geometry = ''' {"type": "Polygon", ... "coordinates": [ ... [[-94.07955380199459, 41.69085871273774], ... [-94.06082436942204, 41.69103313774798], ... [-94.06063203899649, 41.67932439500822], ... [-94.07935807746362, 41.679150041277325], ... 
[-94.07955380199459, 41.69085871273774]]]}''' >>> cropping_geometries = [geojson.loads(geometry)] >>> xds = xarray.open_rasterio('cool_raster.tif') >>> cropped = xds.rio.clip(geometries=cropping_geometries, crs=4326) """ clipped_dataset = xarray.Dataset(attrs=self._obj.attrs) for var in self.vars: clipped_dataset[var] = ( self._obj[var] .rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True) .rio.clip( geometries, crs=crs, all_touched=all_touched, drop=drop, invert=invert, ) ) return clipped_dataset.rio.set_spatial_dims( x_dim=self.x_dim, y_dim=self.y_dim, inplace=True ) def interpolate_na(self, method="nearest"): """ This method uses `scipy.interpolate.griddata` to interpolate missing data. Parameters ---------- method: {‘linear’, ‘nearest’, ‘cubic’}, optional The method to use for interpolation in `scipy.interpolate.griddata`. Returns ------- :class:`xarray.DataArray`: An interpolated :class:`xarray.DataArray` object. """ interpolated_dataset = xarray.Dataset(attrs=self._obj.attrs) for var in self.vars: interpolated_dataset[var] = ( self._obj[var] .rio.set_spatial_dims(x_dim=self.x_dim, y_dim=self.y_dim, inplace=True) .rio.interpolate_na(method=method) ) return interpolated_dataset.rio.set_spatial_dims( x_dim=self.x_dim, y_dim=self.y_dim, inplace=True ) def to_raster( self, raster_path, driver="GTiff", dtype=None, tags=None, windowed=False, recalc_transform=True, **profile_kwargs, ): """ Export the Dataset to a raster file. Only works with 2D data. Parameters ---------- raster_path: str The path to output the raster to. driver: str, optional The name of the GDAL/rasterio driver to use to export the raster. Default is "GTiff". dtype: str, optional The data type to write the raster to. Default is the datasets dtype. tags: dict, optional A dictionary of tags to write to the raster. windowed: bool, optional If True, it will write using the windows of the output raster. This only works if the output raster is tiled. 
As such, if you set this to True, the output raster will be tiled. Default is False. **profile_kwargs Additional keyword arguments to pass into writing the raster. The nodata, transform, crs, count, width, and height attributes are ignored. """ variable_dim = "band_{}".format(uuid4()) data_array = self._obj.to_array(dim=variable_dim) # write data array names to raster data_array.attrs["long_name"] = data_array[variable_dim].values.tolist() # ensure raster metadata preserved scales = [] offsets = [] nodatavals = [] crs_list = [] for data_var in data_array[variable_dim].values: scales.append(self._obj[data_var].attrs.get("scale_factor", 1.0)) offsets.append(self._obj[data_var].attrs.get("add_offset", 0.0)) nodatavals.append(self._obj[data_var].rio.nodata) crs_list.append(self._obj[data_var].rio.crs) data_array.attrs["scales"] = scales data_array.attrs["offsets"] = offsets nodata = nodatavals[0] if ( all(nodataval == nodata for nodataval in nodatavals) or np.isnan(nodatavals).all() ): data_array.rio.write_nodata(nodata, inplace=True) else: raise RioXarrayError( "All nodata values must be the same when exporting to raster. " "Current values: {}".format(nodatavals) ) crs = crs_list[0] if all(crs_i == crs for crs_i in crs_list): data_array.rio.write_crs(crs, inplace=True) else: raise RioXarrayError( "All CRS must be the same when exporting to raster. " "Current values: {}".format(crs_list) ) # write it to a raster data_array.rio.to_raster( raster_path=raster_path, driver=driver, dtype=dtype, tags=tags, windowed=windowed, recalc_transform=recalc_transform, **profile_kwargs, )
34.871735
128
0.585797
50,133
0.833882
0
0
42,392
0.705123
0
0
25,910
0.430971
acad836bb967db6d3ec59df7b4fb252d32176a06
905
py
Python
src/the_teleop/test_popout.py
NuenoB/TheTeleop
57e3f745d391743fac408fb44bf20ffad945aa19
[ "BSD-3-Clause" ]
null
null
null
src/the_teleop/test_popout.py
NuenoB/TheTeleop
57e3f745d391743fac408fb44bf20ffad945aa19
[ "BSD-3-Clause" ]
null
null
null
src/the_teleop/test_popout.py
NuenoB/TheTeleop
57e3f745d391743fac408fb44bf20ffad945aa19
[ "BSD-3-Clause" ]
null
null
null
#! /usr/bin/env python import os import rospy import rospkg from readbag import restore from qt_gui.plugin import Plugin from python_qt_binding.QtCore import Qt from python_qt_binding import loadUi from python_qt_binding.QtGui import QFileDialog, QGraphicsView, QIcon, QWidget from PyQt4 import QtGui, QtCore from example_ui import * from PyQt4 import QtGui from v2 import Ui_addbag class Form1(QtGui.QWidget, Ui_addbag): def __init__(self, parent=None): QtGui.QWidget.__init__(self, parent) self.setupUi(self) self.pushButton_2.clicked.connect(self.handleButton) self.window2 = None def handleButton(self): if self.window2 is None: self.window2 = Form1(self) self.window2.show() self.hide() def pop(): import sys app = QtGui.QApplication(sys.argv) window = Form1() window.show() sys.exit(app.exec_())
23.815789
78
0.709392
385
0.425414
0
0
0
0
0
0
22
0.024309
acb0e8f231c4c3b90b8012cacf947784493ba5d8
2,387
py
Python
_km.py
rajesh-lab/Inverse-Weighted-Survival-Games
8e86065001fe58a3f99523b590992ac84a5d1cc4
[ "MIT" ]
7
2021-11-25T19:11:12.000Z
2022-01-12T00:22:39.000Z
_km.py
rajesh-lab/Inverse-Weighted-Survival-Games
8e86065001fe58a3f99523b590992ac84a5d1cc4
[ "MIT" ]
null
null
null
_km.py
rajesh-lab/Inverse-Weighted-Survival-Games
8e86065001fe58a3f99523b590992ac84a5d1cc4
[ "MIT" ]
null
null
null
import models import os import copy import torch import torch.nn as nn from lifelines import KaplanMeierFitter as KMFitter import pycox import numpy as np # local import catdist import data_utils import _concordance import _nll import _saver def str_to_bool(arg): """Convert an argument string into its boolean value. Args: arg: String representing a bool. Returns: Boolean value for the string. """ if arg.lower() in ('yes', 'true', 't', 'y', '1'): return True elif arg.lower() in ('no', 'false', 'f', 'n', '0'): return False else: raise argparse.ArgumentTypeError('Boolean value expected.') def isnan(x): return torch.any(torch.isnan(x)) def safe_log(x,eps): return (x+eps).log() def clip(prob,clip_min): return prob.clamp(min=clip_min) def round3(x): return round(x,3) class Meter: def __init__(self): self.N = 0 self.total = 0 def update(self,val,N): self.total += val self.N += N def avg(self): return round(self.total / self.N,4) ############################################ ############ KM G IPCW F BS and BLL ######## ############################################ def cdfvals_to_probs(cdfvals,args): K=cdfvals.shape[1] Gprobs = torch.zeros_like(cdfvals).to(args.device) Gprobs[:,0] = cdfvals[:,0] for k in range(1,K-1): Gprobs[:,k] = cdfvals[:,k] - cdfvals[:,k-1] Gprobs[:,K-1] = 1 - (Gprobs[:,:K-1]).sum(-1) return Gprobs def cdfvals_to_dist(cdfvals,bsz,args): cdfvals = cdfvals.unsqueeze(0).repeat(bsz,1) Gprobs = cdfvals_to_probs(cdfvals,args) assert torch.all( (Gprobs.sum(-1) - 1.0).abs() < 1e-4) Gdist = catdist.CatDist(logits=None, args=args, probs=Gprobs, k=None) return Gdist def get_KM_cdfvals(loader,args): u=loader.dataset.U delta=loader.dataset.Delta durations = u.cpu().numpy() is_censored = ~delta.cpu().numpy() km = pycox.utils.kaplan_meier surv_func = km(durations,is_censored).to_numpy() cdf_func = 1. 
- surv_func km_support = np.sort(np.unique(durations)) cdfvals = torch.zeros(args.K).to(args.device) for i,val in enumerate(km_support): cdfvals[val] = cdf_func[i] for i,val in enumerate(cdfvals): if i > 0: if val==0.0: cdfvals[i]=cdfvals[i-1] return cdfvals
25.393617
73
0.601592
215
0.090071
0
0
0
0
0
0
367
0.153749
acb17cfb85ffc305e2395079620b49264e4e9636
377
py
Python
active/setup.py
jordan-schneider/value-alignment-verification
f2c877b16dfefa7cd8089b7aa3fe084ab907235e
[ "MIT" ]
null
null
null
active/setup.py
jordan-schneider/value-alignment-verification
f2c877b16dfefa7cd8089b7aa3fe084ab907235e
[ "MIT" ]
2
2020-05-25T14:50:11.000Z
2021-01-18T20:23:30.000Z
active/setup.py
jordan-schneider/batch-active-preference-based-learning
f2c877b16dfefa7cd8089b7aa3fe084ab907235e
[ "MIT" ]
1
2021-08-24T18:22:13.000Z
2021-08-24T18:22:13.000Z
from distutils.core import setup from pathlib import Path # TODO(joschnei): Add typing info setup( name="active", version="0.1", packages=["active",], install_requires=[ "scipy", "numpy", "driver @ git+https://github.com/jordan-schneider/driver-env.git#egg=driver", ], package_data = { 'active': ['py.typed'], }, )
19.842105
85
0.588859
0
0
0
0
0
0
0
0
162
0.429708
acb265bb1c2030e0a289057dc790dfa08270a963
762
py
Python
macgraph/input/print_gqa.py
vinamramattoo/gqa-node-properties
53ceea7ac759b831ac3e116c4426995db3bde834
[ "Unlicense" ]
null
null
null
macgraph/input/print_gqa.py
vinamramattoo/gqa-node-properties
53ceea7ac759b831ac3e116c4426995db3bde834
[ "Unlicense" ]
null
null
null
macgraph/input/print_gqa.py
vinamramattoo/gqa-node-properties
53ceea7ac759b831ac3e116c4426995db3bde834
[ "Unlicense" ]
null
null
null
import tableprint from collections import Counter from .args import * from .util import * if __name__ == "__main__": args = get_args() output_classes = Counter() question_types = Counter() with tableprint.TableContext(headers=["Type", "Question", "Answer"], width=[40,50,15]) as t: for i in read_gqa(args): output_classes[i["answer"]] += 1 question_types[i["question"]["type_string"]] += 1 t([ i["question"]["type_string"], i["question"]["english"], i["answer"] ]) def second(v): return v[1] tableprint.table(headers=["Answer", "Count"], width=[20,5], data=sorted(output_classes.items(), key=second)) tableprint.table(headers=["Question", "Count"], width=[20,5], data=sorted(question_types.items(), key=second))
22.411765
111
0.664042
0
0
0
0
0
0
0
0
147
0.192913
acb35b9248fdf0502fc6a04e4f7dc0973b25b18c
10,811
py
Python
kaldi_tflite/lib/layers/tdnn/tdnn.py
ishine/kaldi-tflite
77284d60985ca7ab04eeb977cb08cfaad488afca
[ "Apache-2.0" ]
null
null
null
kaldi_tflite/lib/layers/tdnn/tdnn.py
ishine/kaldi-tflite
77284d60985ca7ab04eeb977cb08cfaad488afca
[ "Apache-2.0" ]
null
null
null
kaldi_tflite/lib/layers/tdnn/tdnn.py
ishine/kaldi-tflite
77284d60985ca7ab04eeb977cb08cfaad488afca
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 # Copyright (2021-) Shahruk Hossain <shahruk10@gmail.com> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from typing import Tuple, Iterable import numpy as np import tensorflow as tf from tensorflow.keras.layers import Layer from tensorflow.keras.initializers import Initializer, GlorotUniform from kaldi_tflite.lib.layers.tdnn.utils import reshapeKaldiTdnnWeights class TDNN(Layer): """ This layer implements a kaldi styled time delayed neural network layer. It's implemented to produce the same output as a TDNN layer implemented in Kaldi's Nnet3 framework. Asymmetrical left / right context is allowed just like Kaldi's splicing specification (e.g. context = [-3, -1, 0, 1]). This layer's weights can be intialized using the `<LinearParams>` and `<BiasParams>` of tdnn.affine components with the same number of units and context configuration as this layer. """ def __init__(self, units: int, context: list = [0], subsampling_factor: int = 1, padding: str = "SAME", use_bias: bool = True, kernel_initializer: Initializer = GlorotUniform(), bias_initializer: Initializer = GlorotUniform(), activation: str = None, name: str = None, **kwargs): """ Instantiates a TDNN layer with the given configuration. Parameters ---------- units : int Dimension of layer output. 
context: list, optional, List of timesteps to use in the convolution where 0 is the current timestep and -N would be the previous Nth timestep and +N would be the future Nth timestep. By default [0], no temporal context. subsampling_factor: int, optional If set to N, will evaluate output for kernel centered at every Nth timestep in the input. By default, 1 (no subsampling). padding: str, optional Padding option can be either "SAME" or "VALID". If "SAME", the input will be padded so that the output has the same number of timesteps as the input when subsampling_factor = 1. If "VALID", no padding will be done, and the kernel will be evaluated only at timestamps where it is completely within the input. By default "SAME", (same as Kaldi). use_bias: bool, optional If true, bias vector added to layer output, by default True. kernel_initializer: tf.keras.initializers.Initializer, optional Initializer to use when randomly initializing TDNN kernel weights, by default GlorotUniform (also called Xavier uniform initializer). bias_initializer: tf.keras.initializers.Initializer, optional Initializer to use when randomly initializing bias vector, by default GlorotUniform (also called Xavier uniform initializer). name : str, optional Name of the given layer. Auto set if set to None. By default None. 
""" super(TDNN, self).__init__(trainable=True, name=name, **kwargs) self.units = units self.useBias = use_bias self.subsamplingFactor = subsampling_factor if self.subsamplingFactor <= 0: raise ValueError("subsampling_factor should be > 0") self.padding = padding.upper() if self.padding not in ["VALID", "SAME"]: raise ValueError("padding should be either 'VALID' or 'SAME'") if context is None: self.context = [0] elif isinstance(context, int): self.context = [context] elif isinstance(context, list): self.context = context if len(context) > 0 else [0] else: raise ValueError("context should be None, a list or an integer") self.context.sort() self.contextOffset = tf.constant([context], dtype=tf.int32) self.kernelWidth = len(context) self.kernelInitializer = kernel_initializer self.biasInitializer = bias_initializer self.activation = activation if self.activation is not None: self.activationFunc = tf.keras.activations.get(activation) # Inputs to this layers are expected to be in the shape # (batch, timesteps, featdim) self.batchAxis = 0 self.timeAxis = 1 self.featAxis = -1 def build(self, input_shape: tuple): super(TDNN, self).build(input_shape) inputFeatDim = input_shape[self.featAxis] # Convolutional kernel weights; 2D kernel with length = 1 and width = # length of specified context timesteps. We use a 2D convolution kernel # here because it becomes simpler to apply on how the inputs are shaped # after applying tf.gather on them; see call() self.kernel = self.add_weight( name='kernel', shape=(1, self.kernelWidth, inputFeatDim, self.units), initializer=self.kernelInitializer, ) # Bias vector. 
self.bias = None if self.useBias: self.bias = self.add_weight( name="bias", shape=(self.units,), initializer=self.biasInitializer, ) def compute_output_shape(self, input_shape) -> tuple: batchSize = input_shape[self.batchAxis] inputTimesteps = input_shape[self.timeAxis] start, end = self.getStartEndSteps(inputTimesteps) outputTimesteps = (end - start) / self.subsamplingFactor outputShape = (batchSize, outputTimesteps, self.units) return outputShape def get_config(self) -> dict: config = super(TDNN, self).get_config() config.update({ "units": self.units, "context": self.context, "subsampling_factor": self.subsamplingFactor, "padding": self.padding, "use_bias": self.useBias, "kernel_intializer": self.kernelInitializer, "bias_initializer": self.biasInitializer, "activation": self.activation, }) return config def set_weights(self, weights: Iterable[np.ndarray], fmt: str = "kaldi"): """ Sets the weights of the layer, from numpy arrays. The weights can either be in the shape and order kaldi provides them in (2D matrices for kernels and 1D vector for biases) or how tensorflow expects them (output of `get_weights()`). Parameters ---------- weights : Iterable[np.ndarray] Kernel and Bias weights as a list of numpy arrays. If the layer is configured to not use bias vector, only kernel weights are expected in the list. fmt : str, optional The format in which the weights of the kernel are arranged in - either "kaldi" or "tensorflow", by default "kaldi". Raises ------ ValueError If the "order" is not "kaldi" or "tensorflow". if the number of weights in the weight list is unexpected. If the shape of the weights do not match expected shapes. 
""" fmt = fmt.lower() if fmt not in ["kaldi", "tensorflow"]: raise ValueError(f"expected 'fmt' to be either 'kaldi' or 'tensorflow', got {fmt}") if len(weights) == 0: raise ValueError(f"expected a weight list of at least length 2, got 0") if self.useBias: if len(weights) != 2: raise ValueError(f"expected a weight list of length 2, got {len(weights)}") kernel = weights[0] if fmt == "kaldi": kernel = reshapeKaldiTdnnWeights(kernel, self.units, self.kernelWidth) if self.useBias: bias = weights[1] return super(TDNN, self).set_weights([kernel, bias]) return super(TDNN, self).set_weights([kernel]) def getStartEndSteps(self, inputTimesteps: int) -> Tuple[int, int]: start = 0 end = inputTimesteps if self.padding == "VALID": if self.context[0] < 0: start = -1 * self.context[0] if self.context[-1] > 0: end = inputTimesteps - self.context[-1] return start, end def getIndicesToEval(self, inputTimesteps: int) -> tf.Tensor: start, end = self.getStartEndSteps(inputTimesteps) indices = tf.range(start=start, limit=end, delta=self.subsamplingFactor) context = tf.tile(input=self.contextOffset, multiples=[tf.size(indices), 1]) indices = tf.expand_dims(indices, axis=1) indices = context + indices # Limiting indices to be within bounds. This is equivalent to padding # the input by repeating the values at the boundaries. if self.padding == "SAME": indices = tf.clip_by_value(indices, 0, inputTimesteps - 1) return indices def call(self, inputs): inputShape = tf.shape(inputs) inputTimesteps = inputShape[self.timeAxis] # inputToEval has shape = (batch, numEval, kernelWidth, inputFeatDim) indicesToEval = self.getIndicesToEval(inputTimesteps) inputToEval = tf.gather(params=inputs, indices=indicesToEval, axis=self.timeAxis) # Using 2D convolution with a kernel length of 1, effectively 1D # convolution along kernel width. It works out easier this way when # working with tf.gather. 
# # Furthermore, tf.nn.conv1d reshapes the inputs and invokes tf.nn.conv2d # anyway (https://www.tensorflow.org/api_docs/python/tf/nn/conv1d) output = tf.nn.conv2d( inputToEval, self.kernel, strides=(1, 1), padding="VALID", data_format="NHWC", ) # Removing the dimension along kernelWidth since it has become 1 after # applying the convolution above. output = tf.squeeze(output, axis=-2) if self.useBias: output = output + self.bias if self.activation is not None: output = self.activationFunc(output) return output
38.47331
95
0.622144
9,835
0.909722
0
0
0
0
0
0
5,352
0.495051
acb38fbd951d6721bb277eafb67e6e86f5c11fc0
534
py
Python
Python/1 pengenalan python/2 komentar dan operasi matematika/6 kesimpulan.py
ekovegeance-com/tree
7a429d0f35c5a71769820177f60d22e7231b4e40
[ "Apache-2.0" ]
3
2020-12-21T13:01:35.000Z
2020-12-27T08:25:57.000Z
Python/1 pengenalan python/2 komentar dan operasi matematika/6 kesimpulan.py
ekovegeance-com/tree
7a429d0f35c5a71769820177f60d22e7231b4e40
[ "Apache-2.0" ]
2
2020-12-05T23:26:16.000Z
2020-12-27T10:21:47.000Z
Python/1 pengenalan python/2 komentar dan operasi matematika/6 kesimpulan.py
faizH3/faiz
c6a38717b91db8f76a0c4c4fd3168eb3ce8123ef
[ "Apache-2.0" ]
3
2021-07-27T19:05:40.000Z
2021-11-08T09:03:23.000Z
# Instruksi: # Buatlah komentar di garis pertama, # Buat variabel bernama jumlah_pacar yang isinya angka (bukan desimal), # Buat variabel bernama lagi_galau yang isinya boolean, # Buat variabel dengan nama terserah anda dan gunakan salah satu dari operator matematika yang telah kita pelajari. #variabel untuk menyimpan data # tipe data boolean dan angka # spasi: pentingnya spasi dalam python # komentar: untuk menjelaskan kode #operasi matematika: mulai dari penambahan sampai modulus jumlah_pacar = 1 lagi_galau = False umur = 20
35.6
115
0.803371
0
0
0
0
0
0
0
0
477
0.893258
acb6313fbce86ba3db7156bb75d62a0032b83dff
38
py
Python
04/py/q2.py
RussellDash332/practice-makes-perfect
917822b461550a2e3679351e467362f95d9e428d
[ "MIT" ]
2
2021-11-18T06:22:09.000Z
2021-12-25T09:52:57.000Z
04/py/q2.py
RussellDash332/practice-makes-perfect
917822b461550a2e3679351e467362f95d9e428d
[ "MIT" ]
2
2021-11-17T16:28:00.000Z
2021-12-01T09:59:40.000Z
04/py/q2.py
RussellDash332/practice-makes-perfect
917822b461550a2e3679351e467362f95d9e428d
[ "MIT" ]
null
null
null
print((lambda x: lambda y: 2*x)(3)(4))
38
38
0.605263
0
0
0
0
0
0
0
0
0
0
acb6c3f828ace9e1cc9ed180cfa072d2399e9b6f
3,612
py
Python
authusers/views.py
JoyMbugua/auth-app
068adbe1d019fddf3852abbf06ae9b1edd6a6158
[ "MIT" ]
null
null
null
authusers/views.py
JoyMbugua/auth-app
068adbe1d019fddf3852abbf06ae9b1edd6a6158
[ "MIT" ]
null
null
null
authusers/views.py
JoyMbugua/auth-app
068adbe1d019fddf3852abbf06ae9b1edd6a6158
[ "MIT" ]
null
null
null
from django.shortcuts import render from rest_framework.response import Response from rest_framework.views import APIView from rest_framework import status, permissions import pyotp import base64 from rest_framework_simplejwt.tokens import RefreshToken from .serializers import CustomUserSerializer from .models import CustomUser from .models import MagicLink from .utils import send_operations class UserLogin(APIView): """ view for handling login post requests """ def post(self, request): # check if a user with that email exists email = request.data.get('email') phone = request.data.get('phone_number') user = None try: if email: user = CustomUser.objects.get(email=email) if phone: users = CustomUser.objects.all() user = CustomUser.objects.get(phone_number=phone) except CustomUser.DoesNotExist: return Response(status=status.HTTP_400_BAD_REQUEST) send_operations(request, user) return Response({'status':201, 'userdata': user.username}) class CustomUserCreate(APIView): """ Creates a user """ permission_classes = (permissions.AllowAny, ) def post(self, request): serializer = CustomUserSerializer(data=request.data) if serializer.is_valid(): user = serializer.save() if user: user.counter += 1 user.save() send_operations(request, user) return Response(serializer.data, status=status.HTTP_201_CREATED) return Response(status=status.HTTP_400_BAD_REQUEST) class DashboardView(APIView): """ a protected view """ permission_classes = (permissions.IsAuthenticated,) def get(self, request): return Response(data={"message": "welcome home"}, status=status.HTTP_200_OK) class VerifyOTPView(APIView): """ verifies entered otp and manually generates a jwt token for a user """ def get_tokens_for_user(self, user, otp): """ generates jwt with otp code """ refresh = RefreshToken.for_user(user) refresh['otp'] = otp return { 'refresh': str(refresh), 'access': str(refresh.access_token), } def post(self, request): username = request.data.get('username') print("username",username) user = 
CustomUser.objects.get(username=username) if user is not None: key = base64.b32encode(user.username.encode()) otp = pyotp.HOTP(key) if otp.verify(request.data['otpCode'], user.counter): user.isVerified = True user.code = otp.at(user.counter) user.save() token = self.get_tokens_for_user(user, user.code) return Response({'status': 200, 'message': 'otp verified', 'token': token}) else: return Response({'status': 400, 'message': 'wrong otp code'}) return Response({'status': 400, 'message': 'user does not exist'}) class LoginUserFromEmail(APIView): """ creates a jwt from url associated with user """ def post(self,request): user = CustomUser.objects.last() if user is not None: magic_link = MagicLink.objects.get(user=user) magic_link_token = magic_link.get_tokens_for_user(user) return Response({'status': 200, 'message': 'magiclink ok', 'token': magic_link_token}) return Response({'status': 400, 'message': 'user does not exist'})
32.540541
98
0.625138
3,202
0.886489
0
0
0
0
0
0
645
0.178571
acb6c72808305e6d9b0b8c17d139a73696a34b74
22,550
py
Python
atom3d/util/formats.py
bfabiandev/atom3d
b2499ff743be2e851c286cabf64696682abffa44
[ "MIT" ]
null
null
null
atom3d/util/formats.py
bfabiandev/atom3d
b2499ff743be2e851c286cabf64696682abffa44
[ "MIT" ]
null
null
null
atom3d/util/formats.py
bfabiandev/atom3d
b2499ff743be2e851c286cabf64696682abffa44
[ "MIT" ]
null
null
null
"""Methods to convert between different file formats.""" import collections as col import gzip import os import re import Bio.PDB.Atom import Bio.PDB.Chain import Bio.PDB.Model import Bio.PDB.Residue import Bio.PDB.Structure import numpy as np import pandas as pd # -- MANIPULATING DATAFRAMES -- def split_df(df, key): """ Split dataframe containing structure(s) based on specified key. Most commonly used to split by ensemble (`key="ensemble"`) or subunit (`key=["ensemble", "subunit"]`). :param df: Molecular structure(s) in ATOM3D dataframe format. :type df: pandas.DataFrame :param key: key on which to split dataframe. To split on multiple keys, provide all keys in a list. Must be compatible with dataframe hierarchy, i.e. ensemble > subunit > structure > model > chain. :type key: Union[str, list[str]] :return: List of tuples containing keys and corresponding sub-dataframes. :rtypes: list[tuple] """ return [(x, y) for x, y in df.groupby(key)] def merge_dfs(dfs): """Combine a list of dataframes into a single dataframe. Assumes dataframes contain the same columns.""" return pd.concat(dfs).reset_index(drop=True) # -- CONVERTING INTERNAL FORMATS -- def bp_to_df(bp): """Convert biopython representation to ATOM3D dataframe representation. :param bp: Molecular structure in Biopython representation. :type bp: Bio.PDB.Structure :return: Molecular structure in ATOM3D dataframe format. 
:rtype: pandas.DataFrame """ df = col.defaultdict(list) for atom in Bio.PDB.Selection.unfold_entities(bp, 'A'): residue = atom.get_parent() chain = residue.get_parent() model = chain.get_parent() df['ensemble'].append(bp.get_id()) df['subunit'].append(0) df['structure'].append(bp.get_id()) df['model'].append(model.serial_num) df['chain'].append(chain.id) df['hetero'].append(residue.id[0]) df['insertion_code'].append(residue.id[2]) df['residue'].append(residue.id[1]) df['segid'].append(residue.segid) df['resname'].append(residue.resname) df['altloc'].append(atom.altloc) df['occupancy'].append(atom.occupancy) df['bfactor'].append(atom.bfactor) df['x'].append(atom.coord[0]) df['y'].append(atom.coord[1]) df['z'].append(atom.coord[2]) df['element'].append(atom.element) df['name'].append(atom.name) df['fullname'].append(atom.fullname) df['serial_number'].append(atom.serial_number) df = pd.DataFrame(df) return df def df_to_bp(df_in): """Convert ATOM3D dataframe representation to biopython representation. Assumes dataframe contains only one structure. :param df_in: Molecular structure in ATOM3D dataframe format. :type df_in: pandas.DataFrame :return: Molecular structure in BioPython format. :rtype: Bio.PDB.Structure """ all_structures = df_to_bps(df_in) if len(all_structures) > 1: raise RuntimeError('More than one structure in provided dataframe.') return all_structures[0] def df_to_bps(df_in): """Convert ATOM3D dataframe representation containing multiple structures to list of Biopython structures. Assumes different structures are specified by `ensemble` and `structure` columns of dataframe. :param df_in: Molecular structures in ATOM3D dataframe format. :type df_in: pandas.DataFrame :return : List of molecular structures in BioPython format. 
:rtype: list[Bio.PDB.Structure] """ df = df_in.copy() all_structures = [] for (structure, s_atoms) in split_df(df_in, ['ensemble', 'structure']): new_structure = Bio.PDB.Structure.Structure(structure[1]) for (model, m_atoms) in df.groupby(['model']): new_model = Bio.PDB.Model.Model(model) for (chain, c_atoms) in m_atoms.groupby(['chain']): new_chain = Bio.PDB.Chain.Chain(chain) for (residue, r_atoms) in c_atoms.groupby( ['hetero', 'residue', 'insertion_code']): # Take first atom as representative for residue values. rep = r_atoms.iloc[0] new_residue = Bio.PDB.Residue.Residue( (rep['hetero'], rep['residue'], rep['insertion_code']), rep['resname'], rep['segid']) for row, atom in r_atoms.iterrows(): new_atom = Bio.PDB.Atom.Atom( atom['name'], [atom['x'], atom['y'], atom['z']], atom['bfactor'], atom['occupancy'], atom['altloc'], atom['fullname'], atom['serial_number'], atom['element']) new_residue.add(new_atom) new_chain.add(new_residue) new_model.add(new_chain) new_structure.add(new_model) all_structures.append(new_structure) return all_structures # -- READING FILES -- # ## general reader function to get a Biopython structure # (not suported: sharded, silent, xyz-gdb) def read_any(f, name=None): """Read any ATOM3D file type into Biopython structure (compatible with pdb, pdb.gz, mmcif, sdf, xyz). :param f: file path :type f: Union[str, Path] :param name: optional name or identifier for structure. If None (default), use file basename. 
:type name: str :return: Biopython object containing structure :rtype: Bio.PDB.Structure """ if is_pdb(f): return read_pdb(f, name) elif is_pdb_gz(f): return read_pdb_gz(f, name) elif is_mmcif(f): return read_mmcif(f, name) elif is_sdf(f): return read_sdf(f) elif is_xyz(f): return read_xyz(f, name) else: raise ValueError(f"Unrecognized filetype for {f:}") ## functions to check file format patterns = { 'pdb': r'pdb[0-9]*$', 'pdb.gz': r'pdb[0-9]*\.gz$', 'mmcif': r'(mm)?cif$', 'sdf': r'sdf[0-9]*$', 'xyz': r'xyz[0-9]*$', 'xyz-gdb': r'xyz[0-9]*$', 'silent': r'out$', 'sharded': r'@[0-9]+', } _regexes = {k: re.compile(v) for k, v in patterns.items()} def is_pdb(f): """Check if file is in pdb format.""" return _regexes['pdb'].search(str(f)) def is_pdb_gz(f): """Check if file is in mmcif format.""" return _regexes['pdb.gz'].search(str(f)) def is_mmcif(f): """Check if file is in mmcif format.""" return _regexes['mmcif'].search(str(f)) def is_sdf(f): """Check if file is in sdf format.""" return _regexes['sdf'].search(str(f)) def is_xyz(f): """Check if file is in xyz format.""" return _regexes['xyz'].search(str(f)) def is_sharded(f): """Check if file is in sharded format.""" return _regexes['sharded'].search(str(f)) ## reader functions for specific file formats def read_pdb(pdb_file, name=None): """Read pdb file into Biopython structure. :param pdb_file: file path :type pdb_file: Union[str, Path] :param name: optional name or identifier for structure. If None (default), use file basename. :type name: str :return: Biopython object containing structure :rtype: Bio.PDB.Structure """ if name is None: name = os.path.basename(pdb_file) parser = Bio.PDB.PDBParser(QUIET=True) bp = parser.get_structure(name, pdb_file) return bp def read_pdb_gz(pdb_gz_file, name=None): """Read pdb.gz file into Biopython structure. :param pdb_gz_file: file path :type pdb_gz_file: Union[str, Path] :param name: optional name or identifier for structure. If None (default), use file basename. 
:type name: str :return: Biopython object containing structure :rtype: Bio.PDB.Structure """ if name is None: name = os.path.basename(pdb_gz_file) parser = Bio.PDB.PDBParser(QUIET=True) bp = parser.get_structure( name, gzip.open(pdb_gz_file, mode='rt', encoding='latin1')) return bp def read_mmcif(mmcif_file, name=None): """Read mmCIF file into Biopython structure. :param mmcif_file: file path :type mmcif_file: Union[str, Path] :param name: optional name or identifier for structure. If None (default), use file basename. :type name: str :return: Biopython object containing structure :rtype: Bio.PDB.Structure """ if name is None: os.path.basename(mmcif_file) parser = Bio.PDB.MMCIFParser(QUIET=True) return parser.get_structure(name, mmcif_file) def read_sdf(sdf_file, name=None, sanitize=False, add_hs=False, remove_hs=False): """Read SDF file into Biopython structure. :param sdf_file: file path :type sdf_file: Union[str, Path] :param sanitize: sanitize structure with RDKit. :type sanitize: bool :param add_hs: add hydrogen atoms with RDKit. :type add_hs: bool :param remove_hs: remove hydrogen atoms with RDKit. :type remove_hs: bool :return: Biopython object containing structure :rtype: Bio.PDB.Structure """ dflist = [] molecules = read_sdf_to_mol(sdf_file, sanitize=sanitize, add_hs=add_hs, remove_hs=remove_hs) for im,m in enumerate(molecules): if m is not None: df = mol_to_df(m, residue=im, ensemble=m.GetProp("_Name"), structure=m.GetProp("_Name"), model=m.GetProp("_Name")) dflist.append(df) assert len(dflist) >= 1 if len(dflist) > 1: bp = df_to_bp(merge_dfs(dflist)) else: bp = df_to_bp(dflist[0]) return bp def read_sdf_to_mol(sdf_file, sanitize=False, add_hs=False, remove_hs=False): """Reads a list of molecules from an SDF file. :param add_hs: Specifies whether to add hydrogens. Defaults to False :type add_hs: bool :param remove_hs: Specifies whether to remove hydrogens. Defaults to False :type remove_hs: bool :param sanitize: Specifies whether to sanitize the molecule. 
Defaults to False :type sanitize: bool :return: list of molecules in RDKit format. :rtype: list[rdkit.Chem.rdchem.Mol] """ from rdkit import Chem suppl = Chem.SDMolSupplier(sdf_file, sanitize=sanitize, removeHs=remove_hs) molecules = [mol for mol in suppl] if add_hs: for mol in molecules: if mol is not None: mol = Chem.AddHs(mol, addCoords=True) return molecules def mol_to_df(mol, add_hs=False, structure=None, model=None, ensemble=None, residue=999): """ Convert molecule in RDKit format to ATOM3D dataframe format, with PDB-style columns. :param mol: Molecule in RDKit format. :type mol: rdkit.Chem.rdchem.Mol :return: Dataframe in standard ATOM3D format. :rtype: pandas.DataFrame """ from rdkit import Chem df = col.defaultdict(list) if add_hs: mol = Chem.AddHs(mol, addCoords=True) conf = mol.GetConformer() for i, a in enumerate(mol.GetAtoms()): position = conf.GetAtomPosition(i) df['ensemble'].append(ensemble) df['structure'].append(structure) df['model'].append(model) df['chain'].append('LIG') df['hetero'].append('') df['insertion_code'].append('') df['residue'].append(residue) df['segid'].append('') df['resname'].append('LIG') df['altloc'].append('') df['occupancy'].append(1) df['bfactor'].append(0) df['x'].append(position.x) df['y'].append(position.y) df['z'].append(position.z) df['element'].append(a.GetSymbol().upper()) df['serial_number'].append(i) df = pd.DataFrame(df) # Make up atom names elements = df['element'].unique() el_count = {} for e in elements: el_count[e] = 0 new_name = [] for el in df['element']: el_count[el] += 1 new_name.append('%s%i'%(el,el_count[el])) df['name'] = new_name df['fullname'] = new_name return df def read_xyz(xyz_file, name=None, gdb=False): """Read an XYZ file into Biopython representation (optionally with GDB9-specific data) :param inputfile: Path to input file in XYZ format. :type inputfile: Union[str, Path] :param gdb_data: Specifies whether to process and return GDB9-specific data. 
:type gdb_date: bool :return: If `gdb=False`, returns Biopython Structure object containing molecule structure. If `gdb=True`, returns tuple containing \n \t- bp (Bio.PDB.Structure): Biopython object containing molecule structure.\n \t- data (list[float]): Scalar molecular properties.\n \t- freq (list[float]): Harmonic vibrational frequencies (:math:`3n_{atoms}−5` or :math:`3n_{atoms}-6`, in :math:`cm^{−1}`).\n \t- smiles (str): SMILES string from GDB-17 and from B3LYP relaxation.\n \t- inchi (str): InChI string for Corina and B3LYP geometries. """ # Load the xyz file into a dataframe if gdb: df, data, freq, smiles, inchi = read_xyz_to_df(xyz_file, gdb_data=True) else: df = read_xyz_to_df(xyz_file) if name is not None: df.index.name = name # Make up atom names elements = df['element'].unique() el_count = {} for e in elements: el_count[e] = 0 new_name = [] for el in df['element']: el_count[el] += 1 new_name.append('%s%i'%(el,el_count[el])) # Fill additional fields df['ensemble'] = [df.name.replace(' ','_')]*len(df) df['subunit'] = [0]*len(df) df['structure'] = [df.name.replace(' ','_')]*len(df) df['model'] = [0]*len(df) df['chain'] = ['L']*len(df) df['hetero'] = ['']*len(df) df['insertion_code'] = ['']*len(df) df['residue'] = [1]*len(df) df['segid'] = ['LIG']*len(df) df['resname'] = ['LIG']*len(df) df['altloc'] = ['']*len(df) df['occupancy'] = [1.]*len(df) df['bfactor'] = [0.]*len(df) df['name'] = new_name df['fullname'] = new_name df['serial_number'] = range(len(df)) # Convert to biopython representation bp = df_to_bp(df) if gdb: return bp, data, freq, smiles, inchi else: return bp def read_xyz_to_df(inputfile, gdb_data=False): """Read an XYZ file into Pandas DataFrame representation (optionally with GDB9-specific data) :param inputfile: Path to input file in XYZ format. :type inputfile: Union[str, Path] :param gdb_data: Specifies whether to process and return GDB9-specific data. 
:type gdb_date: bool :return: If `gdb=False`, returns DataFrame containing molecule structure. If `gdb=True`, returns tuple containing\n \t- molecule (pandas.DataFrame): Pandas DataFrame containing molecule structure.\n \t- data (list[float]): Scalar molecular properties. Returned only when `gdb=True`.\n \t- freq (list[float]): Harmonic vibrational frequencies (:math:`3n_{atoms}−5` or :math:`3n_{atoms}-6`, in :math:`cm^{−1}`). Returned only when `gdb=True`.\n \t- smiles (str): SMILES string from GDB-17 and from B3LYP relaxation. Returned only when `gdb=True`.\n \t- inchi (str): InChI string for Corina and B3LYP geometries. Returned only when `gdb=True`.\n """ with open(inputfile) as f: # Reading number of atoms in the molecule num_atoms = int(f.readline().strip()) # Loading GDB ID and label data line_labels = f.readline().strip().split('\t') name = line_labels[0] if gdb_data: data = [float(ll) for ll in line_labels[1:]] # Skip atom data (will be read using pandas below) for n in range(num_atoms): f.readline() # Harmonic vibrational frequencies if gdb_data: freq = [float(ll) for ll in f.readline().strip().split('\t')] # SMILES and InChI if gdb_data: smiles = f.readline().strip().split('\t')[0] if gdb_data: inchi = f.readline().strip().split('\t')[0] # Define columns: element, x, y, z, Mulliken charges (GDB only) columns = ['element','x', 'y', 'z'] if gdb_data: columns += ['charge'] # Load atom information molecule = pd.read_table(inputfile, names=columns, skiprows=2, nrows=num_atoms, delim_whitespace=True) # Name the dataframe molecule.name = name molecule.index.name = name # return molecule info if gdb_data: return molecule, data, freq, smiles, inchi else: return molecule # -- WRITING FILES -- def write_pdb(out_file, structure, **kwargs): """Write a biopython structure to a pdb file. This function accepts any viable arguments to Bio.PDB.PDBIO.save() as keyword arguments. :param out_file: Path to output PDB file. 
:type out_file: Union[str, Path] :param structure: Biopython object containing protein structure. :type structure: Bio.PDB.Structure """ io = Bio.PDB.PDBIO() io.set_structure(structure) io.save(out_file, **kwargs) return def write_mmcif(out_file, structure, **kwargs): """Write a biopython structure to an mmcif file. This function accepts any viable arguments to Bio.PDB.MMCIFIO.save() as keyword arguments. :param out_file: Path to output mmCIF file. :type out_file: Union[str, Path] :param structure: Biopython object containing protein structure. :type structure: Bio.PDB.structure """ io = Bio.PDB.MMCIFIO() io.set_structure(structure) io.save(out_file) return # -- CONVENIENCE FUNCTIONS AND CONSTANTS-- # (for custom data conversions) atomic_number = {'H': 1, 'He': 2, 'Li': 3, 'Be': 4, 'B': 5, 'C': 6, 'N': 7, 'O': 8, 'F': 9, 'Ne': 10, 'Na': 11, 'Mg': 12, 'Al': 13, 'Si': 14, 'P': 15, 'S': 16, 'Cl': 17, 'Ar': 18, 'K': 19, 'Ca': 20, 'Sc': 21, 'Ti': 22, 'V': 23, 'Cr': 24, 'Mn': 25, 'Fe': 26, 'Co': 27, 'Ni': 28, 'Cu': 29, 'Zn': 30, 'Ga': 31, 'Ge': 32, 'As': 33, 'Se': 34, 'Br': 35, 'Kr': 36, 'Rb': 37, 'Sr': 38, 'Y': 39, 'Zr': 40, 'Nb': 41, 'Mo': 42, 'Tc': 43, 'Ru': 44, 'Rh': 45, 'Pd': 46, 'Ag': 47, 'Cd': 48, 'In': 49, 'Sn': 50, 'Sb': 51, 'Te': 52, 'I': 53, 'Xe': 54, 'Cs': 55, 'Ba': 56, 'La': 57, 'Ce': 58, 'Pr': 59, 'Nd': 60, 'Pm': 61, 'Sm': 62, 'Eu': 63, 'Gd': 64, 'Tb': 65, 'Dy': 66, 'Ho': 67, 'Er': 68, 'Tm': 69, 'Yb': 70, 'Lu': 71, 'Hf': 72, 'Ta': 73, 'W': 74, 'Re': 75, 'Os': 76, 'Ir': 77, 'Pt': 78, 'Au': 79, 'Hg': 80, 'Tl': 81, 'Pb': 82, 'Bi': 83, 'Po': 84, 'At': 85, 'Rn': 86, 'Fr': 87, 'Ra': 88, 'Ac': 89, 'Th': 90, 'Pa': 91, 'U': 92, 'Np': 93, 'Pu': 94, 'Am': 95, 'Cm': 96, 'Bk': 97, 'Cf': 98, 'Es': 99, 'Fm': 100, 'Md': 101, 'No': 102, 'Lr': 103, 'Rf': 104, 'Db': 105, 'Sg': 106, 'Bh': 107, 'Hs': 108, 'Mt': 109, 'Ds': 110, 'Rg': 111, 'Cn': 112, 'Nh': 113, 'Fl': 114, 'Mc': 115, 'Lv': 116, 'Ts': 117, 'Og': 118} def get_coordinates_from_df(df): """Extract XYZ coordinates from 
molecule in dataframe format. :param df: Dataframe containing molecular structure. Must have columns named `x`, `y`, and `z`. :type df: pandas.DataFrame :return: XYZ coordinates as N x 3 array :rtype: numpy.ndarray """ xyz = np.empty([len(df), 3]) xyz[:, 0] = np.array(df.x) xyz[:, 1] = np.array(df.y) xyz[:, 2] = np.array(df.z) return xyz def get_coordinates_of_conformer(mol): """Reads the coordinates of a conformer. :params mol: Molecule in RDKit format. :type mol: rdkit.Chem.rdchem.Mol :return: XYZ coordinates of molecule as N x 3 float array. :rtype: numpy.ndarray """ symb = [a.GetSymbol() for a in mol.GetAtoms()] conf = mol.GetConformer() xyz = np.empty([mol.GetNumAtoms(), 3]) for ia, name in enumerate(symb): position = conf.GetAtomPosition(ia) xyz[ia] = np.array([position.x, position.y, position.z]) return xyz def get_connectivity_matrix_from_mol(mol): """Calculates the binary bond connectivity matrix from a molecule. :param mol: Molecule in RDKit format. :type mol: rdkit.Chem.rdchem.Mol :return: Binary connectivity matrix (N x N) containing all molecular bonds. :rtype: numpy.ndarray """ # Initialization num_at = mol.GetNumAtoms() connect_matrix = np.zeros([num_at, num_at], dtype=int) # Go through all atom pairs and check for bonds between them for a in mol.GetAtoms(): for b in mol.GetAtoms(): bond = mol.GetBondBetweenAtoms(a.GetIdx(), b.GetIdx()) if bond is not None: connect_matrix[a.GetIdx(), b.GetIdx()] = 1 return connect_matrix def get_bonds_matrix_from_mol(mol): """ Calculates matrix of bond types from a molecule and returns as numpy array. Bond types are encoded as double: single bond (1.0) double bond (2.0) triple bond (3.0) aromatic bond (1.5). :param mol: Molecule in RDKit format. :type mol: rdkit.Chem.rdchem.Mol :return: Bond matrix (N x N) with bond types encoded as double. 
:rtype: numpy.ndarray """ # Initialization num_at = mol.GetNumAtoms() bonds_matrix = np.zeros([num_at, num_at]) # Go through all atom pairs and check for bonds between them for a in mol.GetAtoms(): for b in mol.GetAtoms(): bond = mol.GetBondBetweenAtoms(a.GetIdx(), b.GetIdx()) if bond is not None: bt = bond.GetBondTypeAsDouble() bonds_matrix[a.GetIdx(), b.GetIdx()] = bt return bonds_matrix def get_bonds_list_from_mol(mol): """ Calculates all bonds and bond types from a molecule and returns as dataframe. Bond types are encoded as double: single bond (1.0) double bond (2.0) triple bond (3.0) aromatic bond (1.5). :param mol: Molecule in RDKit format. :type mol: rdkit.Chem.rdchem.Mol :return: Bond information as dataframe with columns `atom1`, `atom2`, `type`. :rtype: pandas.DataFrame """ bonds_list = [] for b in mol.GetBonds(): atom1 = b.GetBeginAtomIdx() atom2 = b.GetEndAtomIdx() btype = b.GetBondTypeAsDouble() bonds_list.append([atom1,atom2,btype]) col = ['atom1','atom2','type'] bonds_df = pd.DataFrame(bonds_list, columns=col) return bonds_df
34.427481
205
0.613082
0
0
0
0
0
0
0
0
11,213
0.497074
acb710e7d527ab9e28db345a225760bb21d72924
304
py
Python
py_basic/acc_in.py
M1NH42/py
792616d08013b46011db8ed6b56e40b7d9859ae2
[ "MIT" ]
null
null
null
py_basic/acc_in.py
M1NH42/py
792616d08013b46011db8ed6b56e40b7d9859ae2
[ "MIT" ]
null
null
null
py_basic/acc_in.py
M1NH42/py
792616d08013b46011db8ed6b56e40b7d9859ae2
[ "MIT" ]
null
null
null
# About: Implementation of the accumulator program # in python 3 specialization # ask to enter string phrase = input("Enter a string: ") # initialize total variable with zero tot = 0 # iterate through the string for char in phrase : if char != " " : tot += 1 # print the result print(tot)
19
50
0.6875
0
0
0
0
0
0
0
0
203
0.667763
acb7128eb35a5bcc7a3788d8062b0bad5eee5296
2,014
py
Python
general/hash_algo.py
RakeshRam/algorithm-python
80eb48bfdb98aa156fdb39a40b1f98840cddfeb9
[ "MIT" ]
null
null
null
general/hash_algo.py
RakeshRam/algorithm-python
80eb48bfdb98aa156fdb39a40b1f98840cddfeb9
[ "MIT" ]
null
null
null
general/hash_algo.py
RakeshRam/algorithm-python
80eb48bfdb98aa156fdb39a40b1f98840cddfeb9
[ "MIT" ]
null
null
null
class MapHash: def __init__(self, maxsize=6): self.maxsize = maxsize # Real scenario 64. self.hash = [None] * self.maxsize # Will be a 2D list def _get_hash_key(self, key): hash = sum(ord(k) for k in key) return hash % self.maxsize def add(self, key, value): hash_key = self._get_hash_key(key) # Hash key hash_value = [key, value] # ADD if self.hash[hash_key] is None: self.hash[hash_key] = [hash_value] # Key-value(IS a list) return True else: # Update for pair in self.hash[hash_key]: # Update-value if pair[0] == key: pair[1] = value # Update-value return True # append new Key in same hash key self.hash[hash_key].append(hash_value) def get(self, key): hash_key = self._get_hash_key(key) if self.hash[hash_key] is not None: for k, v in self.hash[hash_key]: if k == key: return v return "Key Error" def delete(self, key): hash_key = self._get_hash_key(key) if self.hash[hash_key] is not None: for i in range(len(self.hash[hash_key])): if self.hash[hash_key][i][0] == key: self.hash[hash_key].pop(i) return True else: return "Key Error" def __str__(self): return str(self.hash) def pprint(self): for item in self.hash: if item is not None: print(str(item)) data = MapHash() data.add('CaptainAmerica', '567-8888') data.add('Thor', '293-6753') data.add('Thor', '333-8233') data.add('IronMan', '293-8625') data.add('BlackWidow', '852-6551') data.add('Hulk', '632-4123') data.add('Spiderman', '567-2188') data.add('BlackPanther', '777-8888') print(data) print(data.get('BlackPanther')) print(data.delete('BlackPanther')) print(data.pprint())
30.515152
71
0.546177
1,629
0.808838
0
0
0
0
0
0
354
0.17577
acbbc4f8e25d9363036a7e6c45e12fa4a283bea3
983
py
Python
args.py
fang1fan/m5-python-starter
434bcd701c04707e5a5c3ed07ee51d0a66687dfc
[ "Apache-2.0" ]
1
2021-01-15T01:45:58.000Z
2021-01-15T01:45:58.000Z
args.py
fang1fan/m5-python-starter
434bcd701c04707e5a5c3ed07ee51d0a66687dfc
[ "Apache-2.0" ]
null
null
null
args.py
fang1fan/m5-python-starter
434bcd701c04707e5a5c3ed07ee51d0a66687dfc
[ "Apache-2.0" ]
null
null
null
import argparse parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--d_model', type=int, default=0, help='d_model') parser.add_argument('--d_head', type=int, default=2, help='head') parser.add_argument('--d_inner', type=bool, default=True, help='inner layers') parser.add_argument('--n_token', type=str, default='roberta-base', help='number of tokens') parser.add_argument('--n_layer', type=str, default='gru', help='number of hidden layers') parser.add_argument('--n_head', type=int, default=2, help='num attention heads') parser.add_argument('--dropout', type=int, default=1024, help='dropout') parser.add_argument('--dropatt', type=int, default=0.5, help='dropatt') parser.add_argument('--attention_dropout_prob', type=int, default=1024, help='attention_dropout_prob') parser.add_argument('--output_dropout_prob', type=int, default=0.5, help='output_dropout_prob') args = parser.parse_args() args = vars(args)
44.681818
102
0.755849
0
0
0
0
0
0
0
0
310
0.315361
acbe635e277d91246135b52d895f25061519435c
628
py
Python
maxblog/apps/posts/forms.py
masich/maxblog
27807ba06415de7d06bdca3a2a1d7135f09ce612
[ "MIT" ]
null
null
null
maxblog/apps/posts/forms.py
masich/maxblog
27807ba06415de7d06bdca3a2a1d7135f09ce612
[ "MIT" ]
null
null
null
maxblog/apps/posts/forms.py
masich/maxblog
27807ba06415de7d06bdca3a2a1d7135f09ce612
[ "MIT" ]
null
null
null
from django.forms import forms, ModelForm from .models import Comment, Post class CommentForm(ModelForm): class Meta: model = Comment fields = ['author_name', 'text'] labels = { 'author_name': 'Your name', 'text': 'Comment' } class PostForm(ModelForm): class Meta: model = Post fields = ['authors', 'title', 'text', 'tags', 'section'] labels = { 'authors': 'Post authors', 'title': 'Post title', 'text': 'Post text', 'tags': 'Post tags', 'section': 'Post section', }
23.259259
64
0.506369
545
0.867834
0
0
0
0
0
0
194
0.308917
acc035b9369687f59419626c7212f23a2d9ed6d2
12,257
py
Python
core/gdrn_selfocc_modeling/tools/dataset_utils.py
4PiR2/SO-Pose
a3a61d2c97b1084a4754d6c12e45e16d85809729
[ "Apache-2.0" ]
32
2021-08-23T02:07:28.000Z
2022-03-22T08:51:07.000Z
core/gdrn_selfocc_modeling/tools/dataset_utils.py
Pamyuu/SO-Pose
a3a61d2c97b1084a4754d6c12e45e16d85809729
[ "Apache-2.0" ]
13
2021-09-28T02:23:19.000Z
2022-03-31T03:23:10.000Z
core/gdrn_selfocc_modeling/tools/dataset_utils.py
Pamyuu/SO-Pose
a3a61d2c97b1084a4754d6c12e45e16d85809729
[ "Apache-2.0" ]
11
2021-08-21T05:57:18.000Z
2022-03-23T22:31:53.000Z
import copy import logging import numpy as np import operator import pickle import random import mmcv import torch import torch.multiprocessing as mp import torch.utils.data as data from torch.utils.data import dataloader from detectron2.utils.serialize import PicklableWrapper from detectron2.data.build import worker_init_reset_seed, get_detection_dataset_dicts from detectron2.data.common import AspectRatioGroupedDataset, DatasetFromList, MapDataset from detectron2.data.dataset_mapper import DatasetMapper from detectron2.data.samplers import InferenceSampler, RepeatFactorTrainingSampler, TrainingSampler from detectron2.data import DatasetCatalog, MetadataCatalog from detectron2.structures import BoxMode import ref from . import my_comm as comm logger = logging.getLogger(__name__) def flat_dataset_dicts(dataset_dicts): """ flatten the dataset dicts of detectron2 format original: list of dicts, each dict contains some image-level infos and an "annotations" field for instance-level infos of multiple instances => flat the instance level annotations flat format: list of dicts, each dict includes the image/instance-level infos an `inst_id` of a single instance, `inst_infos` includes only one instance """ new_dicts = [] for dataset_dict in dataset_dicts: img_infos = {_k: _v for _k, _v in dataset_dict.items() if _k not in ["annotations"]} if "annotations" in dataset_dict: for inst_id, anno in enumerate(dataset_dict["annotations"]): rec = {"inst_id": inst_id, "inst_infos": anno} rec.update(img_infos) new_dicts.append(rec) else: rec = img_infos new_dicts.append(rec) return new_dicts def filter_invalid_in_dataset_dicts(dataset_dicts, visib_thr=0.0): """ filter invalid instances in the dataset_dicts (for train) Args: visib_thr: """ num_filtered = 0 new_dicts = [] for dataset_dict in dataset_dicts: new_dict = {_k: _v for _k, _v in dataset_dict.items() if _k not in ["annotations"]} if "annotations" in dataset_dict: new_annos = [] for inst_id, anno in 
enumerate(dataset_dict["annotations"]): if anno.get("visib_fract", 1.0) > visib_thr: new_annos.append(anno) else: num_filtered += 1 if len(new_annos) == 0: continue new_dict["annotations"] = new_annos new_dicts.append(new_dict) if num_filtered > 0: logger.warning(f"filtered out {num_filtered} instances with visib_fract <= {visib_thr}") return new_dicts def trivial_batch_collator(batch): """A batch collator that does nothing. https://github.com/pytorch/fairseq/issues/1171 """ dataloader._use_shared_memory = False return batch def filter_empty_dets(dataset_dicts): """ Filter out images with empty detections NOTE: here we assume detections are in "annotations" Args: dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. Returns: list[dict]: the same format, but filtered. """ num_before = len(dataset_dicts) def valid(anns): if len(anns) > 0: return True # for ann in anns: # if ann.get("iscrowd", 0) == 0: # return True return False dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])] num_after = len(dataset_dicts) if num_after < num_before: logger = logging.getLogger(__name__) logger.warning( "Removed {} images with empty detections. {} images left.".format(num_before - num_after, num_after) ) return dataset_dicts def load_detections_into_dataset( dataset_name, dataset_dicts, det_file, top_k_per_obj=1, score_thr=0.0, train_objs=None, top_k_per_im=None, ): """Load test detections into the dataset. Args: dataset_name (str): dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. det_file (str): file path of pre-computed detections, in json format. Returns: list[dict]: the same format as dataset_dicts, but added proposal field. 
""" logger.info("Loading detections for {} from: {}".format(dataset_name, det_file)) detections = mmcv.load(det_file) meta = MetadataCatalog.get(dataset_name) objs = meta.objs ref_key = meta.ref_key data_ref = ref.__dict__[ref_key] models_info = data_ref.get_models_info() if "annotations" in dataset_dicts[0]: logger.warning("pop the original annotations, load detections") new_dataset_dicts = [] for i, record_ori in enumerate(dataset_dicts): record = copy.deepcopy(record_ori) scene_im_id = record["scene_im_id"] if scene_im_id not in detections: # not detected logger.warning(f"no detections found in {scene_im_id}") continue dets_i = detections[scene_im_id] annotations = [] obj_annotations = {obj: [] for obj in objs} for det in dets_i: obj_id = det["obj_id"] bbox_est = det["bbox_est"] # xywh time = det.get("time", 0.0) score = det.get("score", 1.0) if score < score_thr: continue obj_name = data_ref.id2obj[obj_id] if obj_name not in objs: # detected obj is not interested continue if train_objs is not None: # not in trained objects if obj_name not in train_objs: continue label = objs.index(obj_name) inst = { "category_id": label, "bbox_est": bbox_est, "bbox_mode": BoxMode.XYWH_ABS, "score": score, "time": time, "model_info": models_info[str(obj_id)], # TODO: maybe just load this in the main function } obj_annotations[obj_name].append(inst) for obj, cur_annos in obj_annotations.items(): scores = [ann["score"] for ann in cur_annos] sel_annos = [ann for _, ann in sorted(zip(scores, cur_annos), key=lambda pair: pair[0], reverse=True)][ :top_k_per_obj ] annotations.extend(sel_annos) # NOTE: maybe [], no detections record["annotations"] = annotations new_dataset_dicts.append(record) if len(new_dataset_dicts) < len(dataset_dicts): logger.warning( "No detections found in {} images. 
original: {} imgs, left: {} imgs".format( len(dataset_dicts) - len(new_dataset_dicts), len(dataset_dicts), len(new_dataset_dicts) ) ) return new_dataset_dicts def load_init_poses_into_dataset( dataset_name, dataset_dicts, init_pose_file, top_k_per_obj=1, score_thr=0.0, train_objs=None, top_k_per_im=None, ): """Load initial poses into the dataset. Args: dataset_name (str): dataset_dicts (list[dict]): annotations in Detectron2 Dataset format. init_pose_file (str): file path of pre-computed initial poses, in json format. Returns: list[dict]: the same format as dataset_dicts, but added proposal field. """ logger.info("Loading initial poses for {} from: {}".format(dataset_name, init_pose_file)) init_det_poses = mmcv.load(init_pose_file) meta = MetadataCatalog.get(dataset_name) objs = meta.objs ref_key = meta.ref_key data_ref = ref.__dict__[ref_key] models_info = data_ref.get_models_info() if "annotations" in dataset_dicts[0]: logger.warning("pop the original annotations, load initial poses") for record in dataset_dicts: scene_im_id = record["scene_im_id"] dets_i = init_det_poses[scene_im_id] annotations = [] obj_annotations = {obj: [] for obj in objs} for det in dets_i: obj_id = det["obj_id"] # NOTE: need to prepare init poses into this format pose_est = np.array(det["pose_est"], dtype=np.float32).reshape(3, 4) bbox_est = det.get("bbox_est", None) # xywh or None time = det.get("time", 0.0) score = det.get("score", 1.0) if score < score_thr: continue obj_name = data_ref.id2obj[obj_id] if obj_name not in objs: # detected obj is not interested continue if train_objs is not None: # not in trained objects if obj_name not in train_objs: continue label = objs.index(obj_name) inst = { "category_id": label, "pose_est": pose_est, "score": score, "time": time, "model_info": models_info[str(obj_id)], # TODO: maybe just load this in the main function } if bbox_est is not None: # if None, compute bboxes from poses and 3D points later inst["bbox_est"] = bbox_est inst["bbox_mode"] = 
BoxMode.XYWH_ABS obj_annotations[obj_name].append(inst) for obj, cur_annos in obj_annotations.items(): scores = [ann["score"] for ann in cur_annos] sel_annos = [ann for _, ann in sorted(zip(scores, cur_annos), key=lambda pair: pair[0], reverse=True)][ :top_k_per_obj ] annotations.extend(sel_annos) # NOTE: maybe [], no initial poses record["annotations"] = annotations return dataset_dicts def my_build_batch_data_loader(dataset, sampler, total_batch_size, *, aspect_ratio_grouping=False, num_workers=0): """Build a batched dataloader for training. Args: dataset (torch.utils.data.Dataset): map-style PyTorch dataset. Can be indexed. sampler (torch.utils.data.sampler.Sampler): a sampler that produces indices total_batch_size, aspect_ratio_grouping, num_workers): see :func:`build_detection_train_loader`. Returns: iterable[list]. Length of each list is the batch size of the current GPU. Each element in the list comes from the dataset. """ world_size = comm.get_world_size() assert ( total_batch_size > 0 and total_batch_size % world_size == 0 ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format(total_batch_size, world_size) batch_size = total_batch_size // world_size # Horovod: limit # of CPU threads to be used per worker. 
if num_workers > 0: torch.set_num_threads(num_workers) kwargs = {"num_workers": num_workers} # When supported, use 'forkserver' to spawn dataloader workers instead of 'fork' to prevent # issues with Infiniband implementations that are not fork-safe # https://github.com/horovod/horovod/blob/master/examples/pytorch/pytorch_imagenet_resnet50.py # if (num_workers > 0 and hasattr(mp, '_supports_context') and # mp._supports_context and 'forkserver' in mp.get_all_start_methods()): # kwargs['multiprocessing_context'] = 'forkserver' if aspect_ratio_grouping: data_loader = torch.utils.data.DataLoader( dataset, sampler=sampler, batch_sampler=None, collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements worker_init_fn=worker_init_reset_seed, #pin_memory=True, **kwargs, ) # yield individual mapped dict return AspectRatioGroupedDataset(data_loader, batch_size) else: batch_sampler = torch.utils.data.sampler.BatchSampler( sampler, batch_size, drop_last=True ) # drop_last so the batch always have the same size return torch.utils.data.DataLoader( dataset, batch_sampler=batch_sampler, collate_fn=trivial_batch_collator, worker_init_fn=worker_init_reset_seed, #pin_memory=True, **kwargs, )
36.263314
115
0.632781
0
0
0
0
0
0
0
0
4,188
0.341682
acc03fcb53c4b1a3e12c7dd2807ba7725140ab62
4,204
py
Python
tests/settings.py
tanjibpa/django-coconut
456add56cd2bf851a245337cfbf71c6587d1bf98
[ "BSD-2-Clause" ]
null
null
null
tests/settings.py
tanjibpa/django-coconut
456add56cd2bf851a245337cfbf71c6587d1bf98
[ "BSD-2-Clause" ]
null
null
null
tests/settings.py
tanjibpa/django-coconut
456add56cd2bf851a245337cfbf71c6587d1bf98
[ "BSD-2-Clause" ]
null
null
null
# -*- coding: utf-8 -*- # # django-coconuts # Copyright (c) 2008-2017, Jeremy Lainé # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # import getpass import os DEBUG = True ADMINS = ( # ('Your Name', 'your_email@example.com'), ) DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': '', # Or path to database file if using sqlite3. } } # Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en/1.4/ref/settings/#allowed-hosts ALLOWED_HOSTS = [] # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. 
# In a Windows environment this must be set to your system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = '' # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # Make this unique, and don't share it with anybody. 
SECRET_KEY = 'i!8c$-3sc+6+t$rma%l6(ux9ma8pq7f!h03=y!kx_hdf#*!y%o' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ os.path.dirname(__file__) + '/../templates', ] } ] MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'tests.urls' INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'coconuts', ) # FIXME: make this windows-friendly tmp_dir = '/tmp/coconuts.test.%s' % getpass.getuser() COCONUTS_CACHE_ROOT = os.path.join(tmp_dir, 'cache') COCONUTS_DATA_ROOT = os.path.join(tmp_dir, 'data')
35.627119
85
0.732873
0
0
0
0
0
0
0
0
3,504
0.833294
acc0ce4ad97f2f6c746aca5ab1091cc7bf3d7762
3,419
py
Python
models/oomusic_transcoder.py
nicolasmartinelli/oomusic
376618eb61a64aae9d04db21f42ce859c1951f22
[ "MIT" ]
4
2018-03-04T19:11:20.000Z
2020-01-13T18:23:15.000Z
models/oomusic_transcoder.py
nicolasmartinelli/oomusic
376618eb61a64aae9d04db21f42ce859c1951f22
[ "MIT" ]
5
2016-10-10T12:46:14.000Z
2017-02-01T22:14:39.000Z
models/oomusic_transcoder.py
DocMarty84/oomusic
376618eb61a64aae9d04db21f42ce859c1951f22
[ "MIT" ]
3
2017-09-04T02:58:41.000Z
2019-09-17T08:10:29.000Z
# -*- coding: utf-8 -*- import datetime import os import subprocess from odoo import fields, models from odoo.tools import OrderedSet class MusicTranscoder(models.Model): _name = "oomusic.transcoder" _description = "Music Transcoder" _order = "sequence" name = fields.Char("Transcoder Name", required=True) sequence = fields.Integer( default=10, help="Sequence used to order the transcoders. The lower the value, the higher the priority", ) command = fields.Char( "Command line", required=True, help="""Command to execute for transcoding. Specific keywords are automatically replaced: - "%i": input file - "%s": start from this seek time - "%b": birate for output file - "%n": extra parameter for normaliation """, ) bitrate = fields.Integer( "Bitrate/Quality", required=True, help="""Default bitrate or quality (for VBR). Can be changed if necessary when the transcoding function is called.""", ) black_formats = fields.Many2many( "oomusic.format", string="Blacklisted Formats", index=True, help="Input formats which cannot be converted using this transcoder.", ) output_format = fields.Many2one("oomusic.format", string="Output Format", required=True) buffer_size = fields.Integer( "Buffer Size (KB)", required=True, default=200, help="""Size of the buffer used while streaming. A larger value can reduce the potential file download errors when playing, but will increase the waiting delay when switching songs. The default value (200 KB) should be a good compromise between waiting delay and download stability. A large value (e.g. 20000 KB) will ensure the complete download of the file before playing.""", ) def transcode(self, track_id, bitrate=0, seek=0, norm=False): """ Method used to transcode a track. It takes in charge the replacement of the specific keywords of the command, and returns the subprocess executed. The subprocess output is redirected to stdout, so it is possible to stream the transcoding result while it is still ongoing. 
:param track_id: ID of the track to transcode :param bitrate: value of the bitrate for the output file. Optional field aimed to override the default value :param seek: start time for the encoding :returns: subprocess redirected to stdout. :rtype: subprocess.Popen """ self.ensure_one() Track = self.env["oomusic.track"].browse([track_id]) cmd = ( self.command.replace("%s", "%s" % (str(datetime.timedelta(seconds=seek)))) .replace("%b", "%d" % (bitrate or self.bitrate)) .replace("%n", "-af loudnorm=I=-18" if norm else "") .replace("%f", "%s" % os.path.splitext(Track.path)[1][1:]) ) cmd = [c for c in cmd.split(" ") if c] cmd[cmd.index("%i")] = Track.path proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=open(os.devnull, "w")) return proc def _get_browser_output_formats(self): return OrderedSet( self.search([("output_format", "in", ["opus", "ogg", "mp3"])]).mapped( "output_format.name" ) )
37.988889
100
0.623574
3,280
0.959345
0
0
0
0
0
0
1,905
0.55718
acc1831c2a1513db9dd276b866be14f74d8ab6db
341
py
Python
Chapter04/CNN_1.py
PacktPublishing/Practical-Convolutional-Neural-Networks
fabffa7f8afa8986a3f2e0756ec8bc4c12836eb9
[ "MIT" ]
23
2018-03-22T21:30:32.000Z
2022-01-02T13:26:34.000Z
Chapter04/CNN_1.py
huanghanchi/Practical-Convolutional-Neural-Networks
365aa803d38316ed9749e4c8c0f3ae2667788781
[ "MIT" ]
2
2018-05-21T04:53:34.000Z
2019-03-05T13:04:34.000Z
Chapter04/CNN_1.py
huanghanchi/Practical-Convolutional-Neural-Networks
365aa803d38316ed9749e4c8c0f3ae2667788781
[ "MIT" ]
17
2018-03-12T12:00:19.000Z
2022-02-22T16:36:36.000Z
#Refer AlexNet implementation code, returns last fully connected layer fc7 = AlexNet(resized, feature_extract=True) shape = (fc7.get_shape().as_list()[-1], 43) fc8_weight = tf.Variable(tf.truncated_normal(shape, stddev=1e-2)) fc8_b = tf.Variable(tf.zeros(43)) logits = tf.nn.xw_plus_b(fc7, fc8_weight, fc8_b) probs = tf.nn.softmax(logits)
34.1
70
0.756598
0
0
0
0
0
0
0
0
70
0.205279
acc1ce36377278a085395e3d7d5f5eee13f3cf3a
169
py
Python
sexyhotel/venv/Scripts/django-admin.py
JesseChanCN/sexyHotel
b0e2362073fe6fa5bd551f96a9a45f1e26f3d353
[ "BSD-2-Clause" ]
null
null
null
sexyhotel/venv/Scripts/django-admin.py
JesseChanCN/sexyHotel
b0e2362073fe6fa5bd551f96a9a45f1e26f3d353
[ "BSD-2-Clause" ]
1
2021-10-02T22:17:39.000Z
2021-10-02T22:17:39.000Z
sexyhotel/venv/Scripts/django-admin.py
JesseChanCN/sexyHotel
b0e2362073fe6fa5bd551f96a9a45f1e26f3d353
[ "BSD-2-Clause" ]
4
2018-11-26T03:59:52.000Z
2018-11-26T08:30:54.000Z
#!C:\Users\Administrator\Desktop\装饰器代码\venv\Scripts\python.exe from django.core import management if __name__ == "__main__": management.execute_from_command_line()
28.166667
62
0.792899
0
0
0
0
0
0
0
0
82
0.458101
acc201e9c32baaef93d15355f2aee82e6196773e
11,704
py
Python
fuzz.py
codebyzen/smart-url-fuzzer
de611628fa7ab445a23f438bfccf3a7b4ecf840a
[ "Apache-2.0" ]
38
2018-06-04T14:37:13.000Z
2022-03-02T16:38:46.000Z
fuzz.py
codebyzen/smart-url-fuzzer
de611628fa7ab445a23f438bfccf3a7b4ecf840a
[ "Apache-2.0" ]
7
2018-06-04T15:21:15.000Z
2021-07-23T09:40:31.000Z
fuzz.py
codebyzen/smart-url-fuzzer
de611628fa7ab445a23f438bfccf3a7b4ecf840a
[ "Apache-2.0" ]
19
2020-02-20T12:17:46.000Z
2022-01-27T17:15:28.000Z
import logging import sys import os from logging.handlers import RotatingFileHandler from multiprocessing.pool import ThreadPool from optparse import OptionParser import requests from requests.packages import urllib3 urllib3.disable_warnings() # Workers configurations ASYNC_WORKERS_COUNT = 100 # How many threads will make http requests. WORKERS_DECREMENTED_COUNT_ON_ERROR = 10 # Retry the fuzzing with x less workers, to decrease the load on the server. STARTED_JOB_LOG_INTERVAL = 100 # Every x started jobs, a log will be written # IO Configurations DEFAULT_PATHS_LIST_FILE = 'words_lists/Filenames_or_Directories_Common.wordlist' VALID_ENDPOINTS_FILE = 'endpoints.txt' # HTTP Configuration RESOURCE_EXISTS_STATUS_CODES = list(range(200, 300)) + [401, 402, 403] DEFAULT_BASE_URL = 'https://www.example.com' # Logging configurations LOGS_DIRECTORY_FULL_NAME = 'logs' LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s' LOGGING_LEVEL = logging.INFO BACKUP_LOGS_FILES_COUNT = 5 FUZZING_LOGGER_NAME = 'fuzzing' LOG_FILE_MAX_BYTES = 0.5 * 1000 * 1000 # 500 KB class FilesFactory(object): """ Manage files and directories """ files = [] urls = [] def read_files_from_directory(self, user_path): self.files = [os.path.join(user_path, f) for f in os.listdir(user_path) if os.path.isfile(os.path.join(user_path, f))] def read_lines_from_files(self): for l in self.files: h = open(l, 'r') self.urls += h.read().splitlines() def __init__(self,user_path): if os.path.isdir(user_path): self.read_files_from_directory(user_path) self.read_lines_from_files() elif(os.path.isfile(user_path)): self.files.append(user_path) self.read_lines_from_files() class LoggerFactory(object): """ Manages loggers """ loggers = {} logging_level = LOGGING_LEVEL logging.basicConfig(stream=sys.stdout, level=logging_level, format=LOG_FORMAT) # Modifying the logger's level to ERROR to prevent console spam logging.getLogger('urllib3').setLevel(logging.WARNING) @staticmethod def get_logger(logger_name): """ Gets 
a logger by it's name. Created the logger if it don't exist yet. :param logger_name: The name of the logger (identifier). :return: The logger instance. :returns: Logger """ if logger_name not in LoggerFactory.loggers: LoggerFactory.loggers[logger_name] = LoggerFactory._get_logger(logger_name) return LoggerFactory.loggers[logger_name] @staticmethod def _get_logger(logger_name, logs_directory_path=LOGS_DIRECTORY_FULL_NAME): """ Creates a logger with rolling file handler, Or returns the logger if it already exists. :param logger_name: The name of the logger :param logs_directory_path: The path of the directory that the logs will be written to. :return: An initialized logger instance. returns: Logger """ # Creating the logs folder if its doesn't exist if not os.path.exists(logs_directory_path): os.mkdir(logs_directory_path) logger = logging.getLogger(logger_name) formatter = logging.Formatter(LOG_FORMAT) # Adding a rotating file handler rotating_file_handler = RotatingFileHandler( os.path.join(logs_directory_path, '{0}.log'.format(logger_name)), maxBytes=LOG_FILE_MAX_BYTES, backupCount=BACKUP_LOGS_FILES_COUNT) rotating_file_handler.setFormatter(formatter) rotating_file_handler.setLevel(LOGGING_LEVEL) logger.addHandler(rotating_file_handler) return logger class AsyncURLFuzzer(object): """ An asynchronous http(s) website endpoint locator. Discovers active endpoints in websites, based on a list of common URLS. """ def __init__(self, base_url=DEFAULT_BASE_URL, list_file=DEFAULT_PATHS_LIST_FILE, async_workers_count=ASYNC_WORKERS_COUNT, output_file=VALID_ENDPOINTS_FILE, resource_exists_status_codes=RESOURCE_EXISTS_STATUS_CODES): """ Initializes a new member of this class. :param base_url: The base url of the website. :type base_url: str :param list_file: The path of a file, containing the paths to check. :type list_file: str :param async_workers_count: How many workers (threads) to use. 
:type async_workers_count: int :param output_file: The name of the active endpoints output file. :type output_file: str :param resource_exists_status_codes: A list of HTTP status codes to consider as valid. :type resource_exists_status_codes: list """ self._logger = LoggerFactory.get_logger(FUZZING_LOGGER_NAME) self._base_url = base_url self._list_file_path = list_file self._async_workers_count = async_workers_count self._output_file_path = output_file self._resource_exists_status_codes = resource_exists_status_codes self._active_paths_status_codes = {} self._checked_endpoints = {} self._endpoints_total_count = 0 self._session = requests.session() def start(self): """ Starts the fuzzing with the initialized parameters. """ self._get_website_endpoints() def _get_website_endpoints(self, async_workers_count=ASYNC_WORKERS_COUNT): """ Requests asynchronously for all the resources with a number of workers (threads). If it fails for HTTP overloads reasons, it retries with less workers, because it's probably a DDOS protection mechanism. :param async_workers_count: How many workers (threads) to use. 
:type async_workers_count: int """ self._load_paths_list() self._logger.info( 'Getting the endpoints of the website {0} with list file "{1}" and {2} async workers.'.format( self._base_url, self._list_file_path, async_workers_count)) if 0 >= async_workers_count: self._logger.error('Seems like the site does not support fuzzing, as it has a DDOS protection engine.') return pool = ThreadPool(async_workers_count) try: tasks = [] self._logger.debug('Preparing the workers...') for i, path in enumerate(self._paths): self._logger.debug('Started a worker for the endpoint {0}'.format(path)) if i > i and i % STARTED_JOB_LOG_INTERVAL == 0: self._logger.info('Started {0} workers'.format(i)) path = path.strip() full_path = '/'.join([self._base_url, path]) tasks.append(pool.apply_async(self.request_head, (full_path, path))) for t in tasks: status_code, full_path, path = t.get() self._checked_endpoints[path] = path if self._is_valid_status_code(status_code): self._active_paths_status_codes[path] = status_code self._logger.info( 'Fetched {0}/{1}; {2}; {3}'.format(len(self._checked_endpoints), self._endpoints_total_count, status_code, full_path)) self._save_output_log() except requests.ConnectionError as e: pool.terminate() self._logger.error(e) self._logger.warning('An error occured while fuzzing.' ' Retrying with less async workers to reduce the server load.') retry_workers_count = async_workers_count - WORKERS_DECREMENTED_COUNT_ON_ERROR self._get_website_endpoints(retry_workers_count) def _is_valid_status_code(self, status_code): """ Checks whether a HTTP status code implies that the resouce exists. :param status_code: :return: True if the status code implies that the resouce exists, False otherwise. """ return status_code in self._resource_exists_status_codes def _save_output_log(self): """ Saves the results to an output file. 
""" full_status_codes = {'/'.join([self._base_url, p]): code for p, code in self._active_paths_status_codes.items()} output_lines = ['{0} : {1}'.format(path, code) for path, code in full_status_codes.items()] if 1 >= len(output_lines): self._logger.warning( 'There were no discovered endpoints. consider using a different file from "words_list" directory') self._logger.info('The following endpoints are active:{0}{1}'.format(os.linesep, os.linesep.join(output_lines))) with open(self._output_file_path, 'a+') as output_file: output_lines.sort() output_file.write(os.linesep.join(output_lines)) self._logger.info('The endpoints were exported to "{0}"'.format(self._output_file_path)) def _load_paths_list(self): """ Loads the list of paths from the configured status. """ if not os.path.exists(self._list_file_path): raise FileNotFoundError('The file "{0}" does not exist.'.format(self._list_file_path)) with open(self._list_file_path) as paths_file: paths = [p.strip().lstrip('/').rstrip('/') for p in paths_file.readlines()] paths = [p for p in paths if p not in self._active_paths_status_codes] if not self._endpoints_total_count: self._endpoints_total_count = len(paths) self._paths = paths def request_head(self, url, path): """ Executes a http HEAD request to a url. :param url: The full url to contact. :param path: The uri of the request. :return: A tuple of 3 variables: the recieved status code (int), the url argument (str), the path argument (str). """ if url != '': res = self._session.head(url, verify=False, allow_redirects=True) return res.status_code, url, path if __name__ == '__main__': # Parsing the parameters. parser = OptionParser(description= 'An Asynchronous, robust websites endpoint discovery tool with smart error handling. ' 'Locates resources in websites based on a list of paths. 
' 'Check out the "words_list"" directory for lists examples.', usage='%prog -u https://example.com/', version='%prog 0.1') parser.add_option('-u', '--url', dest='base_url', help='The target website to scan.', default=DEFAULT_BASE_URL) parser.add_option('-l', '--list', dest='list_file', help='A file containing the paths to check (separated with lines).', default=DEFAULT_PATHS_LIST_FILE) (options, args) = parser.parse_args() list_file = options.list_file base_url = options.base_url if base_url is None: parser.print_help() sys.exit() # Suspending warning logs from requests and urllib3 logging.getLogger("urllib3").setLevel(logging.ERROR) logging.getLogger("requests").setLevel(logging.ERROR) if (os.path.isdir(base_url) or os.path.isfile(base_url)): FilesFactory(base_url) for u in FilesFactory.urls: fuzzer = AsyncURLFuzzer(u, list_file) fuzzer.start() else: fuzzer = AsyncURLFuzzer(base_url, list_file) fuzzer.start()
41.8
126
0.647471
9,202
0.786227
0
0
1,590
0.135851
0
0
4,155
0.355007
acc20414e4e5bdf59a1324cd753233fca1a3b6c7
1,432
py
Python
models/cells/PC2018Zang/Purkinje.py
HarshKhilawala/cerebmodels
d2a2f2ef947ef9dc23ddce6e55159240cd3233cb
[ "BSD-3-Clause" ]
null
null
null
models/cells/PC2018Zang/Purkinje.py
HarshKhilawala/cerebmodels
d2a2f2ef947ef9dc23ddce6e55159240cd3233cb
[ "BSD-3-Clause" ]
9
2020-03-24T17:09:03.000Z
2021-05-17T16:11:17.000Z
models/cells/PC2018Zang/Purkinje.py
myHBPwork/cerebmodels
371ea7f1bbe388f1acade17c7128b8ca6ab8fb7a
[ "BSD-3-Clause" ]
1
2021-05-21T03:08:41.000Z
2021-05-21T03:08:41.000Z
#Template of the Purkinje cell model, Zang et al. 2018 #Templating by Lungsi 2019 based on ~/PC2018Zang/purkinje.hoc #purkinje.hoc has been converted from original purkinje_demo and using readme.html as a guide from neuron import h #from pdb import set_trace as breakpoint from random import randint class Purkinje(object): """Multi-compartment cell """ def __init__(self): h.xopen("purkinje.hoc") # There are 1088 compartments and the following are chosen as # attributes to this python class for potential recording self.soma = h.somaA self.ais = h.AIS # Based on last 50 or so lines of Purkinje19b972-1.nrn self.dend_root = h.dendA1_0 # see Fig.2A of paper # Reverse eng. from Purkinje19b972-1.nrn and dendv_arnd21.ses dend_sm = [ sec for sec in h.maindend ] # len(dend_sm) -> 30 dend_sp = [ sec for sec in h.spinydend ] # len(dend_sp) -> 1105 # note that for either self.dend_sm or self.dend_sp # the first element of its list is a dendrite section closest to soma # and last element is the dendrite section farthest away. # also potentially #self.cf = [ sec for sec in h.cf ] # for climbing fibre #self.pf = [ sec for sec in h.pf ] # for paraller fibre # self.dend_sm = dend_sm[ randint(0, len(dend_sm)-1) ] self.dend_sp = dend_sp[ randint(0, len(dend_sp)-1) ]
44.75
93
0.664106
1,130
0.789106
0
0
0
0
0
0
897
0.626397
acc30f8a7181a1c7790a5aea356612213e20f556
375
py
Python
octoprint_marlin_flasher/validation/validators/arduino.py
thinkyhead/OctoPrint-Marlin-Flasher
c43110226d1b9d4aa0df2fdfb8cffab47d687957
[ "MIT" ]
1
2021-09-20T22:17:22.000Z
2021-09-20T22:17:22.000Z
octoprint_marlin_flasher/validation/validators/arduino.py
thinkyhead/OctoPrint-Marlin-Flasher
c43110226d1b9d4aa0df2fdfb8cffab47d687957
[ "MIT" ]
null
null
null
octoprint_marlin_flasher/validation/validators/arduino.py
thinkyhead/OctoPrint-Marlin-Flasher
c43110226d1b9d4aa0df2fdfb8cffab47d687957
[ "MIT" ]
1
2021-12-10T03:37:29.000Z
2021-12-10T03:37:29.000Z
from flask_babel import gettext from marshmallow import ValidationError import intelhex import zipfile def is_correct_file_type(filename): try: with zipfile.ZipFile(filename, "r") as _: pass except zipfile.BadZipfile: try: ih = intelhex.IntelHex() ih.loadhex(filename) except intelhex.IntelHexError: raise ValidationError(gettext("Invalid file type."))
22.058824
55
0.770667
0
0
0
0
0
0
0
0
23
0.061333
acc3ba487d48e6f78d9730e1c98d383793fee0ba
3,203
py
Python
zazu/git_helper.py
stopthatcow/zazu
b4d64d872bf283e997642010bb0dbb88df0ce1c1
[ "MIT" ]
2
2017-01-03T20:22:17.000Z
2017-01-03T21:52:14.000Z
zazu/git_helper.py
stopthatcow/zazu
b4d64d872bf283e997642010bb0dbb88df0ce1c1
[ "MIT" ]
186
2017-01-03T20:24:50.000Z
2022-03-13T02:30:30.000Z
zazu/git_helper.py
stopthatcow/zazu
b4d64d872bf283e997642010bb0dbb88df0ce1c1
[ "MIT" ]
3
2017-01-03T21:42:32.000Z
2018-02-09T13:25:06.000Z
# -*- coding: utf-8 -*- """Git functions for zazu.""" import zazu.imports zazu.imports.lazy_import(locals(), [ 'filecmp', 'git', 'os', 'pkg_resources', 'shutil', 'zazu.util', ]) __author__ = 'Nicholas Wiles' __copyright__ = 'Copyright 2016' def get_repo_root(starting_dir): """Get the root directory of the git repo.""" try: repo = git.Repo(starting_dir, search_parent_directories=True) return repo.working_tree_dir except git.exc.InvalidGitRepositoryError: return None def get_hooks_path(repo_base): """Get the path for git hooks.""" g = git.Git(repo_base) git_dir = g.rev_parse('--git-dir') return os.path.join(repo_base, git_dir, 'hooks') def get_default_git_hooks(): """Get list of known git hooks to install.""" return { 'pre-commit': pkg_resources.resource_filename('zazu', 'githooks/pre-commit'), 'post-checkout': pkg_resources.resource_filename('zazu', 'githooks/post-checkout'), 'post-merge': pkg_resources.resource_filename('zazu', 'githooks/post-merge'), 'commit-msg': pkg_resources.resource_filename('zazu', 'githooks/commit-msg'), } def get_touched_files(repo): """Get list of files that are scheduled to be committed (Added, created, modified, or renamed).""" return [file for file in repo.git.diff('--cached', '--name-only', '--diff-filter=ACMR').split('\n') if file] def check_git_hooks(repo_base): """Check that all known git hooks are in place.""" have_hooks = True hooks_folder = get_hooks_path(repo_base) for name, file in get_default_git_hooks().items(): if not check_git_hook(hooks_folder, name, file): have_hooks = False break return have_hooks def check_git_hook(hooks_folder, hook_name, hook_resource_path): """Check that a git hook is in place.""" hook_path = os.path.join(hooks_folder, hook_name) exists = os.path.exists(hook_path) return exists and os.access(hook_path, os.X_OK) and filecmp.cmp(hook_path, hook_resource_path) def install_git_hooks(repo_base): """Enforce that all known git hooks are in place.""" hooks_folder = get_hooks_path(repo_base) for name, file in 
get_default_git_hooks().items(): install_git_hook(hooks_folder, name, file) def install_git_hook(hooks_folder, hook_name, hook_resource_path): """Enforce that a git hook is in place.""" if not check_git_hook(hooks_folder, hook_name, hook_resource_path): try: os.mkdir(hooks_folder) except OSError: pass hook_path = os.path.join(hooks_folder, hook_name) shutil.copy(hook_resource_path, hook_path) def merged_branches(repo, target_branch, remote=False): """Return set of branches that have been merged with the target_branch.""" args = ['--merged', target_branch] if remote: args.insert(0, '-r') return {b.strip() for b in repo.git.branch(args).strip().split('\n') if b and not b.startswith('*')} def read_staged(path): """Read the contents of the staged version of the file.""" return zazu.util.check_output(['git', 'show', ':{}'.format(path)], universal_newlines=True)
33.364583
112
0.67749
0
0
0
0
0
0
0
0
937
0.292538
acc3c65d44803dc8ba8ec360a7cc84ee71244960
4,284
py
Python
Gds/src/fprime_gds/common/data_types/event_data.py
SSteve/fprime
12c478bd79c2c4ba2d9f9e634e47f8b6557c54a8
[ "Apache-2.0" ]
2
2021-02-23T06:56:03.000Z
2021-02-23T07:03:53.000Z
Gds/src/fprime_gds/common/data_types/event_data.py
SSteve/fprime
12c478bd79c2c4ba2d9f9e634e47f8b6557c54a8
[ "Apache-2.0" ]
9
2021-02-21T07:27:44.000Z
2021-02-21T07:27:58.000Z
Gds/src/fprime_gds/common/data_types/event_data.py
SSteve/fprime
12c478bd79c2c4ba2d9f9e634e47f8b6557c54a8
[ "Apache-2.0" ]
1
2021-02-23T17:10:44.000Z
2021-02-23T17:10:44.000Z
""" @brief Class to store data from a specific event @date Created July 2, 2018 @author R. Joseph Paetz @bug No known bugs """ from fprime.common.models.serialize import time_type from fprime_gds.common.data_types import sys_data class EventData(sys_data.SysData): """ The EventData class stores a specific event message. """ def __init__(self, event_args, event_time, event_temp): """ Constructor. Args: event_args: The arguments of the event being stored. This should be a tuple where each element is an object of a class derived from the BaseType class with a filled in value. Each element's class should match the class of the corresponding argument type object in the event_temp object. This can be None. event_time: The time the event occurred (TimeType) event_temp: Event template instance for this event Returns: An initialized EventData object """ super().__init__() self.id = event_temp.get_id() self.args = event_args self.time = event_time self.template = event_temp def get_args(self): return self.args def get_severity(self): return self.template.get_severity() @staticmethod def get_empty_obj(event_temp): """ Obtains an event object that is empty (arguments = None) Args: event_temp: (EventTemplate obj) Template describing event Returns: An EventData object with argument value of None """ return EventData(None, time_type.TimeType(), event_temp) @staticmethod def get_csv_header(verbose=False): """ Get the header for a csv file containing event data Args: verbose: (boolean, default=False) Indicates if header should be for regular or verbose output Returns: String version of the channel data """ if verbose: return "Time,Raw Time,Name,ID,Severity,Args\n" else: return "Time,Name,Severity,Args\n" def get_str(self, time_zone=None, verbose=False, csv=False): """ Convert the event data to a string Args: time_zone: (tzinfo, default=None) Timezone to print time in. If time_zone=None, use local time. 
verbose: (boolean, default=False) Prints extra fields if True csv: (boolean, default=False) Prints each field with commas between if true Returns: String version of the event data """ time_str = self.time.to_readable(time_zone) raw_time_str = str(self.time) name = self.template.get_full_name() severity = self.template.get_severity() format_str = self.template.get_format_str() if self.args is None: arg_str = "EMPTY EVENT OBJ" else: # The arguments are currently serializable objects which cannot be # used to fill in a format string. Convert them to values that can be arg_val_list = [arg_obj.val for arg_obj in self.args] arg_str = format_str % tuple(arg_val_list) if verbose and csv: return "%s,%s,%s,%d,%s,%s" % ( time_str, raw_time_str, name, self.id, severity, arg_str, ) elif verbose and not csv: return "%s: %s (%d) %s %s : %s" % ( time_str, name, self.id, raw_time_str, severity, arg_str, ) elif not verbose and csv: return "{},{},{},{}".format(time_str, name, severity, arg_str) else: return "{}: {} {} : {}".format(time_str, name, severity, arg_str) def __str__(self): """ Convert the event data to a string Returns: String version of the channel data """ return self.get_str()
31.043478
81
0.554622
4,048
0.944911
0
0
873
0.203782
0
0
2,305
0.538049
acc42a599a73b9ca811d8ac2f1ade306f4cc9f45
5,607
py
Python
src/Plot.py
xin-huang/dadi-cli
d403e9dced19c3a71dc134a8993ad0ceba592c51
[ "Apache-2.0" ]
5
2021-12-07T23:27:40.000Z
2022-03-15T08:59:33.000Z
src/Plot.py
xin-huang/dadi-cli
d403e9dced19c3a71dc134a8993ad0ceba592c51
[ "Apache-2.0" ]
2
2022-01-15T09:27:12.000Z
2022-03-25T16:08:52.000Z
src/Plot.py
xin-huang/dadi-cli
d403e9dced19c3a71dc134a8993ad0ceba592c51
[ "Apache-2.0" ]
5
2021-03-31T19:22:23.000Z
2021-12-07T18:24:59.000Z
import dadi, pickle import numpy as np import matplotlib.pyplot as plt fig = plt.figure(figsize=(8,6)) def plot_single_sfs(fs, projections, output, vmin): fs = dadi.Spectrum.from_file(fs) fig = plt.figure(219033) if len(fs.sample_sizes) == 1: if projections == None: projections = [20] fs = fs.project(projections) dadi.Plotting.plot_1d_fs(fs) if len(fs.sample_sizes) == 2: if projections == None: projections = [20, 20] fs = fs.project(projections) dadi.Plotting.plot_single_2d_sfs(fs, vmin=vmin) fig.savefig(output) def plot_comparison(fs, fs2, projections, output, vmin, resid_range): fs = dadi.Spectrum.from_file(fs) fs2 = dadi.Spectrum.from_file(fs2) fig = plt.figure(219033) if len(fs.sample_sizes) == 1: if projections == None: projections = [20] fs = fs.project(projections) fs2 = fs2.project(projections) dadi.Plotting.plot_1d_comp_Poisson(model=fs, data=fs2) if len(fs.sample_sizes) == 2: if projections == None: projections = [20, 20] fs = fs.project(projections) fs2 = fs2.project(projections) dadi.Plotting.plot_2d_comp_Poisson(model=fs, data=fs2, vmin=vmin, resid_range=resid_range) fig.savefig(output) def plot_fitted_demography(fs, model, popt, projections, misid, output, vmin, resid_range): from src.Models import get_dadi_model_func func = get_dadi_model_func(model) popt, _ = _get_opts_and_theta(popt, False) fs = dadi.Spectrum.from_file(fs) if misid: func = dadi.Numerics.make_anc_state_misid_func(func) func_ex = dadi.Numerics.make_extrap_func(func) ns = fs.sample_sizes pts_l = [int(max(ns)+10), int(max(ns)+20), int(max(ns)+30)] model = func_ex(popt, ns, pts_l) fig = plt.figure(219033) if len(ns) == 1: if projections == None: projections = [20] fs = fs.project(projections) model = model.project(projections) dadi.Plotting.plot_1d_comp_multinom(model, fs) if len(ns) == 2: if projections == None: projections = [20, 20] fs = fs.project(projections) model = model.project(projections) dadi.Plotting.plot_2d_comp_multinom(model, fs, vmin=vmin, resid_range=resid_range) 
fig.savefig(output) def plot_fitted_dfe(fs, cache1d, cache2d, demo_popt, sele_popt, projections, pdf, pdf2, misid, output, vmin, resid_range): import dadi.DFE from src.Pdfs import get_dadi_pdf sele_popt, theta = _get_opts_and_theta(sele_popt, False) fs = dadi.Spectrum.from_file(fs) if pdf != None: pdf = get_dadi_pdf(pdf) if pdf2 != None: pdf2 = get_dadi_pdf(pdf2) if pdf == None: pdf=pdf2 ns = fs.sample_sizes # Integrate over a range of gammas pts_l = [max(ns)+10, max(ns)+20, max(ns)+30] if cache1d != None: spectra1d = pickle.load(open(cache1d, 'rb')) func = spectra1d.integrate if cache2d != None: spectra2d = pickle.load(open(cache2d, 'rb')) func = spectra2d.integrate if (cache1d != None) and (cache2d != None): func = dadi.DFE.mixture if misid: func = dadi.Numerics.make_anc_state_misid_func(func) # Get expected SFS for MLE if (cache1d != None) and (cache2d != None): model = func(sele_popt, None, spectra1d, spectra2d, pdf, pdf2, theta, None) else: model = func(sele_popt, None, pdf, theta, None) fig = plt.figure(219033) if len(ns) == 1: if projections == None: projections = [20] fs = fs.project(projections) model = model.project(projections) dadi.Plotting.plot_2d_comp_Poisson(model, fs) if len(ns) == 2: if projections == None: projections = [20, 20] fs = fs.project(projections) model = model.project(projections) dadi.Plotting.plot_2d_comp_Poisson(model, fs, vmin=vmin, resid_range=resid_range) fig.savefig(output) def plot_mut_prop(dfe_popt, pdf1d, misid, mut_rate, seq_len, ratio, output): from src.Pdfs import get_dadi_pdf dfe_params, theta = _get_opts_and_theta(dfe_popt, misid) Na = theta/(4*mut_rate*seq_len*(ratio/(1+ratio))) def mut_prop(shape, scale, Na): from scipy import stats scale = scale / (2*Na) p1 = stats.gamma.cdf(1e-5, a=shape, scale=scale) p2 = stats.gamma.cdf(1e-4, a=shape, scale=scale) p3 = stats.gamma.cdf(1e-3, a=shape, scale=scale) p4 = stats.gamma.cdf(1e-2, a=shape, scale=scale) return p1, p2-p1, p3-p2, p4-p3, 1-p4 props = mut_prop(dfe_params[0], 
dfe_params[1], Na) fig = plt.figure(219033) plt.bar([0,1,2,3,4],props,alpha=0.7) plt.ylabel('Proportion') plt.xlabel('Selection coefficient') plt.xticks([0,1,2,3,4], ['0<=|s|<1e-5', '1e-5<=|s|<1e-4', '1e-4<=|s|<1e-3', '1e-3<=|s|<1e-2', '1e-2<=|s|'], rotation=45) plt.grid(alpha=0.3) fig.savefig(output, bbox_inches='tight') def _get_opts_and_theta(filename, misid): opts = [] fid = open(filename, 'r') for line in fid.readlines(): if line.startswith('#'): continue elif line.startswith('# T'): break else: try: opts.append([float(_) for _ in line.rstrip().split()]) except ValueError: pass fid.close() if len(opts) == 0: print('No optimization results found') return if misid: return opts[0][1:-2], opts[0][-1] else: return opts[0][1:-1], opts[0][-1]
32.789474
122
0.620118
0
0
0
0
0
0
0
0
224
0.03995
acc468eb15f6052159fe5c3f86610a87918f8f91
291
py
Python
modules/accounts/__init__.py
vladpi/zenmoney-bot
280723a49979632811f585fb8dced3c396fe563a
[ "Apache-2.0" ]
null
null
null
modules/accounts/__init__.py
vladpi/zenmoney-bot
280723a49979632811f585fb8dced3c396fe563a
[ "Apache-2.0" ]
1
2022-02-16T22:29:36.000Z
2022-02-16T22:29:54.000Z
modules/accounts/__init__.py
vladpi/zenmoney-bot
280723a49979632811f585fb8dced3c396fe563a
[ "Apache-2.0" ]
null
null
null
from .exports import ( # noqa create_account_from_zenmoney_account, delete_account, get_accounts_by_user, get_user_account_by_id, get_user_account_by_title, update_account_transactions_count, ) from .schemas import AccountModel # noqa from .tables import * # noqa
26.454545
41
0.769759
0
0
0
0
0
0
0
0
18
0.061856
acc4d2bbf7663f421e601fc096c05198efab0608
5,136
py
Python
akita/metrics.py
michael-lazar/Akita
f94bc04c28e70abe7f85c014f11a4621db9743c7
[ "MIT" ]
18
2018-04-26T18:28:00.000Z
2021-08-22T11:49:12.000Z
akita/metrics.py
michael-lazar/Akita
f94bc04c28e70abe7f85c014f11a4621db9743c7
[ "MIT" ]
1
2020-01-10T06:16:17.000Z
2020-01-10T22:16:55.000Z
akita/metrics.py
michael-lazar/Akita
f94bc04c28e70abe7f85c014f11a4621db9743c7
[ "MIT" ]
5
2018-08-10T13:55:50.000Z
2021-11-30T22:08:50.000Z
import time import logging import threading from collections import Counter _logger = logging.getLogger('akita') class SlidingWindowBase: """ Data structure that keeps track of the total number of events that have occurred in a given time frame. Events are binned into time windows based on their timestamps, and the most recent N windows are kept in a rotating queue. This allows for monitoring the moving average of a time series data stream in real time. """ # The data structure that will be used to accumulate events, # must support +/- operations. datatype = None def __init__(self, window_size=1, n_windows=10): """ Params: window_size (int): The length of each event window, in seconds. n_windows (int): The number of windows kept in memory. """ self.window_size = window_size self.n_windows = n_windows self.head = None # Note: I decided to use a plain list instead of a deque because # fast random access is more important than the cost of inserting at # the head of the list, the latter of which will only happen at most # once per window_size. self.history = [self.datatype() for _ in range(self.n_windows)] self.buffer = self.datatype() self.total = self.datatype() self.lock = threading.Lock() def flush(self, timestamp=None): """ Re-align the head of the sliding window to the given timestamp. Params: timestamp (float): The time to align the head of the sliding window with, will default to the current time. 
""" timestamp = time.time() if timestamp is None else timestamp window = timestamp - timestamp % self.window_size if self.head is None: # The first flush initializes the window self.head = window elif window > self.head: # Add the current buffer to the history window self._history_update(self.buffer) offset = int((window - self.head) // self.window_size) # Pad with zeros if the gap is larger than 1 window for _ in range(offset-1): self._history_update(self.datatype()) self.buffer = self.datatype() self.head = window def _history_update(self, buffer): self.history.insert(0, buffer) self.total += buffer self.total -= self.history.pop() def add_point(self): """ Add a time series event at the given timestamp. """ raise NotImplementedError class CounterMetric(SlidingWindowBase): """ A sliding window that uses a integer to accumulate the number of events in each time increment. """ datatype = int def __init__(self, window_size=1, n_windows=10): super().__init__(window_size, n_windows) self.min = None self.max = None def _history_update(self, buffer): super()._history_update(buffer) self.min = buffer if self.min is None else min(self.min, buffer) self.max = buffer if self.max is None else max(self.max, buffer) def add_point(self): self.buffer += 1 class AlertMetric(CounterMetric): """ An extension of the CounterMetric that watches the avg. rate of events over the entire window, and returns an alert when the rate crosses a given threshold. 
""" ALERT_START = 'start' ALERT_STOP = 'stop' def __init__(self, window_size=1, n_windows=120, threshold=10): super().__init__(window_size, n_windows) self.threshold = threshold self.triggered = False self.triggered_rate = None self.triggered_at = None def flush(self, timestamp=None): super().flush(timestamp=timestamp) if self.head is None or not self.history: return elif self.triggered_at and self.head - self.triggered_at < self.n_windows: # Not enough time has elapsed since the previous alert return rate = self.total * self.window_size / self.n_windows if not self.triggered and rate >= self.threshold: self.triggered = True self.triggered_at = self.head self.triggered_rate = rate return self.ALERT_START elif self.triggered and rate < self.threshold: self.triggered = False self.triggered_at = self.head self.triggered_rate = rate return self.ALERT_STOP class TaggedCounterMetric(SlidingWindowBase): """ A sliding window that uses a collections.Counter object to accumulate the number of points in each time increment. This allows ``tags`` to be associated with events, with counts being tracked separately for each tag. """ datatype = Counter def add_point(self, tags=None): tags = tags or [] for tag in tags: self.buffer[tag] += 1 # None is a special tag that holds the combined total for all points self.buffer[None] += 1
31.127273
82
0.632983
5,009
0.975273
0
0
0
0
0
0
1,959
0.381425
acc55edad5b04c2e0b24f12c37f946370c522b36
62
py
Python
studentData.py
seanmacb/COMP-115-Exercises
fbe7e5b158f2db785b886b6c600f1a8beb19ab1f
[ "MIT" ]
null
null
null
studentData.py
seanmacb/COMP-115-Exercises
fbe7e5b158f2db785b886b6c600f1a8beb19ab1f
[ "MIT" ]
null
null
null
studentData.py
seanmacb/COMP-115-Exercises
fbe7e5b158f2db785b886b6c600f1a8beb19ab1f
[ "MIT" ]
null
null
null
Schmoe Joe 12345 90 91 94 87 89 Doe Jane 74836 91 99 82 81 100
31
31
0.758065
0
0
0
0
0
0
0
0
0
0
acc64880afae366e820de6d62f2717aea69a4649
2,360
py
Python
tests/project_creator.py
AlexandrovRoman/Flask-DJ
ecf6cb05a8115641cab6634e8d004801a96c314c
[ "MIT" ]
7
2020-03-12T03:09:12.000Z
2021-05-01T08:11:33.000Z
tests/project_creator.py
AlexandrovRoman/Flask-DJ
ecf6cb05a8115641cab6634e8d004801a96c314c
[ "MIT" ]
null
null
null
tests/project_creator.py
AlexandrovRoman/Flask-DJ
ecf6cb05a8115641cab6634e8d004801a96c314c
[ "MIT" ]
1
2020-12-17T07:24:55.000Z
2020-12-17T07:24:55.000Z
from os.path import exists, join import pytest from flask_dj import startproject from tests.basic_project_creator import ProjectCreate class TestBaseSettingProjectConstructor(ProjectCreate): def setup(self, need_static=False, need_templates=False): super().setup() def test_project_folder_exist(self): assert exists(self.project_path) def test_main_folder_exist(self): assert exists(join(self.project_path, self.project_name)) def test_main_init(self): self.main_file_test('__init__', ['from flask import Flask', 'from flask_login import LoginManager']) def test_main_config(self): self.main_file_test('config', ["HOST = '127.0.0.1'", "PORT = 5000", "Config = DevelopConfig"]) def main_file_test(self, filename, test_contents=['']): self._file_test(self.project_name, filename, test_contents) def test_main_urls(self): assert exists(join(join(self.project_path, self.project_name), 'urls.py')) def test_main_manage(self): self._file_test(self.project_path, 'manage', [f'from {self.project_name} import app, config']) def test_utils_folder_exist(self): assert exists(join(self.project_path, 'utils')) def test_utils_urls(self): self._file_test('utils', 'urls', [f'from {self.project_name} import app']) def test_templates_folder(self): assert exists(join(self.project_path, 'templates')) or not self.need_templates def test_static_folder(self): assert exists(join(self.project_path, 'static')) or not self.need_static class TestAdvancedProjectConfig(TestBaseSettingProjectConstructor): def setup(self): super().setup(need_templates=True, need_static=True) class UncorrectProjectName(ProjectCreate): def setup(self, project_name): super().setup(project_name=project_name, fast_start=False) def test_create(self): with pytest.raises(ValueError): startproject(self.project_name) def test_main_folder_not_exist(self): assert not exists(join(self.project_path, self.project_name)) def teardown(self): pass class TestNumUncorrectProjectName(UncorrectProjectName): def setup(self): 
super().setup("123project") class TestDashInProjectName(UncorrectProjectName): def setup(self): super().setup("pro-ject")
32.328767
108
0.717373
2,210
0.936441
0
0
0
0
0
0
302
0.127966
acc66314877bc34a87521880f075c9eaab6df4a4
1,075
py
Python
ACM ICPC/DP/subset_sum problem/subsetSum.py
shreejitverma/GeeksforGeeks
d7bcb166369fffa9a031a258e925b6aff8d44e6c
[ "MIT" ]
2
2022-02-18T05:14:28.000Z
2022-03-08T07:00:08.000Z
ACM ICPC/DP/subset_sum problem/subsetSum.py
shivaniverma1/Competitive-Programming-1
d7bcb166369fffa9a031a258e925b6aff8d44e6c
[ "MIT" ]
6
2022-01-13T04:31:04.000Z
2022-03-12T01:06:16.000Z
ACM ICPC/DP/subset_sum problem/subsetSum.py
shivaniverma1/Competitive-Programming-1
d7bcb166369fffa9a031a258e925b6aff8d44e6c
[ "MIT" ]
2
2022-02-14T19:53:53.000Z
2022-02-18T05:14:30.000Z
def isSubsetSum(arr, n, sum): ''' Returns true if there exists a subset with given sum in arr[] ''' # The value of subset[i%2][j] will be true # if there exists a subset of sum j in # arr[0, 1, ...., i-1] subset = [[False for j in range(sum + 1)] for i in range(3)] for i in range(n + 1): for j in range(sum + 1): # A subset with sum 0 is always possible if (j == 0): subset[i % 2][j] = True # If there exists no element no sum # is possible elif (i == 0): subset[i % 2][j] = False elif (arr[i - 1] <= j): subset[i % 2][j] = subset[ (i + 1) % 2][j - arr[i - 1]] or subset[(i + 1) % 2][j] else: subset[i % 2][j] = subset[(i + 1) % 2][j] return subset[n % 2][sum] # Driver code arr = [6, 2, 5] sum = 7 n = len(arr) if (isSubsetSum(arr, n, sum) is True): print("There exists a subset with given sum") else: print("No subset exists with given sum")
27.564103
74
0.477209
0
0
0
0
0
0
0
0
355
0.330233
acc67a5c015a68af894948efa11516825c4f63e6
3,156
py
Python
models/matching/ffm.py
charlesxu-aicoder/Recommendation-model
9f5af6cc4c3382f2c0667b2d51836e2ae7ae6404
[ "MIT" ]
null
null
null
models/matching/ffm.py
charlesxu-aicoder/Recommendation-model
9f5af6cc4c3382f2c0667b2d51836e2ae7ae6404
[ "MIT" ]
null
null
null
models/matching/ffm.py
charlesxu-aicoder/Recommendation-model
9f5af6cc4c3382f2c0667b2d51836e2ae7ae6404
[ "MIT" ]
null
null
null
# -*-coding: utf-8-*- # @Author : Charlesxu # @Email : charlesxu.ai@gmail.com from models.matching import * class FFM_Layer(Layer): def __init__(self, dense_feature_columns, sparse_feature_columns, k, w_reg=1e-4, v_reg=1e-4): super(FFM_Layer, self).__init__() self.dense_feature_columns = dense_feature_columns self.sparse_feature_columns = sparse_feature_columns self.k = k self.w_reg = w_reg self.v_reg = v_reg self.feature_num = sum([feat['feat_num'] for feat in self.sparse_feature_columns]) \ + len(self.dense_feature_columns) self.field_num = len(self.dense_feature_columns) + len(self.sparse_feature_columns) def build(self, input_shape): self.w0 = self.add_weight(name='w0', shape=(1,), initializer=tf.zeros_initializer(), trainable=True) self.w = self.add_weight(name='w', shape=(self.feature_num, 1), initializer=tf.random_normal_initializer(), regularizer=l2(self.w_reg), trainable=True) self.v = self.add_weight(name='v', shape=(self.feature_num, self.field_num, self.k), initializer=tf.random_normal_initializer(), regularizer=l2(self.v_reg), trainable=True) def call(self, inputs, **kwargs): dense_inputs, sparse_inputs = inputs stack = dense_inputs for i in range(sparse_inputs.shape[1]): stack = tf.concat( [stack, tf.one_hot(sparse_inputs[:, i], depth=self.sparse_feature_columns[i]['feat_num'])], axis=-1) first_order = self.w0 + tf.matmul(tf.concat(stack, axis=-1), self.w) second_order = 0 field_f = tf.tensordot(stack, self.v, axes=[1, 0]) for i in range(self.field_num): for j in range(i+1, self.field_num): second_order += tf.reduce_sum( tf.multiply(field_f[:, i], field_f[:, j]), axis=1, keepdims=True ) return first_order + second_order class FFM(tf.keras.Model): def __init__(self, feature_columns, k, w_reg=1e-4, v_reg=1e-4): super(FFM, self).__init__() self.dense_feature_columns, self.sparse_feature_columns = feature_columns self.ffm = FFM_Layer(self.dense_feature_columns, self.sparse_feature_columns, k, w_reg, v_reg) def call(self, inputs, **kwargs): result_ffm = 
self.ffm(inputs) outputs = tf.nn.sigmoid(result_ffm) return outputs def summary(self, **kwargs): dense_inputs = Input(shape=(len(self.dense_feature_columns),), dtype=tf.float32) sparse_inputs = Input(shape=(len(self.sparse_feature_columns),), dtype=tf.int32) tf.keras.Model(inputs=[dense_inputs, sparse_inputs], outputs=self.call([dense_inputs, sparse_inputs])).summary()
43.833333
97
0.576362
3,038
0.962611
0
0
0
0
0
0
108
0.034221
acc6a2044f16b81953b9b0b10f9e3d38dd9da534
1,822
py
Python
server/apps/authentication/tests.py
krishnasagar14/street_parking
5b5e13d94358c0c610b9c2188abb62e52598a3bb
[ "MIT" ]
null
null
null
server/apps/authentication/tests.py
krishnasagar14/street_parking
5b5e13d94358c0c610b9c2188abb62e52598a3bb
[ "MIT" ]
3
2020-02-11T23:45:37.000Z
2021-06-10T21:13:14.000Z
server/apps/authentication/tests.py
krishnasagar14/street_parking
5b5e13d94358c0c610b9c2188abb62e52598a3bb
[ "MIT" ]
null
null
null
from django.test import TestCase from rest_framework.test import APIRequestFactory from .views import LoginView, SignupView from common.tests import USER_DATA, prepare_dummy_user_data # Create your tests here. class ApiViewTests(TestCase): factory = APIRequestFactory() def test_signup(self): user_data = USER_DATA view = SignupView.as_view() req = self.factory.post('/register/', user_data, format='json') resp = view(req) st_code = resp.status_code self.assertEqual(st_code, 400) user_data['password'] = 'test01' req = self.factory.post('/register/', user_data, format='json') resp = view(req) st_code = resp.status_code rdata = resp.data.get('data') self.assertEqual(st_code, 201) self.assertEqual(rdata['message'], 'USER_REGISTER_SUCCESS') print("User register API test success") def test_login(self): prepare_dummy_user_data() view = LoginView.as_view() user_data = {} req = self.factory.post('/login/', user_data, format='json') resp = view(req) st_code = resp.status_code self.assertEqual(st_code, 400) user_data = { 'email': USER_DATA['email'], 'password': '', } req = self.factory.post('/login/', user_data, format='json') resp = view(req) st_code = resp.status_code self.assertEqual(st_code, 400) user_data['password'] = 'test123' req = self.factory.post('/login/', user_data, format='json') resp = view(req) st_code = resp.status_code rdata = resp.data.get('data') self.assertEqual(st_code, 200) self.assertEqual(len(rdata['token'].split('.')), 3) print("User login API test success")
31.964912
71
0.616905
1,609
0.883095
0
0
0
0
0
0
284
0.155873
acc6cf450962693d28429a214f8508d7ed98f2f8
5,205
py
Python
build/android/play_services/utils.py
TwistedCore/external_v8
c6725dab9be251fbfc6fd7d53c3513a23e78c36c
[ "BSD-3-Clause" ]
2
2019-01-28T08:09:58.000Z
2021-11-15T15:32:10.000Z
build/android/play_services/utils.py
TwistedCore/external_v8
c6725dab9be251fbfc6fd7d53c3513a23e78c36c
[ "BSD-3-Clause" ]
null
null
null
build/android/play_services/utils.py
TwistedCore/external_v8
c6725dab9be251fbfc6fd7d53c3513a23e78c36c
[ "BSD-3-Clause" ]
6
2020-09-23T08:56:12.000Z
2021-11-18T03:40:49.000Z
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. ''' Utility functions for all things related to manipulating google play services related files. ''' import argparse import filecmp import json import logging import os import re import sys sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir)) from devil.utils import cmd_helper _XML_VERSION_NUMBER_PATTERN = re.compile( r'<integer name="google_play_services_version">(\d+)<\/integer>') class DefaultsRawHelpFormatter(argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter): ''' Combines the features of RawDescriptionHelpFormatter and ArgumentDefaultsHelpFormatter, providing defaults for the arguments and raw text for the description. ''' pass class ConfigParser(object): '''Reads and writes the configuration files for play services related scripts The configuration files are JSON files. Here is the data they are expected to contain: - version_number Number. Mirrors @integer/google_play_services_version from the library. Example: 815000 - sdk_version Version of the Play Services SDK to retrieve, when preprocessing the library from a maven/gradle repository. Example: "8.1.0" - clients List of strings. Name of the clients (or play services modules) to include when preprocessing the library. Example: ["play-services-base", "play-services-cast"] - version_xml_path String. Path to the version.xml string describing the current version. Should be relative to the library base directory Example: "res/values/version.xml" - locale_whitelist List of strings. List of locales to keep from the resources. Can be obtained by generating an android build and looking at the content of `out/Debug/gen/chrome/java/res`; or looking at the android section in `//chrome/app/generated_resources.grd` Example: ["am", "ar", "bg", "ca", "cs"] - resource_whitelist List of strings. 
List of resource files to explicitely keep in the final output. Use it to keep drawables for example, as we currently remove them all. Example: ["play-services-base/res/drawables/foobar.xml"] ''' _VERSION_NUMBER_KEY = 'version_number' def __init__(self, path): self.path = path self._data = {} with open(path, 'r') as stream: self._data = json.load(stream) @property def version_number(self): return self._data.get(self._VERSION_NUMBER_KEY) @property def sdk_version(self): return self._data.get('sdk_version') @property def clients(self): return self._data.get('clients') or [] @property def version_xml_path(self): return self._data.get('version_xml_path') @property def locale_whitelist(self): return self._data.get('locale_whitelist') or [] @property def resource_whitelist(self): return self._data.get('resource_whitelist') or [] def UpdateVersionNumber(self, new_version_number): '''Updates the version number and saves it in the configuration file. ''' with open(self.path, 'w') as stream: self._data[self._VERSION_NUMBER_KEY] = new_version_number stream.write(DumpTrimmedJson(self._data)) def DumpTrimmedJson(json_data): ''' Default formatting when dumping json to string has trailing spaces and lacks a new line at the end. This function fixes that. ''' out = json.dumps(json_data, sort_keys=True, indent=2) out = out.replace(' ' + os.linesep, os.linesep) return out + os.linesep def FileEquals(expected_file, actual_file): ''' Returns whether the two files are equal. Returns False if any of the files doesn't exist. 
''' if not os.path.isfile(actual_file) or not os.path.isfile(expected_file): return False return filecmp.cmp(expected_file, actual_file) def IsRepoDirty(repo_root): '''Returns True if there are no staged or modified files, False otherwise.''' # diff-index returns 1 if there are staged changes or modified files, # 0 otherwise cmd = ['git', 'diff-index', '--quiet', 'HEAD'] return cmd_helper.Call(cmd, cwd=repo_root) == 1 def GetVersionNumberFromLibraryResources(version_xml): ''' Extracts a Google Play services version number from its version.xml file. ''' with open(version_xml, 'r') as version_file: version_file_content = version_file.read() match = _XML_VERSION_NUMBER_PATTERN.search(version_file_content) if not match: raise AttributeError('A value for google_play_services_version was not ' 'found in ' + version_xml) return int(match.group(1)) def MakeLocalCommit(repo_root, files_to_commit, message): '''Makes a local git commit.''' logging.debug('Staging files (%s) for commit.', files_to_commit) if cmd_helper.Call(['git', 'add'] + files_to_commit, cwd=repo_root) != 0: raise Exception('The local commit failed.') logging.debug('Committing.') if cmd_helper.Call(['git', 'commit', '-m', message], cwd=repo_root) != 0: raise Exception('The local commit failed.')
30.438596
79
0.716427
2,802
0.538329
0
0
508
0.097598
0
0
2,856
0.548703
acc870850c0180a5f9b1ee2a3fbd63fa2971c376
4,179
py
Python
tagger/__main__.py
XYlearn/Tagger
1b74b44b28b3355f2f26613394cd6ef709ec9d97
[ "MIT" ]
null
null
null
tagger/__main__.py
XYlearn/Tagger
1b74b44b28b3355f2f26613394cd6ef709ec9d97
[ "MIT" ]
null
null
null
tagger/__main__.py
XYlearn/Tagger
1b74b44b28b3355f2f26613394cd6ef709ec9d97
[ "MIT" ]
null
null
null
# -*-coding: utf-8 import sys import argparse from tagger import FileTagger def tagger_add(args): tg = FileTagger() res = tg.add_tags(args.path, *args.tags) if not res: print("[-] Fail to add tags.") def tagger_rm(args): tg = FileTagger() res = tg.rm_tags(args.path, *args.tags) if not res: print("[-] Fail to remove tags.") def tagger_find(args): tg = FileTagger() found = tg.find_tags(args.path, *args.tags, top_only=args.top, depth=args.depth) print('\n'.join(found)) def tagger_get(args): tg = FileTagger() tags = tg.get_tags(args.path) print('\n'.join(tags)) def tagger_clear(args): tg = FileTagger() res = tg.clear_tags(args.path, recursive=args.recursive, depth=args.depth, top_only=args.top) if not res: print("[-] Fail to clear tags") def tagger_merge(args): tg = FileTagger() tg.merge_tags(args.path, args.dest_path, *args.tags) def tagger_sync(args): tg = FileTagger() tg.sync_tags(args.path, recursive=args.recursive, depth=args.depth, top_only=args.top) def get_parser(): parser = argparse.ArgumentParser(prog="tagger") subparsers = parser.add_subparsers() # tagger add parser_add = subparsers.add_parser("add", help="add tags to path") parser_add.add_argument("path", help="path to add tags") parser_add.add_argument("tags", nargs="+", help="tags to add") parser_add.set_defaults(func=tagger_add) # tagger rm parser_rm = subparsers.add_parser("rm", help="remove tags from path") parser_rm.add_argument("path", help="path to remove tags from") parser_rm.add_argument("tags", nargs="+", help="tags to remove") parser_rm.set_defaults(func=tagger_rm) # tagger get parser_get = subparsers.add_parser("get", help="get tags of path") parser_get.add_argument("path", help="path of tags") parser_get.set_defaults(func=tagger_get) # tagger find parser_find = subparsers.add_parser("find", help="find paths that have tags") parser_find.add_argument("path", help="path to find tags") parser_find.add_argument("tags", nargs="+", help="tags to find") parser_find.add_argument("-t", "--top", 
help="only find top directories that have tags", action="store_true") parser_find.add_argument("-d", "--depth", type=int, help="depth of folder to search") parser_find.set_defaults(func=tagger_find) # tagger clear parser_clear = subparsers.add_parser("clear", help="clear path's tags") parser_clear.add_argument("path", help="path to clear tags") parser_clear.add_argument("-r", "--recursive", help="recursively clear tags", action="store_true") parser_clear.add_argument("-t", "--top", help="top only mode, valid if -r is given", action='store_true') parser_clear.add_argument("-d", "--depth", type=int, help="recursive depth, valid if -r is given") parser_clear.set_defaults(func=tagger_clear) # tagger merge parser_merge = subparsers.add_parser("merge", help="merge file with same tags to dest directory") parser_merge.add_argument("path", help="path to search for tags") parser_merge.add_argument( "dest_path", help="dest directory to save copy of files") parser_merge.add_argument("tags", nargs="+", help="tags to merge") parser_merge.set_defaults(func=tagger_merge) # tagger sync parser_sync = subparsers.add_parser("sync", help="synchronize tags, remove tags of non-existent files") parser_sync.add_argument("path", help="path to synchronize tags") parser_sync.add_argument("-r", '--recursive', help='recursively sync tags', action='store_true') parser_sync.add_argument('-t', '--top', help='sync only top files or folders', action='store_true') parser_sync.add_argument('-d', '--depth', type=int, help='depth to sync') parser_sync.set_defaults(func=tagger_sync) return parser def main(): try: parser = get_parser() args = parser.parse_args() if 'func' in args: args.func(args) else: parser.parse_args(['-h']) except KeyboardInterrupt: print("[-] Cancelled by user") if __name__ == "__main__": main()
37.3125
113
0.680306
0
0
0
0
0
0
0
0
1,220
0.291936
accc42d5e9065c564b32d000eff2da58f6c77fb1
4,487
py
Python
lang_vec/langvec_classify.py
cordercorder/NMT
cbc5ad010ce04da7a82f05ad1a3b6c16f8467266
[ "MIT" ]
6
2020-08-17T16:11:18.000Z
2020-10-10T12:26:03.000Z
lang_vec/langvec_classify.py
cordercorder/NMT
cbc5ad010ce04da7a82f05ad1a3b6c16f8467266
[ "MIT" ]
1
2020-07-11T16:42:55.000Z
2020-07-12T12:58:18.000Z
lang_vec/langvec_classify.py
cordercorder/NMT
cbc5ad010ce04da7a82f05ad1a3b6c16f8467266
[ "MIT" ]
2
2020-10-10T12:22:46.000Z
2021-11-12T15:39:27.000Z
import numpy as np import argparse import pycountry import lang2vec.lang2vec as l2v from sklearn import preprocessing, linear_model, svm from lang_vec.lang_vec_tools import load_lang_vec from utils.tools import read_data from typing import Dict def get_language_alpha3(language_code: str): if len(language_code) == 2: ans = pycountry.languages.get(alpha_2 = language_code) elif len(language_code) == 3: ans = pycountry.languages.get(alpha_3 = language_code) else: raise Exception("language code is not valid") if ans is not None: return ans.alpha_3 else: return "unknown language" def check_alpha3(alpha3: str): if alpha3 != "unknown language" and alpha3 in l2v.LANGUAGES: return True return False def train(args: argparse.Namespace, lang_vec: Dict, lang_alpha3: Dict, features: Dict): print("Classify method: {}".format(args.classify_method)) # fix order lang_alpha3 = {k: lang_alpha3[k] for k in sorted(lang_alpha3.keys())} X = [lang_vec[lang] for lang in lang_alpha3.keys()] train_data_rate = 0.7 score_dict = {} f = open(args.output_file_path, "w") for feat in range(len(features["CODE"])): Y = [features[lang][feat] if features[lang][feat] != "--" else -1 for lang in lang_alpha3.values()] idx = [i for i in range(len(Y)) if Y[i] != -1] train_set = np.array([[X[i], Y[i]] for i in idx]) if len(train_set) == 0: print("Feature {} is not available in all 101 languages!".format(features["CODE"][feat])) f.write("Feature {} is not available in all 101 languages!\n".format(features["CODE"][feat])) continue lab_enc = preprocessing.LabelEncoder() train_set[:, 1] = lab_enc.fit_transform(train_set[:, 1]) x_train = train_set[:int(len(train_set) * train_data_rate), 0] y_train = train_set[:int(len(train_set) * train_data_rate), 1] x_test = train_set[int(len(train_set) * train_data_rate):, 0] y_test = train_set[int(len(train_set) * train_data_rate):, 1] if len(x_train) == 0: print("Feature {} has no train data!".format(features["CODE"][feat])) f.write("Feature {} has no train 
data!\n".format(features["CODE"][feat])) continue if len(x_test) == 0: print("Feature {} has no test data!".format(features["CODE"][feat])) f.write("Feature {} has no test data!\n".format(features["CODE"][feat])) continue if np.all(y_train == y_train[0]): print("Feature {} has only one class!".format(features["CODE"][feat])) f.write("Feature {} has only one class!\n".format(features["CODE"][feat])) continue if args.classify_method == "logistic": logistic_model = linear_model.LogisticRegression(max_iter=3000) clf = logistic_model.fit(x_train.tolist(), y_train.tolist()) else: svm_model = svm.SVC() clf = svm_model.fit(x_train.tolist(), y_train.tolist()) score = clf.score(x_test.tolist(), y_test.tolist()) score_dict[features["CODE"][feat]] = score print("Feature {} accuracy is {}, train dataset has {} element, test dataset has {} element".format( features["CODE"][feat], score, len(x_train), len(x_test))) f.write("Feature {} accuracy is {}, train dataset has {} element, test dataset has {} element\n".format( features["CODE"][feat], score, len(x_train), len(x_test))) f.close() def main(): parser = argparse.ArgumentParser() parser.add_argument("--feature_name", required=True) parser.add_argument("--classify_method", required=True, choices=["svm", "logistic"]) parser.add_argument("--lang_vec_path", required=True) parser.add_argument("--lang_name_path", required=True) parser.add_argument("--output_file_path", required=True) args, unknown = parser.parse_known_args() lang_name = read_data(args.lang_name_path) lang_vec = load_lang_vec(args.lang_vec_path) lang_alpha3 = {} for lang in lang_name: alpha3 = get_language_alpha3(lang[1:-1]) if check_alpha3(alpha3): lang_alpha3[lang] = alpha3 feature_name = args.feature_name features = l2v.get_features(list(lang_alpha3.values()), feature_name, header=True) train(args, lang_vec, lang_alpha3, features) if __name__ == "__main__": main()
35.330709
112
0.639848
0
0
0
0
0
0
0
0
770
0.171607
accceb5e8bd513c5d20c432215e4eda1165c3144
258
py
Python
hooks/post_gen_project.py
KernelA/cookiecutter-python
f609b1e6156dcbcedb296a329c86ef11edbe1b14
[ "MIT" ]
null
null
null
hooks/post_gen_project.py
KernelA/cookiecutter-python
f609b1e6156dcbcedb296a329c86ef11edbe1b14
[ "MIT" ]
null
null
null
hooks/post_gen_project.py
KernelA/cookiecutter-python
f609b1e6156dcbcedb296a329c86ef11edbe1b14
[ "MIT" ]
null
null
null
import os FILES_TO_REMOVE = ["setup.cfg", "pyproject.toml", "setup.py"] if "{{ cookiecutter.as_package }}" == "no": for file in FILES_TO_REMOVE: os.remove(file) if "{{ cookiecutter.remove_test_script }}" == "yes": os.remove("test_log.py")
23.454545
61
0.643411
0
0
0
0
0
0
0
0
129
0.5
accdf2b0268088db6e9b6be629bf68b1c39b46da
620
py
Python
register/forms/misc.py
Ajuajmal/heroku
f23aad8c392a273caf0da39cedeec4746ded29dc
[ "0BSD" ]
null
null
null
register/forms/misc.py
Ajuajmal/heroku
f23aad8c392a273caf0da39cedeec4746ded29dc
[ "0BSD" ]
null
null
null
register/forms/misc.py
Ajuajmal/heroku
f23aad8c392a273caf0da39cedeec4746ded29dc
[ "0BSD" ]
null
null
null
from django import forms from crispy_forms.helper import FormHelper class MiscForm(forms.Form): notes = forms.CharField( label='Notes for the registration team', help_text='Anything else you need to describe. ' 'The registration team will see this. ' 'The bursaries team will not.', widget=forms.Textarea(attrs={'rows': 3}), required=False, ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.helper = FormHelper() self.helper.form_tag = False self.helper.include_media = False
29.52381
57
0.619355
548
0.883871
0
0
0
0
0
0
146
0.235484
accfc0c80b0cdf98d793310ffa5f537f88f2a21e
948
py
Python
Practice/script.py
NVombat/WebCam-Motion-Detector
591235f66b390ce956a5b7538c3a9308281e6c3a
[ "MIT" ]
2
2021-08-02T06:03:02.000Z
2022-03-22T17:24:08.000Z
Practice/script.py
NVombat/WebCam-Motion-Detector
591235f66b390ce956a5b7538c3a9308281e6c3a
[ "MIT" ]
null
null
null
Practice/script.py
NVombat/WebCam-Motion-Detector
591235f66b390ce956a5b7538c3a9308281e6c3a
[ "MIT" ]
null
null
null
#Import libraries import cv2 #Read image file into n dimensional numpy array #Read as greyscale image -> 0, Read as color image w/o transparency -> 1, Read as color image w transparency -> -1 img = cv2.imread("Practice/images/galaxy.jpg", 0) print(type(img)) print(img) #To know the number of rows and columns print(img.shape) #To know the number of dimensions print(img.ndim) #Resize image to certain dimensions (width, height) #resized_img = cv2.resize(img,(500,900)) #Resize the image to half the original size using shape attribute resized_img = cv2.resize(img,(int(img.shape[1]/2), int(img.shape[0]/2))) #Display image on screen #cv2.imshow("Galaxy", img) cv2.imshow("Galaxy", resized_img) #Amount of time (in milliseconds) before window closes (0 -> closes window on key/button press) cv2.waitKey(0) #Remove all windows cv2.destroyAllWindows() #Write new image to jpg file cv2.imwrite("Practice/images/galaxy_resized.jpg", resized_img)
30.580645
114
0.754219
0
0
0
0
0
0
0
0
670
0.706751
acd027d5fe8415bf29567d076248cd34dc5cdc41
94
py
Python
tests/basic/tuple.py
MoonStarCZW/py2rb
89b247717d33d780fbf143e1583bfe9252984da4
[ "MIT" ]
null
null
null
tests/basic/tuple.py
MoonStarCZW/py2rb
89b247717d33d780fbf143e1583bfe9252984da4
[ "MIT" ]
null
null
null
tests/basic/tuple.py
MoonStarCZW/py2rb
89b247717d33d780fbf143e1583bfe9252984da4
[ "MIT" ]
null
null
null
tup = ('a','b',1,2,3) print(tup[0]) print(tup[1]) print(tup[2]) print(tup[3]) print(tup[4])
10.444444
21
0.56383
0
0
0
0
0
0
0
0
6
0.06383
acd101863dd3fde86f97b4f8931de856caab8a8b
101
py
Python
frameworks/Python/aiohttp/app/gunicorn.py
xsoheilalizadeh/FrameworkBenchmarks
855527008f7488e4fd508d1e72dfa9953874a2c6
[ "BSD-3-Clause" ]
5
2015-11-05T12:57:32.000Z
2021-02-24T05:03:05.000Z
frameworks/Python/aiohttp/app/gunicorn.py
xsoheilalizadeh/FrameworkBenchmarks
855527008f7488e4fd508d1e72dfa9953874a2c6
[ "BSD-3-Clause" ]
122
2021-04-16T02:04:24.000Z
2022-01-13T20:17:26.000Z
frameworks/Python/aiohttp/app/gunicorn.py
xsoheilalizadeh/FrameworkBenchmarks
855527008f7488e4fd508d1e72dfa9953874a2c6
[ "BSD-3-Clause" ]
2
2018-03-22T00:37:28.000Z
2018-03-22T00:56:57.000Z
import asyncio from .main import create_app loop = asyncio.get_event_loop() app = create_app(loop)
14.428571
31
0.782178
0
0
0
0
0
0
0
0
0
0
acd1ea063ff8bdb5abed599c2d4b292cd60dcfca
16,419
py
Python
keymap/klc_to_asm.py
MJoergen/x16-rom-floating-point
8258beffec3faac42b68eeda22e8e3055fd64b21
[ "MIT" ]
1
2020-05-30T17:58:13.000Z
2020-05-30T17:58:13.000Z
keymap/klc_to_asm.py
MJoergen/x16-rom-floating-point
8258beffec3faac42b68eeda22e8e3055fd64b21
[ "MIT" ]
null
null
null
keymap/klc_to_asm.py
MJoergen/x16-rom-floating-point
8258beffec3faac42b68eeda22e8e3055fd64b21
[ "MIT" ]
null
null
null
import io, re, codecs, sys, os.path import pprint REG = 0 SHFT = 1 CTRL = 2 ALT = 4 ALTGR = 6 COMPRESSED_OUTPUT=1 def get_kbd_layout(base_filename, load_patch = False): filename_klc = base_filename filename_changes = base_filename + 'patch' f = io.open(filename_klc, mode="r", encoding="utf-8") lines = f.readlines() f.close() lines = [x.strip() for x in lines] if (load_patch and os.path.isfile(filename_changes)): f = io.open(filename_changes, mode="r", encoding="utf-8") lines_changes = f.readlines() f.close() lines_changes = [x.strip() for x in lines_changes] else: lines_changes = [] keywords = [ 'KBD', 'COPYRIGHT', 'COMPANY', 'LOCALENAME', 'LOCALEID', 'VERSION', 'SHIFTSTATE', 'LAYOUT', 'DEADKEY', 'KEYNAME', 'KEYNAME_EXT', 'KEYNAME_DEAD', 'DESCRIPTIONS', 'LANGUAGENAMES', 'ENDKBD' ] sections = [] section = [] while len(lines) > 0: while True: line = lines[0] lines = lines[1:] i = line.find('//') if i != -1: line = line[:i] line = line.rstrip() if len(line) == 0: continue fields = re.split(r'\t', line) while '' in fields: fields.remove('') break if fields[0] in keywords: if (len(section)) > 0: sections.append(section) section = [] section.append(fields) section_changes = [] while len(lines_changes) > 0: line = lines_changes[0] lines_changes = lines_changes[1:] i = line.find('//') if i != -1: line = line[:i] line = line.rstrip() if len(line) == 0: continue fields = re.split(r'\t', line) while '' in fields: fields.remove('') section_changes.append(fields) kbd_layout = {} for lines in sections: fields = lines[0] if fields[0] == 'KBD': kbd_layout['short_id'] = fields[1] kbd_layout['name'] = fields[2].replace('"', '') elif fields[0] == 'COPYRIGHT': kbd_layout['copyright'] = fields[1].replace('"', '') elif fields[0] == 'COMPANY': kbd_layout['company'] = fields[1] elif fields[0] == 'LOCALENAME': kbd_layout['localename'] = fields[1].replace('"', '') elif fields[0] == 'LOCALEID': kbd_layout['localeid'] = fields[1].replace('"', '') elif fields[0] == 'VERSION': 
kbd_layout['version'] = fields[1] elif fields[0] == 'SHIFTSTATE': shiftstates = [] for fields in lines[1:]: shiftstates.append(int(fields[0])) # The US layout does not use "Alt" *at all*. We add it, so that the # .klcpatch file can define keys with "Alt" in an extra column. if not ALT in shiftstates: shiftstates.append(ALT) kbd_layout['shiftstates'] = shiftstates elif fields[0] == 'LAYOUT': all_originally_reachable_characters = "" layout = {} line_number = 0 for fields in lines[1:] + section_changes: if fields[0] == '-1': # TODO: 807 has extension lines we don't support continue chars = {} i = 3 for shiftstate in shiftstates: if i > len(fields) - 1: break c = fields[i] if c != '-1': if len(c) > 1: c = chr(int(c[0:4], 16)) chars[shiftstate] = c if (line_number < len(lines[1:])): all_originally_reachable_characters += c i += 1 # TODO: c[4] == '@' -> dead key layout[int(fields[0], 16)] = { #'vk_name': 'VK_' + fields[1], #'cap': int(fields[2]), 'chars': chars } line_number += 1 kbd_layout['layout'] = layout kbd_layout['all_originally_reachable_characters'] = ''.join(sorted(all_originally_reachable_characters)) elif fields[0] == 'DEADKEY': # TODO pass elif fields[0] == 'KEYNAME': # TODO pass elif fields[0] == 'KEYNAME_EXT': # TODO pass elif fields[0] == 'KEYNAME_DEAD': # TODO pass elif fields[0] == 'DESCRIPTIONS': # TODO pass elif fields[0] == 'LANGUAGENAMES': # TODO pass return kbd_layout def ps2_set2_code_from_hid_code(c): mapping = { 0x01: 0x76, 0x02: 0x16, 0x03: 0x1E, 0x04: 0x26, 0x05: 0x25, 0x06: 0x2E, 0x07: 0x36, 0x08: 0x3D, 0x09: 0x3E, 0x0A: 0x46, 0x0B: 0x45, 0x0C: 0x4E, 0x0D: 0x55, 0x0E: 0x66, 0x0F: 0x0D, 0x10: 0x15, 0x11: 0x1D, 0x12: 0x24, 0x13: 0x2D, 0x14: 0x2C, 0x15: 0x35, 0x16: 0x3C, 0x17: 0x43, 0x18: 0x44, 0x19: 0x4D, 0x1A: 0x54, 0x1B: 0x5B, 0x1C: 0x5A, 0x1E: 0x1C, 0x1F: 0x1B, 0x20: 0x23, 0x21: 0x2B, 0x22: 0x34, 0x23: 0x33, 0x24: 0x3B, 0x25: 0x42, 0x26: 0x4B, 0x27: 0x4C, 0x28: 0x52, 0x29: 0x0E, 0x2B: 0x5D, 0x2B: 0x5D, 0x2C: 0x1A, 0x2D: 0x22, 0x2E: 0x21, 
0x2F: 0x2A, 0x30: 0x32, 0x31: 0x31, 0x32: 0x3A, 0x33: 0x41, 0x34: 0x49, 0x35: 0x4A, 0x39: 0x29, 0x3A: 0x58, 0x3B: 0x05, 0x3C: 0x06, 0x3D: 0x04, 0x3E: 0x0C, 0x3F: 0x03, 0x40: 0x0B, 0x41: 0x83, 0x42: 0x0A, 0x43: 0x01, 0x44: 0x09, 0x53: 0x71, 0x56: 0x61, 0x57: 0x78, 0x58: 0x07 } if c in mapping: return mapping[c] else: return 0 def petscii_from_unicode(c): if ord(c) >= 0xf800 and ord(c) <= 0xf8ff: # PETSCII code encoded into private Unicode area return chr(ord(c) - 0xf800) if c == '\\' or c == '|' or c == '_' or c == '{' or c == '}' or c == '~': return chr(0) if ord(c) == 0xa3: # '£' return chr(0x5c) if ord(c) == 0x2190: # '←' return chr(0x5f) if ord(c) == 0x03c0: # 'π' return chr(0xde) if ord(c) >= ord('A') and ord(c) <= ord('Z'): return chr(ord(c) + 0x80) if ord(c) >= ord('a') and ord(c) <= ord('z'): return chr(ord(c) - 0x20) if ord(c) < 0x20 and c != '\r': return chr(0) if ord(c) >= 0x7e: return chr(0) return c def latin15_from_unicode(c): # Latin-15 and 8 bit Unicode are almost the same if ord(c) <= 0xff: # Latin-1 characters (i.e. 
8 bit Unicode) not included in Latin-15 if ord(c) in [0xA4, 0xA6, 0xA8, 0xB4, 0xB8, 0xBC, 0xBD, 0xBE]: #'¤¦¨´¸¼½¾' return chr(0); else: return c # Latin-15 supports some other Unicode characters latin15_from_unicode_tab = { 0x20ac: 0xa4, # '€' 0x160: 0xa6, # 'Š' 0x161: 0xa8, # 'š' 0x17d: 0xb4, # 'Ž' 0x17e: 0xb8, # 'ž' 0x152: 0xbc, # 'Œ' 0x153: 0xbd, # 'œ' 0x178: 0xbe # 'Ÿ' } if ord(c) in latin15_from_unicode_tab: return chr(latin15_from_unicode_tab[ord(c)]) # all other characters are unsupported return chr(0) def unicode_from_petscii(c): # only does the minumum if ord(c) == 0x5c: # '£' return chr(0xa3) if ord(c) == 0x5f: # '←' return chr(0x2190) if ord(c) == 0xde: # 'π' return chr(0x03c0) return c # constants # a string with all printable 7-bit PETSCII characters all_petscii_chars = " !\"#$%&'()*+,-./0123456789:;<=>?@" for c in "abcdefghijklmnopqrstuvwxyz": all_petscii_chars += chr(ord(c) - 0x20) all_petscii_chars += "[\]^_ABCDEFGHIJKLMNOPQRSTUVWXYZ" all_petscii_chars += "\xde" # π # all PETSCII control codes and their descriptions control_codes = { 0x03: 'RUN/STOP', 0x05: 'WHITE', 0x08: 'SHIFT_DISABLE', 0x09: 'SHIFT_ENABLE', 0x0d: 'CR', 0x0e: 'TEXT_MODE', 0x11: 'CURSOR_DOWN', 0x12: 'REVERSE_ON', 0x13: 'HOME', 0x14: 'DEL', 0x1c: 'RED', 0x1d: 'CURSOR_RIGHT', 0x1e: 'GREEN', 0x1f: 'BLUE', 0x81: 'ORANGE', 0x85: 'F1', 0x86: 'F3', 0x87: 'F5', 0x88: 'F7', 0x89: 'F2', 0x8a: 'F4', 0x8b: 'F6', 0x8c: 'F8', 0x8d: 'SHIFT+CR', 0x8e: 'GRAPHICS', 0x90: 'BLACK', 0x91: 'CURSOR_UP', 0x92: 'REVERSE_OFF', 0x93: 'CLR', 0x94: 'INSERT', 0x95: 'BROWN', 0x96: 'LIGHT_RED', 0x97: 'DARK_GRAY', 0x98: 'MIDDLE_GRAY', 0x99: 'LIGHT_GREEN', 0x9a: 'LIGHT_BLUE', 0x9b: 'LIGHT_GRAY', 0x9c: 'PURPLE', 0x9d: 'CURSOR_LEFT', 0x9e: 'YELLOW', 0x9f: 'CYAN', 0xa0: 'SHIFT+SPACE', } all_petscii_codes = "" for c in control_codes.keys(): all_petscii_codes += chr(c) # all printable PETSCII graphics characters all_petscii_graphs = "" for c in range(0xa1, 0xc0): all_petscii_graphs += chr(c) # the following PETSCII 
control codes do not have to be reachable # through the keyboard all_petscii_codes_ok_if_missing = [ chr(0x1d), # CURSOR_RIGHT - covered by cursor keys chr(0x8e), # GRAPHICS - not covered on C64 either chr(0x91), # CURSOR_UP - covered by cursor keys chr(0x93), # CLR - convered by E0-prefixed key chr(0x9d), # CURSOR_LEFT - covered by cursor keys ] if len(sys.argv) >= 3 and sys.argv[2] == '-iso': iso_mode = True else: iso_mode = False load_patch = not iso_mode kbd_layout = get_kbd_layout(sys.argv[1], load_patch) layout = kbd_layout['layout'] shiftstates = kbd_layout['shiftstates'] keytab = {} for shiftstate in shiftstates: keytab[shiftstate] = [ '\0' ] * 128 # some layouts don't define Alt at all if not ALT in keytab: keytab[ALT] = [ '\0' ] * 128 # create PS/2 Code 2 -> PETSCII tables for hid_scancode in layout.keys(): ps2_scancode = ps2_set2_code_from_hid_code(hid_scancode) l = layout[hid_scancode]['chars'] for shiftstate in keytab.keys(): if shiftstate in l: c_unicode = l[shiftstate] if iso_mode: keytab[shiftstate][ps2_scancode] = latin15_from_unicode(c_unicode) else: keytab[shiftstate][ps2_scancode] = petscii_from_unicode(c_unicode) # stamp in f-keys independent of shiftstate for shiftstate in keytab.keys(): keytab[shiftstate][5] = chr(0x85) # f1 keytab[shiftstate][6] = chr(0x89) # f2 keytab[shiftstate][4] = chr(0x86) # f3 keytab[shiftstate][12] = chr(0x8a) # f4 keytab[shiftstate][3] = chr(0x87) # f5 keytab[shiftstate][11] = chr(0x8b) # f6 keytab[shiftstate][2] = chr(0x88) # f7 keytab[shiftstate][10] = chr(0x8c) # f8 # C65 additions keytab[shiftstate][1] = chr(0x10) # f9 keytab[shiftstate][9] = chr(0x15) # f10 keytab[shiftstate][0x78] = chr(0x16) # f11 keytab[shiftstate][7] = chr(0x17) # f12 # * PS/2 keyboards don't have the C65 f13 (chr(0x19)) and f14 (chr(0x1a)) # -> we don't map them # * PS/2 keyboards don't have the C128/C65 "HELP" (chr(0x83)) # -> TODO # stamp in Ctrl/Alt color codes petscii_from_ctrl_scancode = [ # Ctrl (0x16, 0x90), # '1' (0x1e, 0x05), # 
'2' (0x26, 0x1c), # '3' (0x25, 0x9f), # '4' (0x2e, 0x9c), # '5' (0x36, 0x1e), # '6' (0x3d, 0x1f), # '7' (0x3e, 0x9e), # '8' (0x46, 0x12), # '9' REVERSE ON (0x45, 0x92), # '0' REVERSE OFF ] petscii_from_alt_scancode = [ # Alt (0x16, 0x81), # '1' (0x1e, 0x95), # '2' (0x26, 0x96), # '3' (0x25, 0x97), # '4' (0x2e, 0x98), # '5' (0x36, 0x99), # '6' (0x3d, 0x9a), # '7' (0x3e, 0x9b), # '8' ] for (scancode, petscii) in petscii_from_ctrl_scancode: if keytab[CTRL][scancode] == chr(0): # only if unassigned keytab[CTRL][scancode] = chr(petscii) for (scancode, petscii) in petscii_from_alt_scancode: if keytab[ALT][scancode] == chr(0): # only if unassigned keytab[ALT][scancode] = chr(petscii) # stamp in Alt graphic characters if not iso_mode: petscii_from_alt_scancode = [ (0x1c, 0xb0), # 'A' (0x32, 0xbf), # 'B' (0x21, 0xbc), # 'C' (0x23, 0xac), # 'D' (0x24, 0xb1), # 'E' (0x2b, 0xbb), # 'F' (0x34, 0xa5), # 'G' (0x33, 0xb4), # 'H' (0x43, 0xa2), # 'I' (0x3b, 0xb5), # 'J' (0x42, 0xa1), # 'K' (0x4b, 0xb6), # 'L' (0x3a, 0xa7), # 'M' (0x31, 0xaa), # 'N' (0x44, 0xb9), # 'O' (0x4d, 0xaf), # 'P' (0x15, 0xab), # 'Q' (0x2d, 0xb2), # 'R' (0x1b, 0xae), # 'S' (0x2c, 0xa3), # 'T' (0x3c, 0xb8), # 'U' (0x2a, 0xbe), # 'V' (0x1d, 0xb3), # 'W' (0x22, 0xbd), # 'X' (0x35, 0xb7), # 'Y' (0x1a, 0xad), # 'Z' ] for (scancode, petscii) in petscii_from_alt_scancode: if keytab[ALT][scancode] == chr(0): # only if unassigned keytab[ALT][scancode] = chr(petscii) # generate Ctrl codes for A-Z for i in range(0, len(keytab[REG])): c = keytab[REG][i] if iso_mode and ord(c) >= ord('a') and ord(c) <= ord('z'): c = chr(ord(c) - ord('a') + 1) elif not iso_mode and ord(c) >= ord('A') and ord(c) <= ord('Z'): c = chr(ord(c) - ord('A') + 1) else: c = None if c and keytab[CTRL][i] == chr(0): # only if unassigned keytab[CTRL][i] = c # stamp in backspace and TAB for shiftstate in keytab.keys(): if shiftstate == 0: keytab[shiftstate][0x66] = chr(0x14) # backspace keytab[shiftstate][0x0d] = chr(0x09) # TAB keytab[shiftstate][0x5a] 
= chr(0x0d) # CR keytab[shiftstate][0x29] = chr(0x20) # SPACE else: keytab[shiftstate][0x66] = chr(0x94) # insert keytab[shiftstate][0x0d] = chr(0x18) # shift-TAB keytab[shiftstate][0x5a] = chr(0x8d) # shift-CR keytab[shiftstate][0x29] = chr(0xA0) # shift-SPACE # analyze problems all_keytabs = keytab[REG] + keytab[SHFT] + keytab[CTRL] + keytab[ALT] if ALTGR in keytab: all_keytabs += keytab[ALTGR] petscii_chars_not_reachable = "" for c in all_petscii_chars: if not c in all_keytabs: petscii_chars_not_reachable += unicode_from_petscii(c) petscii_codes_not_reachable = "" for c in all_petscii_codes: if not c in all_keytabs: if not c in all_petscii_codes_ok_if_missing: petscii_codes_not_reachable += c petscii_graphs_not_reachable = "" for c in all_petscii_graphs: if not c in all_keytabs: petscii_graphs_not_reachable += c unicode_not_reachable = "" for c_unicode in kbd_layout['all_originally_reachable_characters']: if iso_mode: c_encoded = latin15_from_unicode(c_unicode) else: c_encoded = petscii_from_unicode(c_unicode) if (c_encoded == chr(0) or not c_encoded in all_keytabs) and not c_unicode in unicode_not_reachable: unicode_not_reachable += c_unicode petscii_chars_not_reachable = ''.join(sorted(petscii_chars_not_reachable)) petscii_codes_not_reachable = ''.join(sorted(petscii_codes_not_reachable)) petscii_graphs_not_reachable = ''.join(sorted(petscii_graphs_not_reachable)) unicode_not_reachable = ''.join(sorted(unicode_not_reachable)) # print name = kbd_layout['name'].replace(' - Custom', '') kbd_id = kbd_layout['short_id'].lower() print("; Name: " + name) print("; Locale: " + kbd_layout['localename']) print("; KLID: " + kbd_id) print(";") if len(petscii_chars_not_reachable) > 0 or len(petscii_codes_not_reachable) > 0 or len(petscii_graphs_not_reachable) > 0: print("; PETSCII characters reachable on a C64 keyboard that are not reachable with this layout:") if len(petscii_chars_not_reachable) > 0: print("; chars: " + pprint.pformat(petscii_chars_not_reachable)) if 
len(petscii_codes_not_reachable) > 0: print("; codes: ", end = '') for c in petscii_codes_not_reachable: if ord(c) in control_codes: print(control_codes[ord(c)] + ' ', end = '') else: print(hex(ord(c)) + ' ', end = '') print() if len(petscii_graphs_not_reachable) > 0: print("; graph: '", end = '') for c in petscii_graphs_not_reachable: print("\\x{0:02x}".format(ord(c)), end = '') print("'") if len(unicode_not_reachable) > 0: if iso_mode: print("; Unicode characters reachable with this layout on Windows but not covered by ISO-8859-15:") else: print("; Unicode characters reachable with this layout on Windows but not covered by PETSCII:") print("; '", end = '') for c in unicode_not_reachable: if ord(c) < 0x20: print("\\x{0:02x}".format(ord(c)), end = '') else: print(c, end = '') print("'") print() if iso_mode: print('.segment "IKBDMETA"\n') prefix = 'i' else: print('.segment "KBDMETA"\n') prefix = '' locale1 = kbd_layout['localename'][0:2].upper() locale2 = kbd_layout['localename'][3:5].upper() if locale1 != locale2: locale1 = kbd_layout['localename'].upper() if len(kbd_layout['localename']) != 5: sys.exit("unknown locale format: " + kbd_layout['localename']) print('\t.byte "' + locale1 + '"', end = '') for i in range(0, 6 - len(locale1)): print(", 0", end = '') print() for shiftstate in [SHFT, ALT, CTRL, ALTGR, REG]: if shiftstate == ALTGR and not ALTGR in keytab.keys(): print_shiftstate = ALT else: print_shiftstate = shiftstate print("\t.word {}kbtab_{}_{}".format(prefix, kbd_id, print_shiftstate), end = '') if shiftstate == REG: print() else: print("-13") print() if iso_mode: print('.segment "IKBDTABLES"\n') else: print('.segment "KBDTABLES"\n') for shiftstate in [REG, SHFT, CTRL, ALT, ALTGR]: if shiftstate == ALTGR and not ALTGR in keytab.keys(): continue print("{}kbtab_{}_{}: ; ".format(prefix, kbd_id, shiftstate), end = '') if shiftstate == 0: print('Unshifted', end='') if shiftstate & 1: print('Shft ', end='') if shiftstate & 6 == 6: print('AltGr ', end='') 
else: if shiftstate & 2: print('Ctrl ', end='') if shiftstate & 4: print('Alt ', end='') if COMPRESSED_OUTPUT == 1 and shiftstate != REG: start = 13 end = 104 else: start = 0 end = 128 for i in range(start, end): if i == start or i & 7 == 0: print() print('\t.byte ', end='') c = keytab[shiftstate][i] if ord(c) >= 0x20 and ord(c) <= 0x7e: print("'{}'".format(c), end = '') else: print("${:02x}".format(ord(c)), end = '') if i & 7 != 7: print(',', end = '') print()
28.90669
829
0.637859
0
0
0
0
0
0
0
0
3,958
0.240681
acd3f396f98f04bb660c487afcf7cf999a2fe26c
1,219
py
Python
src_py/testudpvilistus.py
paulharter/biofeed
fa83e5dcec568d1cd7350b2047c9b91891d1623e
[ "MIT" ]
null
null
null
src_py/testudpvilistus.py
paulharter/biofeed
fa83e5dcec568d1cd7350b2047c9b91891d1623e
[ "MIT" ]
null
null
null
src_py/testudpvilistus.py
paulharter/biofeed
fa83e5dcec568d1cd7350b2047c9b91891d1623e
[ "MIT" ]
null
null
null
import socket,time tcpSock=socket.socket(socket.AF_INET,socket.SOCK_STREAM) print "try connect",tcpSock tcpSock.connect(('169.254.1.1',2000)) print "connected - wait for response" print tcpSock.recv(8) time.sleep(0.1) tcpSock.send("$$$") time.sleep(0.5) print tcpSock.recv(1024) print "Param set" tcpSock.send("set com time 0\r") # set maximum wait between buffers tcpSock.send("set com size 140\r") # set buffer size tcpSock.send("set ip flags 3\r") # turn off TCP retries tcpSock.send("set ip host 169.254.1.10\r") # turn off TCP retries tcpSock.send("set ip remote 49990\r") # turn off TCP retries tcpSock.send("set sys autosleep 0\r") # turn off TCP retries tcpSock.send("set ip proto 3\r") # turn off TCP retries tcpSock.send("exit\r") tcpSock.send("RING\n") tcpSock.send("RING\n") tcpSock.close() HOST = '' # Symbolic name meaning all available interfaces PORT = 49990 # Arbitrary non-privileged port s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.bind((HOST,PORT)) s.settimeout(500) count=0 print 'UDP Listening on port ',PORT while 1: count=count+1 data,address=s.recvfrom(1024) print address,len(data),count print "end"
32.078947
75
0.694011
0
0
0
0
0
0
0
0
525
0.430681
acd5badd49e6f717d0eac28b84d7e3b325d5d993
20,582
py
Python
returns/context/requires_context_result.py
MichaelAOlson/returns
56e0df46846890b1c33dd26216086f94269292ad
[ "BSD-2-Clause" ]
1
2020-12-19T07:34:19.000Z
2020-12-19T07:34:19.000Z
returns/context/requires_context_result.py
MichaelAOlson/returns
56e0df46846890b1c33dd26216086f94269292ad
[ "BSD-2-Clause" ]
null
null
null
returns/context/requires_context_result.py
MichaelAOlson/returns
56e0df46846890b1c33dd26216086f94269292ad
[ "BSD-2-Clause" ]
null
null
null
from __future__ import annotations from typing import TYPE_CHECKING, Any, Callable, ClassVar, TypeVar from typing_extensions import final from returns.context import NoDeps from returns.interfaces.specific import reader_result from returns.primitives.container import BaseContainer from returns.primitives.hkt import Kind3, SupportsKind3, dekind from returns.result import Failure, Result, Success if TYPE_CHECKING: from returns.context.requires_context import RequiresContext # Context: _EnvType = TypeVar('_EnvType', contravariant=True) _NewEnvType = TypeVar('_NewEnvType') # Result: _ValueType = TypeVar('_ValueType', covariant=True) _NewValueType = TypeVar('_NewValueType') _ErrorType = TypeVar('_ErrorType', covariant=True) _NewErrorType = TypeVar('_NewErrorType') # Helpers: _FirstType = TypeVar('_FirstType') @final class RequiresContextResult( BaseContainer, SupportsKind3['RequiresContextResult', _ValueType, _ErrorType, _EnvType], reader_result.ReaderResultBasedN[_ValueType, _ErrorType, _EnvType], ): """ The ``RequiresContextResult`` combinator. See :class:`returns.context.requires_context.RequiresContext` for more docs. This is just a handy wrapper around ``RequiresContext[Result[a, b], env]`` which represents a context-dependent pure operation that might fail and return :class:`returns.result.Result`. It has several important differences from the regular ``Result`` classes. It does not have ``Success`` and ``Failure`` subclasses. Because, the computation is not yet performed. And we cannot know the type in advance. So, this is a thin wrapper, without any changes in logic. Why do we need this wrapper? That's just for better usability! .. code:: python >>> from returns.context import RequiresContext >>> from returns.result import Success, Result >>> def function(arg: int) -> Result[int, str]: ... return Success(arg + 1) >>> # Without wrapper: >>> assert RequiresContext.from_value(Success(1)).map( ... lambda result: result.bind(function), ... )(...) 
== Success(2) >>> # With wrapper: >>> assert RequiresContextResult.from_value(1).bind_result( ... function, ... )(...) == Success(2) This way ``RequiresContextResult`` allows to simply work with: - raw values and pure functions - ``RequiresContext`` values and pure functions returning it - ``Result`` and functions returning it .. rubric:: Important implementation details Due it is meaning, ``RequiresContextResult`` cannot have ``Success`` and ``Failure`` subclasses. We only have just one type. That's by design. Different converters are also not supported for this type. Use converters inside the ``RequiresContext`` context, not outside. See also: - https://dev.to/gcanti/getting-started-with-fp-ts-reader-1ie5 - https://en.wikipedia.org/wiki/Lazy_evaluation - https://bit.ly/2R8l4WK - https://bit.ly/2RwP4fp """ #: This field has an extra 'RequiresContext' just because `mypy` needs it. _inner_value: Callable[ [RequiresContextResult, _EnvType], Result[_ValueType, _ErrorType], ] #: A convenient placeholder to call methods created by `.from_value()`. no_args: ClassVar[NoDeps] = object() def __init__( self, inner_value: Callable[[_EnvType], Result[_ValueType, _ErrorType]], ) -> None: """ Public constructor for this type. Also required for typing. Only allows functions of kind ``* -> *`` and returning :class:`returns.result.Result` instances. .. code:: python >>> from returns.context import RequiresContextResult >>> from returns.result import Success >>> str(RequiresContextResult(lambda deps: Success(deps + 1))) '<RequiresContextResult: <function <lambda> at ...>>' """ super().__init__(inner_value) def __call__(self, deps: _EnvType) -> Result[_ValueType, _ErrorType]: """ Evaluates the wrapped function. .. code:: python >>> from returns.context import RequiresContextResult >>> from returns.result import Success >>> def first(lg: bool) -> RequiresContextResult[int, str, float]: ... # `deps` has `float` type here: ... return RequiresContextResult( ... 
lambda deps: Success(deps if lg else -deps), ... ) >>> instance = first(False) >>> assert instance(3.5) == Success(-3.5) In other things, it is a regular Python magic method. """ return self._inner_value(deps) def swap(self) -> RequiresContextResult[_ErrorType, _ValueType, _EnvType]: """ Swaps value and error types. So, values become errors and errors become values. It is useful when you have to work with errors a lot. And since we have a lot of ``.bind_`` related methods and only a single ``.lash`` - it is easier to work with values. .. code:: python >>> from returns.context import RequiresContextResult >>> from returns.result import Failure, Success >>> success = RequiresContextResult.from_value(1) >>> failure = RequiresContextResult.from_failure(1) >>> assert success.swap()(...) == Failure(1) >>> assert failure.swap()(...) == Success(1) """ return RequiresContextResult(lambda deps: self(deps).swap()) def map( # noqa: WPS125 self, function: Callable[[_ValueType], _NewValueType], ) -> RequiresContextResult[_NewValueType, _ErrorType, _EnvType]: """ Composes successful container with a pure function. .. code:: python >>> from returns.context import RequiresContextResult >>> from returns.result import Success, Failure >>> assert RequiresContextResult.from_value(1).map( ... lambda x: x + 1, ... )(...) == Success(2) >>> assert RequiresContextResult.from_failure(1).map( ... lambda x: x + 1, ... )(...) == Failure(1) """ return RequiresContextResult(lambda deps: self(deps).map(function)) def apply( self, container: Kind3[ RequiresContextResult, Callable[[_ValueType], _NewValueType], _ErrorType, _EnvType, ], ) -> RequiresContextResult[_NewValueType, _ErrorType, _EnvType]: """ Calls a wrapped function in a container on this container. .. code:: python >>> from returns.context import RequiresContextResult >>> from returns.result import Success, Failure, Result >>> def transform(arg: str) -> str: ... return arg + 'b' >>> assert RequiresContextResult.from_value('a').apply( ... 
RequiresContextResult.from_value(transform), ... )(...) == Success('ab') >>> assert RequiresContextResult.from_failure('a').apply( ... RequiresContextResult.from_value(transform), ... )(...) == Failure('a') >>> assert isinstance(RequiresContextResult.from_value('a').apply( ... RequiresContextResult.from_failure(transform), ... )(...), Result.failure_type) is True """ return RequiresContextResult( lambda deps: self(deps).apply(dekind(container)(deps)), ) def bind( self, function: Callable[ [_ValueType], Kind3[ RequiresContextResult, _NewValueType, _ErrorType, _EnvType, ], ], ) -> RequiresContextResult[_NewValueType, _ErrorType, _EnvType]: """ Composes this container with a function returning the same type. .. code:: python >>> from returns.context import RequiresContextResult >>> from returns.result import Success, Failure >>> def first(lg: bool) -> RequiresContextResult[int, int, float]: ... # `deps` has `float` type here: ... return RequiresContextResult( ... lambda deps: Success(deps) if lg else Failure(-deps), ... ) >>> def second( ... number: int, ... ) -> RequiresContextResult[str, int, float]: ... # `deps` has `float` type here: ... return RequiresContextResult( ... lambda deps: Success('>=' if number >= deps else '<'), ... ) >>> assert first(True).bind(second)(1) == Success('>=') >>> assert first(False).bind(second)(2) == Failure(-2) """ return RequiresContextResult( lambda deps: self(deps).bind( lambda inner: function(inner)(deps), # type: ignore ), ) #: Alias for `bind_context_result` method, it is the same as `bind` here. bind_context_result = bind def bind_result( self, function: Callable[[_ValueType], Result[_NewValueType, _ErrorType]], ) -> RequiresContextResult[_NewValueType, _ErrorType, _EnvType]: """ Binds ``Result`` returning function to current container. .. code:: python >>> from returns.context import RequiresContextResult >>> from returns.result import Success, Failure, Result >>> def function(num: int) -> Result[str, int]: ... 
return Success(num + 1) if num > 0 else Failure('<0') >>> assert RequiresContextResult.from_value(1).bind_result( ... function, ... )(RequiresContextResult.no_args) == Success(2) >>> assert RequiresContextResult.from_value(0).bind_result( ... function, ... )(RequiresContextResult.no_args) == Failure('<0') >>> assert RequiresContextResult.from_failure(':(').bind_result( ... function, ... )(RequiresContextResult.no_args) == Failure(':(') """ return RequiresContextResult(lambda deps: self(deps).bind(function)) def bind_context( self, function: Callable[ [_ValueType], 'RequiresContext[_NewValueType, _EnvType]', ], ) -> RequiresContextResult[_NewValueType, _ErrorType, _EnvType]: """ Binds ``RequiresContext`` returning function to current container. .. code:: python >>> from returns.context import RequiresContext >>> from returns.result import Success, Failure >>> def function(arg: int) -> RequiresContext[int, str]: ... return RequiresContext(lambda deps: len(deps) + arg) >>> assert function(2)('abc') == 5 >>> assert RequiresContextResult.from_value(2).bind_context( ... function, ... )('abc') == Success(5) >>> assert RequiresContextResult.from_failure(2).bind_context( ... function, ... )('abc') == Failure(2) """ return RequiresContextResult( lambda deps: self(deps).map( lambda inner: function(inner)(deps), # type: ignore ), ) def alt( self, function: Callable[[_ErrorType], _NewErrorType], ) -> RequiresContextResult[_ValueType, _NewErrorType, _EnvType]: """ Composes failed container with a pure function. .. code:: python >>> from returns.context import RequiresContextResult >>> from returns.result import Success, Failure >>> assert RequiresContextResult.from_value(1).alt( ... lambda x: x + 1, ... )(...) == Success(1) >>> assert RequiresContextResult.from_failure(1).alt( ... lambda x: x + 1, ... )(...) 
== Failure(2) """ return RequiresContextResult(lambda deps: self(deps).alt(function)) def lash( self, function: Callable[ [_ErrorType], Kind3[ RequiresContextResult, _ValueType, _NewErrorType, _EnvType, ], ], ) -> RequiresContextResult[_ValueType, _NewErrorType, _EnvType]: """ Composes this container with a function returning the same type. .. code:: python >>> from returns.context import RequiresContextResult >>> from returns.result import Success, Failure >>> def lashable(arg: str) -> RequiresContextResult[str, str, str]: ... if len(arg) > 1: ... return RequiresContextResult( ... lambda deps: Success(deps + arg), ... ) ... return RequiresContextResult( ... lambda deps: Failure(arg + deps), ... ) >>> assert RequiresContextResult.from_value('a').lash( ... lashable, ... )('c') == Success('a') >>> assert RequiresContextResult.from_failure('a').lash( ... lashable, ... )('c') == Failure('ac') >>> assert RequiresContextResult.from_failure('aa').lash( ... lashable, ... )('b') == Success('baa') """ return RequiresContextResult( lambda deps: self(deps).lash( lambda inner: function(inner)(deps), # type: ignore ), ) def modify_env( self, function: Callable[[_NewEnvType], _EnvType], ) -> RequiresContextResult[_ValueType, _ErrorType, _NewEnvType]: """ Allows to modify the environment type. .. code:: python >>> from returns.context import RequiresContextResultE >>> from returns.result import Success, safe >>> def div(arg: int) -> RequiresContextResultE[float, int]: ... return RequiresContextResultE( ... safe(lambda deps: arg / deps), ... ) >>> assert div(3).modify_env(int)('2') == Success(1.5) >>> assert div(3).modify_env(int)('0').failure() """ return RequiresContextResult(lambda deps: self(function(deps))) @classmethod def ask(cls) -> RequiresContextResult[_EnvType, _ErrorType, _EnvType]: """ Is used to get the current dependencies inside the call stack. Similar to :meth:`returns.context.requires_context.RequiresContext.ask`, but returns ``Result`` instead of a regular value. 
Please, refer to the docs there to learn how to use it. One important note that is worth duplicating here: you might need to provide ``_EnvType`` explicitly, so ``mypy`` will know about it statically. .. code:: python >>> from returns.context import RequiresContextResultE >>> from returns.result import Success >>> assert RequiresContextResultE[int, int].ask().map( ... str, ... )(1) == Success('1') """ return RequiresContextResult(Success) @classmethod def from_result( cls, inner_value: Result[_NewValueType, _NewErrorType], ) -> RequiresContextResult[_NewValueType, _NewErrorType, NoDeps]: """ Creates new container with ``Result`` as a unit value. .. code:: python >>> from returns.context import RequiresContextResult >>> from returns.result import Success, Failure >>> deps = RequiresContextResult.no_args >>> assert RequiresContextResult.from_result( ... Success(1), ... )(deps) == Success(1) >>> assert RequiresContextResult.from_result( ... Failure(1), ... )(deps) == Failure(1) """ return RequiresContextResult(lambda _: inner_value) @classmethod def from_typecast( cls, inner_value: 'RequiresContext[Result[_NewValueType, _NewErrorType], _EnvType]', ) -> RequiresContextResult[_NewValueType, _NewErrorType, _EnvType]: """ You might end up with ``RequiresContext[Result[...]]`` as a value. This method is designed to turn it into ``RequiresContextResult``. It will save all the typing information. It is just more useful! .. code:: python >>> from returns.context import RequiresContext >>> from returns.result import Success, Failure >>> assert RequiresContextResult.from_typecast( ... RequiresContext.from_value(Success(1)), ... )(RequiresContextResult.no_args) == Success(1) >>> assert RequiresContextResult.from_typecast( ... RequiresContext.from_value(Failure(1)), ... 
)(RequiresContextResult.no_args) == Failure(1) """ return RequiresContextResult(inner_value) @classmethod def from_context( cls, inner_value: 'RequiresContext[_NewValueType, _NewEnvType]', ) -> RequiresContextResult[_NewValueType, Any, _NewEnvType]: """ Creates new container from ``RequiresContext`` as a success unit. .. code:: python >>> from returns.context import RequiresContext >>> from returns.result import Success >>> assert RequiresContextResult.from_context( ... RequiresContext.from_value(1), ... )(...) == Success(1) """ return RequiresContextResult(lambda deps: Success(inner_value(deps))) @classmethod def from_failed_context( cls, inner_value: 'RequiresContext[_NewValueType, _NewEnvType]', ) -> RequiresContextResult[Any, _NewValueType, _NewEnvType]: """ Creates new container from ``RequiresContext`` as a failure unit. .. code:: python >>> from returns.context import RequiresContext >>> from returns.result import Failure >>> assert RequiresContextResult.from_failed_context( ... RequiresContext.from_value(1), ... )(...) == Failure(1) """ return RequiresContextResult(lambda deps: Failure(inner_value(deps))) @classmethod def from_result_context( cls, inner_value: RequiresContextResult[_NewValueType, _NewErrorType, _NewEnvType], ) -> RequiresContextResult[_NewValueType, _NewErrorType, _NewEnvType]: """ Creates ``RequiresContextResult`` from another instance of it. .. code:: python >>> from returns.context import ReaderResult >>> from returns.result import Success, Failure >>> assert ReaderResult.from_result_context( ... ReaderResult.from_value(1), ... )(...) == Success(1) >>> assert ReaderResult.from_result_context( ... ReaderResult.from_failure(1), ... )(...) == Failure(1) """ return inner_value @classmethod def from_value( cls, inner_value: _FirstType, ) -> RequiresContextResult[_FirstType, Any, NoDeps]: """ Creates new container with ``Success(inner_value)`` as a unit value. .. 
code:: python >>> from returns.context import RequiresContextResult >>> from returns.result import Success >>> assert RequiresContextResult.from_value(1)(...) == Success(1) """ return RequiresContextResult(lambda _: Success(inner_value)) @classmethod def from_failure( cls, inner_value: _FirstType, ) -> RequiresContextResult[Any, _FirstType, NoDeps]: """ Creates new container with ``Failure(inner_value)`` as a unit value. .. code:: python >>> from returns.context import RequiresContextResult >>> from returns.result import Failure >>> assert RequiresContextResult.from_failure(1)(...) == Failure(1) """ return RequiresContextResult(lambda _: Failure(inner_value)) # Aliases: #: Alias for a popular case when ``Result`` has ``Exception`` as error type. RequiresContextResultE = RequiresContextResult[ _ValueType, Exception, _EnvType, ] #: Alias to save you some typing. Uses original name from Haskell. ReaderResult = RequiresContextResult #: Alias to save you some typing. Has ``Exception`` as error type. ReaderResultE = RequiresContextResult[_ValueType, Exception, _EnvType]
33.304207
80
0.596492
19,323
0.93883
0
0
19,330
0.93917
0
0
14,372
0.69828
acd72b70de8a62feaf26ad1334f4a7736fd2cbd8
86,104
py
Python
save.py
brunnatorino/FEC_app
d9dec2ae0e4a3eb2f44976b1429596c657073a31
[ "MIT" ]
null
null
null
save.py
brunnatorino/FEC_app
d9dec2ae0e4a3eb2f44976b1429596c657073a31
[ "MIT" ]
null
null
null
save.py
brunnatorino/FEC_app
d9dec2ae0e4a3eb2f44976b1429596c657073a31
[ "MIT" ]
null
null
null
import pandas as pd from gooey import Gooey, GooeyParser import numpy as np import xlsxwriter import xlrd @Gooey(program_name="FEC FILE FOR FRANCE", required_cols= 4,default_size=(710, 700),navigation='TABBED', header_bg_color = '#48a7fa') def parse_args(): parser = GooeyParser() FilesGL = parser.add_argument_group('GL Posted Items') FilesGL.add_argument('GL', action='store', widget='FileChooser', help="Excel File From SAP G/L View: Normal Items") FileNOTE = parser.add_argument_group('Entry View Parked Items') FileNOTE.add_argument('Parked', action='store', widget='FileChooser', help="Excel File From SAP Entry View: Only Parked and Noted Items") choose = parser.add_argument_group('FEC Name') choose.add_argument('Choose_File_Name', action='store', help="File name with .xlsx in the end. Standard for FEC is 533080222FECYYYYMMDD", gooey_options={ 'validator': { 'test': 'user_input.endswith(".xlsx") == True', 'message': 'Must contain .xlsx at the end!' } }) args = parser.parse_args() return args def combine(file, file2): gl_df = pd.read_excel(file) parked_df = pd.read_excel(file2) numbers = gl_df['Document Number'].tolist() gl = gl_df.append(parked_df[~parked_df['Document Number'].isin(numbers)]) gl = gl.reset_index() return gl def transform(gl): gl['JournalCode'] = gl['Document Type'] gl['JournalLib'] = gl['Document Header Text'] gl['EcritureNum'] = gl['Document Number'] gl['EcritureDate'] = gl['Posting Date'] gl['CompteNum'] = gl['G/L Account'] gl['CompteLib'] = gl['G/L Account'] gl['CompAuxLib'] = gl['Offsetting acct no.'] gl['PieceRef'] = gl['Reference'] gl['EcritureLib'] = gl['Text'] gl['Amount'] = gl['Amount in local currency'] gl['MontantDevise'] = '' gl['Idevise'] = '' gl['PieceDate'] = gl['Document Date'] gl['ValidDate'] = gl['Entry Date'] gl['EcritureLet'] = gl['Assignment'] gl['DateLet'] = gl['Entry Date'] gl = gl.dropna(subset=['Amount']) gl.loc[gl["Amount"] < 0 ,'Credit'] = gl['Amount'] gl.loc[gl["Amount"] > 0 ,'Debit'] = gl['Amount'] 
gl.loc[gl["Debit"].isnull() ,'Debit'] = 0 gl.loc[gl["Credit"].isnull() ,'Credit'] = 0 gl.loc[gl["EcritureLet"].isnull(),'DateLet'] = '' gl.loc[gl["EcritureLet"].isnull(),'DateLet'] = '' del gl['Amount'] del gl['Amount in local currency'] accounts = pd.read_excel("mapping-accounts.xlsx") accounts1 = accounts[['G/L Account #','FrMap']] accounts2 = accounts[['G/L Account #','FEC Compliant']] accounts1 = accounts1.set_index('G/L Account #').to_dict()['FrMap'] accounts2 = accounts2.set_index('G/L Account #').to_dict()['FEC Compliant'] gl['CompteLib'] = gl['CompteLib'].replace(accounts2) gl['CompteNum'] = (gl['CompteNum'].map(accounts1).astype('Int64').astype(str) + gl['CompteNum'].astype(str)) gl['CompteNum'] = gl['CompteNum'].str.replace('\.0$', '') journals = pd.read_excel("test128.xlsx") codes = pd.read_excel('mapping-journal.xlsx') journals = journals.set_index('DocHeader').to_dict()['JournalLib_FR'] codes = codes.set_index('JournalCode').to_dict()["JournalLib_FR"] gl.loc[gl["JournalLib"].isnull(),'JournalLib'] = gl["JournalCode"].map(str) gl['JournalLib'] = gl['JournalLib'].replace(journals) gl['JournalLib'] = gl['JournalLib'].replace(codes) vendors = pd.read_excel("Vendors1.xlsx") vendors = vendors.set_index('No').to_dict()['Name'] gl['CompAuxLib'] = gl['CompAuxLib'].map(vendors) gl['CompAuxNum'] = "F" + gl['CompAuxLib'] gl.loc[(~gl.CompAuxLib.isnull()) & (gl["EcritureLib"].isnull()),'EcritureLib'] = gl['JournalLib'].map(str) + " de " + gl['CompAuxLib'].map(str) gl.loc[(gl.CompAuxLib.isnull()) & (gl["EcritureLib"].isnull()),'EcritureLib'] = gl['JournalLib'].map(str) + gl['EcritureNum'].map(str) gl['EcritureLib'] = gl['EcritureLib'].str.replace('^\d+', '') return gl def translate(gl): journals = pd.read_excel("test128.xlsx") codes = pd.read_excel('mapping-journal.xlsx') journals = journals.set_index('DocHeader').to_dict()['JournalLib_FR'] codes = codes.set_index('JournalCode').to_dict()["JournalLib_FR"] mapping_Valuation = {" Valuation on": " Évaluation sur"," 
Valuation on Reverse":" Évaluation sur Contre Passation", " Reverse Posting":" Contre-Passation d'Ecriture - Conversion de devise sur", " Translation Using":" Conversion de devise sur"} mapping_AA = {"Reclass from": " Reclassification de", "reclass from": " Reclassification de", "ZEE MEDIA":"ZEE MEDIA Campaignes Numériques", "TRAINING CONTRI. ER JANUARY '19":"FORMATION CONTRI. ER JANVIER' 19", "TAX FEES":"Taxes","SOCIAL SECURITY: URSSAF":"SÉCURITÉ SOCIALE: URSSAF","SOCIAL SECURITY: TRAINING CONTRIBUTIONS":"SÉCURITÉ SOCIALE: CONTRIBUTIONS À LA FORMATION", "SOCIAL SECURITY: APPRENTICESHIP CONTRIBU":"SÉCURITÉ SOCIALE: CONTRIBUTION À L’APPRENTISSAGE","RSM":"SERVICES DE PAIE RSM EF18","RSA":"SERVICES DE PAIE RSA OCT-JAN", "PRIVATE HEALTH":"SANTÉ PRIVÉE: ASSURANCE MÉDICALE-AXA/","PENSION: PENSION CONTRIBUTIONS - REUNICA":"PENSION: COTISATIONS DE RETRAITE-REUNICA","PENSION: LIFE & DISABILITY INSURANCE - R":"PENSION: ASSURANCE VIE & INVALIDITÉ-R", "PENSION JANUARY '19":"PENSION JANVIER '19", "ON CALL JANUARY '19":"Disponible Janvier'19", "NRE + PROJECT INITIATION FEES":"NRE + FRAIS D’INITIATION AU PROJET (PO 750003","NET PAY JANUARY '19":"Payeante Janvier'19","JANUARY'19":"JANVIER'19", "LUNCH VOUCHER- WITHHOLDING":"BON DÉJEUNER-RETENUE","HOLIDAY BONUS ACCRUAL FY18/19":"CUMUL DES PRIMES DE VACANCES EF18/19", "GROSS SALARY JANUARY '19":"SALAIRE BRUT JANVIER' 19","EMEA ACCRUAL P8FY19":"P8FY19 D’ACCUMULATION EMEA","COMMISSION RE-ACCRUAL":"COMMISSION RÉ-ACCUMULATION", "COMMISSION ACCRUAL":"COMMISSION D’ACCUMULATION","MARCH":"MARS","MAY":"MAI","APRIL":"AVRIL","AUDIT FEES":"HONORAIRES D’AUDIT", "UNSUBMITTED_UNPOSTED BOA ACCRUAL":"Accumulation BOA non soumise non exposée","UNASSIGNED CREDITCARD BOA ACCRUAL":"NON ASSIGNÉ CREDITCARD BOA ACCUMULATION ", "EMEA ACCRUAL":"ACCUMULATION EMEA","Exhibit Expenses":"Frais d'exposition","Hotel Tax":"Taxe hôtelière","Company Events":"Événements d'entreprise", "Public Transport":"Transport public", "Agency Booking Fees":"Frais de réservation 
d'agence","Working Meals (Employees Only)":"Repas de travail (employés seulement)", "Airfare":"Billet d'avion","Office Supplies":"Fournitures de bureau","Tolls":"Péages", "write off difference see e-mail attached":"radiation de la différence voir e-mail ci-joint", "Manual P/ment and double payment to be deduct":"P/ment manuel et double paiement à déduire","FX DIFFERENCE ON RSU":"DIFFERENCE FX SUR RSU", "DEFINED BENEFIT LIABILITY-TRUE UP":"RESPONSABILITÉ À PRESTATIONS DÉTERMINÉES-TRUE UP","EXTRA RELEASE FOR STORAGE REVERSED":"EXTRA LIBERATION POUR STOCKAGE CONTREPASSATION", "RECLASS BANK CHARGES TO CORRECT COST CEN":"RECLASSER LES FRAIS BANCAIRES POUR CORRIGER","PAYROLL INCOME TAXES":"IMPÔTS SUR LES SALAIRES", "TRAINING TAX TRUE UP":"TAXE DE FORMATION", "FX DIFFERENCE ON STOCK OPTION EXERCISES":"FX DIFFERENCE SUR LES EXERCICES D'OPTIONS STOCK", "Airline Frais":"Frais de Transport Aérien","Agency Booking Fees":"Frais de Réservation d'Agence","Computer Supplies":"Fournitures informatiques", "AUDIT FEES":"FRAIS D'AUDIT", "HOLIDAY BONUS ACCRUAL ":"ACCUMULATION DE BONUS DE VACANCES","TAX FEES":"FRAIS D'IMPÔT", "SOCIAL SECURITY: APPRENTICESHIP CONTRIBU":"SÉCURITÉ SOCIALE: CONTRIBUITION À L’APPRENTISSAGE", "SOCIAL SECURITY: TRAINING CONTRIBUTIONS":"SÉCURITÉ SOCIALE: CONTRIBUTIONS À LA FORMATION", "TRAVEL COST":"FRAIS DE VOYAGE", "HOUSING TAX":"TAXE SUR LE LOGEMENT", "PAYROLL INCOME TAXES":"IMPÔTS SUR LE REVENU DE LA PAIE","INCOME TAX-PAS":"IMPÔT SUR LE REVENU-PAS", "IC SETTLEMENT":"Règlement Interentreprises", "VACATION TAKEN":"VACANCES PRISES", "SOCIAL SECURITY: APPR. 
CONTR.":"SÉCURITÉ SOCIALE: CONTRIBUTION À L’APPRENTISSAGE", "POST OF AVRIL DEC IN CORRECT SIGN":"CORRECTION D'ECRITURE AVRIL DEC"} gl = gl.replace({"EcritureLib":mapping_Valuation}, regex=True) gl = gl.replace({"EcritureLib":mapping_AA}, regex=True) gl['EcritureLib'] = gl["EcritureLib"].str.replace('COST-PLUS', 'Revient Majoré') gl['EcritureLib'] = gl["EcritureLib"].str.replace('PRITVAE HEALTH: MEDICAL INSURANCE', 'SANTÉ PRIVÉE: ASSURANCE MÉDICALE') gl['EcritureLib'] = gl["EcritureLib"].str.replace('MEDICAL INSURANCE', 'ASSURANCE MÉDICALE') gl['EcritureLib'] = gl["EcritureLib"].str.replace('UNASSIGNED', 'NON ATTRIBUÉ') gl['EcritureLib'] = gl["EcritureLib"].str.replace('Payout', 'Paiement') gl['EcritureLib'] = gl["EcritureLib"].str.replace('FRINGE COST', 'COÛT MARGINAL') gl['EcritureLib'] = gl["EcritureLib"].str.replace('PROJECT INITIATION', 'LANCEMENT DU PROJET') gl['EcritureLib'] = gl["EcritureLib"].str.replace('ACCRUAL', 'ACCUMULATION') gl['EcritureLib'] = gl["EcritureLib"].str.replace('CREDITCARD', 'CARTE DE CRÉDIT') gl['EcritureLib'] = gl["EcritureLib"].str.replace('ACCR ', 'ACCUM ') gl['EcritureLib'] = gl["EcritureLib"].str.replace('VAT ', 'TVA ') gl['EcritureLib'] = gl["EcritureLib"].str.replace('SOCIAL SECURITY ', 'SÉCURITÉ SOCIALE') gl['EcritureLib'] = gl["EcritureLib"].str.replace('SEPTEMBER', 'SEPT') gl['EcritureLib'] = gl["EcritureLib"].str.replace('TAXBACK', 'Reboursement') gl['EcritureLib'] = gl["EcritureLib"].str.replace('REPORT', '') gl['EcritureLib'] = gl["EcritureLib"].str.replace("Reverse Posting", "Contre Passation d'Ecriture") gl['EcritureLib'] = gl["EcritureLib"].str.replace("BASE RENT", "Location Base") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Rent ", "Location ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("RENT ", "Location ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("CLEARING", "compensation ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("clearing", "compensation ") gl['EcritureLib'] = 
gl["EcritureLib"].str.replace("BILLING CHARGES", "FRAIS DE FACTURATION ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("UNPAID", "NON PAYÉ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("PROPERTY TAX", "IMPÔT FONCIER ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Trans. Using", "Conversion sur") gl['EcritureLib'] = gl["EcritureLib"].str.replace("SALARIES", "Salaires") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Refund", "Remboursement") gl['EcritureLib'] = gl["EcritureLib"].str.replace("REFUND", "Remboursement") gl['EcritureLib'] = gl["EcritureLib"].str.replace("no invoice", "pas de facture") gl['EcritureLib'] = gl["EcritureLib"].str.replace("COST-PLUS SERVICE REVENUE", "Revenus de service Revient Majoré") gl['EcritureLib'] = gl["EcritureLib"].str.replace("SETTLEMENT", "RÈGLEMENT ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("PURCHASE", "ACHAT") gl['EcritureLib'] = gl["EcritureLib"].str.replace("NON-CP SETTLE", "RÈGLEMENT NON-CP") gl['EcritureLib'] = gl["EcritureLib"].str.replace("PAID ", " Payé ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("FEES ", "Frais") gl['EcritureLib'] = gl["EcritureLib"].str.replace("January", "Janvier") gl['EcritureLib'] = gl["EcritureLib"].str.replace("February", "Février") gl['EcritureLib'] = gl["EcritureLib"].str.replace("March", "Mars") gl['EcritureLib'] = gl["EcritureLib"].str.replace("April", "Avril") gl['EcritureLib'] = gl["EcritureLib"].str.replace("May", "Mai") gl['EcritureLib'] = gl["EcritureLib"].str.replace("June", "Juin") gl['EcritureLib'] = gl["EcritureLib"].str.replace("July", "Juillet") gl['EcritureLib'] = gl["EcritureLib"].str.replace("September", "Septembre") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Aug.", "Août") gl['EcritureLib'] = gl["EcritureLib"].str.replace("JANUARY", "Janvier") gl['EcritureLib'] = gl["EcritureLib"].str.replace("FEBRUARY", "Février") gl['EcritureLib'] = gl["EcritureLib"].str.replace("MARCH", "Mars") gl['EcritureLib'] = 
gl["EcritureLib"].str.replace("APRIL", "Avril") gl['EcritureLib'] = gl["EcritureLib"].str.replace("MAY", "Mai") gl['EcritureLib'] = gl["EcritureLib"].str.replace("JUNE", "Juin") gl['EcritureLib'] = gl["EcritureLib"].str.replace("JULY", "Juillet") gl['EcritureLib'] = gl["EcritureLib"].str.replace("SEPTEMBER", "Septembre") gl['EcritureLib'] = gl["EcritureLib"].str.replace("AUGUST.", "Août") gl['EcritureLib'] = gl["EcritureLib"].str.replace("NOVEMBER.", "Novembre") gl['EcritureLib'] = gl["EcritureLib"].str.replace("DECEMBER.", "Décembre") gl['EcritureLib'] = gl["EcritureLib"].str.replace("December", "Décembre") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Feb.", "Fév.") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Mar.", "Mars") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Apr.", "Avril") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Aug.", "Août") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Aug.", "Août") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Reverse ", "Contre-passation ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("INTEREST CHARGE", "CHARGE D'INTÉRÊT") gl['EcritureLib'] = gl["EcritureLib"].str.replace("-SICK LEAVE PAY", "-Paiement congé maladie") gl['EcritureLib'] = gl["EcritureLib"].str.replace("RECLASSEMENTIFICATION", "RECLASSIFICATION") gl['EcritureLib'] = gl["EcritureLib"].str.replace("INSTALMENT", "VERSEMENT") gl['EcritureLib'] = gl["EcritureLib"].str.replace("FIRST", "1ere") gl['EcritureLib'] = gl["EcritureLib"].str.replace("FINE LATE PAY.", "Amende pour retard de paiement") gl['EcritureLib'] = gl["EcritureLib"].str.replace("-PATERNITY PAY", "Indemnités de paternité") gl['EcritureLib'] = gl["EcritureLib"].str.replace("SOCIAL SECURITY:", "SÉCURITÉ SOCIALE:") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Trip from", "Voyage de:") gl['EcritureLib'] = gl["EcritureLib"].str.replace(" To ", " à") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Shipping", "Livraison") gl['EcritureLib'] = 
gl["EcritureLib"].str.replace("VOXEET INTEGRATION COSTS", "COÛTS D'INTÉGRATION DE VOXEET") gl['EcritureLib'] = gl["EcritureLib"].str.replace("INCOME TAX", "IMPÔT SUR LE REVENU") gl['EcritureLib'] = gl["EcritureLib"].str.replace('Rideshare', 'Covoiturage') gl['EcritureLib'] = gl["EcritureLib"].str.replace('Travel Meals', 'Repas de Travail') gl['EcritureLib'] = gl["EcritureLib"].str.replace('Fees', 'Frais') gl['EcritureLib'] = gl["EcritureLib"].str.replace('Phone', 'Téléphone') gl['EcritureLib'] = gl["EcritureLib"].str.replace("Books", "Abonnements") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Subcriptions", "Location Base") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Meals", "Repas") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Entertainment", "divertissement ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Third Party", "tiers ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Training Fees", "Frais d0 Formation") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Conferences/Tradeshows Registratio", "Conférences/Tradeshows Enregistrement") gl['EcritureLib'] = gl["EcritureLib"].str.replace("FOR", "POUR") gl['EcritureLib'] = gl["EcritureLib"].str.replace("ROUNDING", "ARRONDISSEMENT") gl['EcritureLib'] = gl["EcritureLib"].str.replace("STORAGE", "STOCKAGE") gl['EcritureLib'] = gl["EcritureLib"].str.replace("VACATION ACCURAL", "Vacances Accumulées") gl['EcritureLib'] = gl["EcritureLib"].str.replace("RECEIVABLE ", "Recevables") gl['EcritureLib'] = gl["EcritureLib"].str.replace("AFTER PAYOUT ", "APRÈS PAIEMENT") gl['EcritureLib'] = gl["EcritureLib"].str.replace("CLEAN UP ", "APUREMENT") gl['EcritureLib'] = gl["EcritureLib"].str.replace("EMPLOYEE TRAVEL INSUR ", "ASSURANCE DE VOYAGE DES EMPLOYÉS") gl['EcritureLib'] = gl["EcritureLib"].str.replace("CORRECTION OF", "CORRECTION DE") gl['EcritureLib'] = gl["EcritureLib"].str.replace("TAXES PAYROLL", "IMPÔTS SUR LA MASSE SALARIALE") gl['EcritureLib'] = gl["EcritureLib"].str.replace("ACCOUNT", 
"COMPTE") gl['EcritureLib'] = gl["EcritureLib"].str.replace("TAX", "Impôt") gl['EcritureLib'] = gl["EcritureLib"].str.replace("life disab", "Incapacité de vie") gl['EcritureLib'] = gl["EcritureLib"].str.replace("HOUSING TAX","TAXE D'HABITATION") gl['EcritureLib'] = gl["EcritureLib"].str.replace("GROSS SALARY","SALAIRE BRUT") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Cleaning Services","Nettoyage") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Freight","Fret") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Membership","adhésion") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Air cooling Maintenance","Entretien de refroidissement de l'air") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Power on Demand Platform","Plateforme d'energie à la demande") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Sanitaire room installation"," Installation de la salle sanitaire") gl['EcritureLib'] = gl["EcritureLib"].str.replace("subscription","abonnement") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Coffee supplies "," Fournitures de café") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Duty and Tax ","Devoir et fiscalité") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Electricity ","Electricité ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Lunch vouchers ","Bons déjeuner") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Security monitoring","Surveillance de la sécurité") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Water", "L'EAU") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Statutory Audit", "Audit statutaire") gl['EcritureLib'] = gl["EcritureLib"].str.replace(" Meeting room screen installation", "Installation de l'écran de la salle de réunion") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Water", "L'EAU") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Water", "L'EAU") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Tax Credit FY 2016", "Crédit d'impôt Exercice 2016") gl['EcritureLib'] = 
gl["EcritureLib"].str.replace("Bank of America Merill Lynch-T&E statement","Déclaration de Merill Lynch") gl['EcritureLib'] = gl["EcritureLib"].str.replace("English Translation", "Traduction anglaise") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Office Rent", "Location de Bureau") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Annual Electrical Verification", "Vérification électrique annuelle ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Health costs ", "Coûts santé") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Unlimited-receipt and policy audit", "Vérification illimitée des reçus et audites") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Water fountain ", "Fontaine d'eau") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Quartely control visit", "Visite de contrôle trimestrielle") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Fire extinguishers annual check", "Vérification annuelle des extincteurs") gl['EcritureLib'] = gl["EcritureLib"].str.replace("showroom rent", "location de salle d'exposition") gl['EcritureLib'] = gl["EcritureLib"].str.replace("AND ACTUAL RECEIV","ET RECETTES RÉELLES") gl['EcritureLib'] = gl["EcritureLib"].str.replace("FILING","DÉPÔT") gl['EcritureLib'] = gl["EcritureLib"].str.replace("ORDERS","ORDRES") gl['EcritureLib'] = gl["EcritureLib"].str.replace("EXCLUDED -DUMMY CREDIT","EXCLU") gl['EcritureLib'] = gl["EcritureLib"].str.replace("RELARING TO","RELATIF À") gl['EcritureLib'] = gl["EcritureLib"].str.replace("CLEAN UP-","APUREMENT-") gl['EcritureLib'] = gl["EcritureLib"].str.replace("2ND INSTALLEMENT","2ème versement") gl['EcritureLib'] = gl["EcritureLib"].str.replace("DOUBLE PAYMENT","DOUBLE PAIEMENT") gl['EcritureLib'] = gl["EcritureLib"].str.replace("CLEAN UP-","APUREMENT-") gl['EcritureLib'] = gl["EcritureLib"].str.replace("DUTIES","DROITS") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Previous balance","Solde Précédent") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Cash fx","Cash FX") 
gl['EcritureLib'] = gl["EcritureLib"].str.replace("PAYROLL INCOME","REVENU DE PAIE") gl['EcritureLib'] = gl["EcritureLib"].str.replace("TELEPHONE CHARGES","Frais de Téléphone") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Clearing","Compensation") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Hotel","Hôtel") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Miscellaneous","Divers") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Corporate Card-Out-of-Poc","") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Traveling Dolby Empl","Employé itinérant de Dolby") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Tools-Equipment-Lab Supplies","Outils-Equipement-Fournitures de laboratoire") gl['EcritureLib'] = gl["EcritureLib"].str.replace("rounding","Arrondissement") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Building Supplies-Maintenance","Matériaux de construction-Entretien") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Expensed Furniture","Mobilier Dépensé") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Credit for Charges","Crédit pour frais") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Manual P-ment and double payment to be deduct","P-mnt manuel et double paiement à déduire") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Employee insurance travel","Assurance de voyage des employés 2019") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Rent ","Location ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Lunch vouchers ","Bons déjeuner") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Store Room ","Chambre Stocke") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Evaluation ","Évaluation ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Charges ","Frais ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("On Line ","En ligne ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("/Building Supplies/Maintenance","/ Matériaux de construction / Entretien") gl['EcritureLib'] = 
gl["EcritureLib"].str.replace("Music Instruments","Instruments Musicales") gl['EcritureLib'] = gl["EcritureLib"].str.replace("/Employee Awards/Recognition", "/ Récompenses des employés / Reconnaissance") gl['EcritureLib'] = gl["EcritureLib"].str.replace("/Daily Allowance","/Indemnité journalière") gl['EcritureLib'] = gl["EcritureLib"].str.replace("RECLASS ", "RECLASSIFICATION ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Purchase Accounting", "Comptabilité d'achat") gl['EcritureLib'] = gl["EcritureLib"].str.replace( "EXPAT ", " Expatrié ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("FROM ", "DE ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("INVOICE", "FACTURE") gl['EcritureLib'] = gl["EcritureLib"].str.replace("CLEANUP", "APUREMENT") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Repayment", "Restitution") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Office Furniture", "Meubles de bureau") gl['EcritureLib'] = gl["EcritureLib"].str.replace("anti-stress treatments", "traitements anti-stress") gl['EcritureLib'] = gl["EcritureLib"].str.replace("UK Tax Return", "Décl. 
d'impôt Royaume-Uni") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Office Location", "Location de bureau") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Deliver Service", "Service de livraison") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Foreign Office Support", "Soutien aux bureaux étrangères") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Showroom", "Salle d'exposition") gl['EcritureLib'] = gl["EcritureLib"].str.replace("aditional Services", "Services supplémentaires ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Cofee consumption Paris office", "Consommation de café Bureau de Paris") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Consultant ", "Expert-conseil") gl['EcritureLib'] = gl["EcritureLib"].str.replace("INVOICE", "FACTURE") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Rent-", "Location-") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Corporate", "Entreprise") gl['EcritureLib'] = gl["EcritureLib"].str.replace("COST ", "COÛT ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("TRAINING", "Formation") gl['EcritureLib'] = gl["EcritureLib"].str.replace("LIFE DISAB", "Invalidité") gl['EcritureLib'] = gl["EcritureLib"].str.replace("INSU ", "ASSURANCE ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("PATENT AWARD", "BREVET") gl['EcritureLib'] = gl["EcritureLib"].str.replace("EQUIVALENT POUR UNUSED VACATION POUR LEAVE", "CONGÉ DE VACANCES INUTILISÉS") gl['EcritureLib'] = gl["EcritureLib"].str.replace("SPOT ", "") gl['EcritureLib'] = gl["EcritureLib"].str.replace("AIRFARE TRANSFER TO PREPAIDS", "TRANSFERT DE TRANSPORT AÉRIEN À PAYÉ D'AVANCE") gl['EcritureLib'] = gl["EcritureLib"].str.replace("WITHHOLDING", "RETRAIT") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Clear ", "Reglement ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Clear ", "Reglement ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Rent/", "Location/") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Pay ", "Paiement ") 
gl['EcritureLib'] = gl["EcritureLib"].str.replace("PAYMENT", "Paiement ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("French Income Tax Return;", "Déclaration de revenus française;") gl['EcritureLib'] = gl["EcritureLib"].str.replace("REVESERVICES", "SERVICES") gl['EcritureLib'] = gl["EcritureLib"].str.replace("INCLUDED DOUBLE", "DOUBLE INCLUS") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Bank", "Banque") gl['EcritureLib'] = gl["EcritureLib"].str.replace("/Promotional Expenses", "/Frais de promotion") gl['EcritureLib'] = gl["EcritureLib"].str.replace(" ACTIVITY ", " activité ") gl['EcritureLib'] = gl["EcritureLib"].str.replace(" DEFINED BENEFIT LIABILITY", "PASSIF À AVANTAGES DÉTERMINÉES") gl['EcritureLib'] = gl["EcritureLib"].str.replace("COÛT PLUS ", "Revient Majoré") gl['EcritureLib'] = gl["EcritureLib"].str.replace("/Airline Frais", "/Tarifs aériens") gl['EcritureLib'] = gl["EcritureLib"].str.replace("/Tools/Equipment/Lab Supplies", "/Outils / Équipement / Fournitures de laboratoire") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Rent/", "Location/") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Payment Posting", "Paiements") gl['EcritureLib'] = gl["EcritureLib"].str.replace("COMMISSION D’ACCUMULATION", "ACCUMULATIONS DE COMISSIONS") gl['EcritureLib'] = gl["EcritureLib"].str.replace("ImpôtE", "Impôt") gl['EcritureLib'] = gl["EcritureLib"].str.replace("MED.INSU", "MED.ASSURANCE") gl['EcritureLib'] = gl["EcritureLib"].str.replace("APPRENTICESHIP_CONTRIBUTIONS_TRUE_UP", "CONTRIBUTIONS À L'APPRENTISSAGE/TRUE UP") gl['EcritureLib'] = gl["EcritureLib"].str.replace("NET PAY", "SALAIRE NET") gl['EcritureLib'] = gl["EcritureLib"].str.replace("CASH ", "ARGENT ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Repayment ", "Repaiement ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Acct. ", "Comptab. 
") gl['EcritureLib'] = gl["EcritureLib"].str.replace("ACCR ", "ACC ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Accr ", "Acc.") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Cash Balance", "Solde de caisse") gl['EcritureLib'] = gl["EcritureLib"].str.replace("RECLASS ", "RECLASSEMENT ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("VAT FILING ", "Dépôt de TVA ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Needs to be re-booked due", "KI") gl['EcritureLib'] = gl["EcritureLib"].str.replace("reclass from", "reclasser de") gl['EcritureLib'] = gl["EcritureLib"].str.replace("RECLASS FROM", "reclasser de") gl['EcritureLib'] = gl["EcritureLib"].str.replace("PAYROLL", "PAIE") gl['EcritureLib'] = gl["EcritureLib"].str.replace("RECLASS ", "Reclasser") gl['EcritureLib'] = gl["EcritureLib"].str.replace("DEDICTION","DEDUCTION") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Cash","Argent ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("cash ","argent ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("ReclasserIFICATIO","RECLASSEMENT ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("ImpôtS ","Impôts ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Working Repas (Employees Only) ","Repas de travail (employés seulement) ") gl['EcritureLib'] = gl["EcritureLib"].str.replace("/Banque Frais","/Frais Bancaires") gl['EcritureLib'] = gl["EcritureLib"].str.replace("MED. 
INS.","ASSURANCE MED.") gl['EcritureLib'] = gl["EcritureLib"].str.replace("Facture - Brut'","Facture - Brute'") gl['EcritureLib'] = gl["EcritureLib"].str.replace("_20181130_ MK063850","FACTURE COUPA") gl['EcritureLib'] = gl["EcritureLib"].str.replace("_20181130_ MS063849","FACTURE COUPA") gl['EcritureLib'] = gl["EcritureLib"].str.replace("_20181130_ MB063846","FACTURE COUPA") gl['EcritureLib'] = gl["EcritureLib"].str.replace("_20181231_ MK063850","FACTURE COUPA") gl['EcritureLib'] = gl["EcritureLib"].str.replace("_20181231_ MK063850","FACTURE COUPA") gl['EcritureLib'] = gl["EcritureLib"].str.replace("_20190228_ MK063850","FACTURE COUPA") gl['EcritureLib'] = gl["EcritureLib"].str.replace("_20190331_ MB063846","FACTURE COUPA") gl['EcritureLib'] = gl["EcritureLib"].str.replace("_20190430_ MS063849","FACTURE COUPA") gl['EcritureLib'] = gl["EcritureLib"].str.replace("_20190430_ MB063846","FACTURE COUPA") gl['EcritureLib'] = gl['EcritureLib'].str.replace('-', '') gl['EcritureLib'] = gl['EcritureLib'].str.replace('/', '') gl['EcritureLib'] = gl['EcritureLib'].str.replace('Contre Passation', 'CP') mapping_Valuation1 = {" Valuation on": " Évaluation sur"," Valuation on Reverse":" Évaluation sur Contre Passation", " Reverse Posting":" Contre-Passation d'Ecriture - Conversion de devise sur", " Translation Using":" Conversion de devise sur"} mapping_AA1 = {"Reclass from": " Reclassification de", "reclass from": " Reclassification de", "ZEE MEDIA":"ZEE MEDIA Campaignes Numériques", "TRAINING CONTRI. ER JANUARY '19":"FORMATION CONTRI. 
ER JANVIER' 19",
# (continuation) tail of the English -> French label translation dict whose
# opening lines precede this chunk; used below as a bulk substring mapping.
"TAX FEES":"Taxes","SOCIAL SECURITY: URSSAF":"SÉCURITÉ SOCIALE: URSSAF","SOCIAL SECURITY: TRAINING CONTRIBUTIONS":"SÉCURITÉ SOCIALE: CONTRIBUTIONS À LA FORMATION",
"SOCIAL SECURITY: APPRENTICESHIP CONTRIBU":"SÉCURITÉ SOCIALE: CONTRIBUTION À L’APPRENTISSAGE","RSM":"SERVICES DE PAIE RSM EF18","RSA":"SERVICES DE PAIE RSA OCT-JAN",
"PRIVATE HEALTH":"SANTÉ PRIVÉE: ASSURANCE MÉDICALE-AXA/","PENSION: PENSION CONTRIBUTIONS - REUNICA":"PENSION: COTISATIONS DE RETRAITE-REUNICA","PENSION: LIFE & DISABILITY INSURANCE - R":"PENSION: ASSURANCE VIE & INVALIDITÉ-R",
"PENSION JANUARY '19":"PENSION JANVIER '19", "ON CALL JANUARY '19":"Disponible Janvier'19",
"NRE + PROJECT INITIATION FEES":"NRE + FRAIS D’INITIATION AU PROJET (PO 750003","NET PAY JANUARY '19":"Payeante Janvier'19","JANUARY'19":"JANVIER'19",
"LUNCH VOUCHER- WITHHOLDING":"BON DÉJEUNER-RETENUE","HOLIDAY BONUS ACCRUAL FY18/19":"CUMUL DES PRIMES DE VACANCES EF18/19",
"GROSS SALARY JANUARY '19":"SALAIRE BRUT JANVIER' 19","EMEA ACCRUAL P8FY19":"P8FY19 D’ACCUMULATION EMEA","COMMISSION RE-ACCRUAL":"COMMISSION RÉ-ACCUMULATION",
"COMMISSION ACCRUAL":"COMMISSION D’ACCUMULATION","MARCH":"MARS","MAY":"MAI","APRIL":"AVRIL","AUDIT FEES":"HONORAIRES D’AUDIT",
"UNSUBMITTED_UNPOSTED BOA ACCRUAL":"Accumulation BOA non soumise non exposée","UNASSIGNED CREDITCARD BOA ACCRUAL":"NON ASSIGNÉ CREDITCARD BOA ACCUMULATION ",
"EMEA ACCRUAL":"ACCUMULATION EMEA","Exhibit Expenses":"Frais d'exposition","Hotel Tax":"Taxe hôtelière","Company Events":"Événements d'entreprise",
"Public Transport":"Transport public", "Agency Booking Fees":"Frais de réservation d'agence","Working Meals (Employees Only)":"Repas de travail (employés seulement)",
"Airfare":"Billet d'avion","Office Supplies":"Fournitures de bureau","Tolls":"Péages",
"write off difference see e-mail attached":"radiation de la différence voir e-mail ci-joint",
"Manual P/ment and double payment to be deduct":"P/ment manuel et double paiement à déduire","FX DIFFERENCE ON RSU":"DIFFERENCE FX SUR RSU",
"DEFINED BENEFIT LIABILITY-TRUE UP":"RESPONSABILITÉ À PRESTATIONS DÉTERMINÉES-TRUE UP","EXTRA RELEASE FOR STORAGE REVERSED":"EXTRA LIBERATION POUR STOCKAGE CONTREPASSATION",
"RECLASS BANK CHARGES TO CORRECT COST CEN":"RECLASSER LES FRAIS BANCAIRES POUR CORRIGER","PAYROLL INCOME TAXES":"IMPÔTS SUR LES SALAIRES",
"TRAINING TAX TRUE UP":"TAXE DE FORMATION",
"FX DIFFERENCE ON STOCK OPTION EXERCISES":"FX DIFFERENCE SUR LES EXERCICES D'OPTIONS STOCK",
"Airline Frais":"Frais de Transport Aérien","Agency Booking Fees":"Frais de Réservation d'Agence","Computer Supplies":"Fournitures informatiques",
"AUDIT FEES":"FRAIS D'AUDIT",
"HOLIDAY BONUS ACCRUAL ":"ACCUMULATION DE BONUS DE VACANCES","TAX FEES":"FRAIS D'IMPÔT",
"SOCIAL SECURITY: APPRENTICESHIP CONTRIBU":"SÉCURITÉ SOCIALE: CONTRIBUITION À L’APPRENTISSAGE",
"SOCIAL SECURITY: TRAINING CONTRIBUTIONS":"SÉCURITÉ SOCIALE: CONTRIBUTIONS À LA FORMATION",
"TRAVEL COST":"FRAIS DE VOYAGE", "HOUSING TAX":"TAXE SUR LE LOGEMENT",
"PAYROLL INCOME TAXES":"IMPÔTS SUR LE REVENU DE LA PAIE","INCOME TAX-PAS":"IMPÔT SUR LE REVENU-PAS",
"IC SETTLEMENT":"Règlement Interentreprises", "VACATION TAKEN":"VACANCES PRISES",
"SOCIAL SECURITY: APPR. CONTR.":"SÉCURITÉ SOCIALE: CONTRIBUTION À L’APPRENTISSAGE",
"POST OF AVRIL DEC IN CORRECT SIGN":"CORRECTION D'ECRITURE AVRIL DEC"}
# ---------------------------------------------------------------------------
# Translate the JournalLib column (presumably the FEC journal-entry label
# field - confirm against the export spec) from English to French.
# Step 1: bulk substring translation via the two mapping dicts; regex=True
# makes DataFrame.replace do partial (substring) matches instead of
# whole-cell matches.
# ---------------------------------------------------------------------------
gl = gl.replace({"JournalLib":mapping_Valuation1}, regex=True)
gl = gl.replace({"JournalLib":mapping_AA1}, regex=True)
# Step 2: ad-hoc literal substitutions. ORDER MATTERS - later rules rewrite
# the output of earlier ones (e.g. 'Fees'->'Frais' feeds '/Banque Frais'
# below, and 'RECLASS ' is re-replaced under several successive spellings).
# NOTE(review): str.replace is called without regex=False; on pandas versions
# where the default is regex=True, patterns containing '.', '/', '(' etc.
# ("Aug.", "FINE LATE PAY.", "/") act as regular expressions - confirm
# this matches the pandas version in use before changing anything here.
gl['JournalLib'] = gl["JournalLib"].str.replace('COST-PLUS', 'Revient Majoré')
gl['JournalLib'] = gl["JournalLib"].str.replace('PRITVAE HEALTH: MEDICAL INSURANCE', 'SANTÉ PRIVÉE: ASSURANCE MÉDICALE')
gl['JournalLib'] = gl["JournalLib"].str.replace('MEDICAL INSURANCE', 'ASSURANCE MÉDICALE')
gl['JournalLib'] = gl["JournalLib"].str.replace('UNASSIGNED', 'NON ATTRIBUÉ')
gl['JournalLib'] = gl["JournalLib"].str.replace('Payout', 'Paiement')
gl['JournalLib'] = gl["JournalLib"].str.replace('FRINGE COST', 'COÛT MARGINAL')
gl['JournalLib'] = gl["JournalLib"].str.replace('PROJECT INITIATION', 'LANCEMENT DU PROJET')
gl['JournalLib'] = gl["JournalLib"].str.replace('ACCRUAL', 'ACCUMULATION')
gl['JournalLib'] = gl["JournalLib"].str.replace('CREDITCARD', 'CARTE DE CRÉDIT')
gl['JournalLib'] = gl["JournalLib"].str.replace('ACCR ', 'ACCUM ')
gl['JournalLib'] = gl["JournalLib"].str.replace('VAT ', 'TVA ')
gl['JournalLib'] = gl["JournalLib"].str.replace('SOCIAL SECURITY ', 'SÉCURITÉ SOCIALE')
gl['JournalLib'] = gl["JournalLib"].str.replace('SEPTEMBER', 'SEPT')
gl['JournalLib'] = gl["JournalLib"].str.replace('TAXBACK', 'Reboursement')
gl['JournalLib'] = gl["JournalLib"].str.replace('REPORT', '')
gl['JournalLib'] = gl["JournalLib"].str.replace("Reverse Posting", "Contre Passation d'Ecriture")
gl['JournalLib'] = gl["JournalLib"].str.replace("BASE RENT", "Location Base")
gl['JournalLib'] = gl["JournalLib"].str.replace("Rent ", "Location ")
gl['JournalLib'] = gl["JournalLib"].str.replace("RENT ", "Location ")
gl['JournalLib'] = gl["JournalLib"].str.replace("CLEARING", "compensation ")
gl['JournalLib'] = gl["JournalLib"].str.replace("clearing", "compensation ")
gl['JournalLib'] = gl["JournalLib"].str.replace("BILLING CHARGES", "FRAIS DE FACTURATION ")
gl['JournalLib'] = gl["JournalLib"].str.replace("UNPAID", "NON PAYÉ")
gl['JournalLib'] = gl["JournalLib"].str.replace("PROPERTY TAX", "IMPÔT FONCIER ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Trans. Using", "Conversion sur")
gl['JournalLib'] = gl["JournalLib"].str.replace("SALARIES", "Salaires")
gl['JournalLib'] = gl["JournalLib"].str.replace("Refund", "Remboursement")
gl['JournalLib'] = gl["JournalLib"].str.replace("REFUND", "Remboursement")
gl['JournalLib'] = gl["JournalLib"].str.replace("no invoice", "pas de facture")
gl['JournalLib'] = gl["JournalLib"].str.replace("COST-PLUS SERVICE REVENUE", "Revenus de service Revient Majoré")
gl['JournalLib'] = gl["JournalLib"].str.replace("SETTLEMENT", "RÈGLEMENT ")
gl['JournalLib'] = gl["JournalLib"].str.replace("PURCHASE", "ACHAT")
gl['JournalLib'] = gl["JournalLib"].str.replace("NON-CP SETTLE", "RÈGLEMENT NON-CP")
gl['JournalLib'] = gl["JournalLib"].str.replace("PAID ", " Payé ")
gl['JournalLib'] = gl["JournalLib"].str.replace("FEES ", "Frais")
# Month names, mixed-case then upper-case then abbreviated forms.
gl['JournalLib'] = gl["JournalLib"].str.replace("January", "Janvier")
gl['JournalLib'] = gl["JournalLib"].str.replace("February", "Février")
gl['JournalLib'] = gl["JournalLib"].str.replace("March", "Mars")
gl['JournalLib'] = gl["JournalLib"].str.replace("April", "Avril")
gl['JournalLib'] = gl["JournalLib"].str.replace("May", "Mai")
gl['JournalLib'] = gl["JournalLib"].str.replace("June", "Juin")
gl['JournalLib'] = gl["JournalLib"].str.replace("July", "Juillet")
gl['JournalLib'] = gl["JournalLib"].str.replace("September", "Septembre")
gl['JournalLib'] = gl["JournalLib"].str.replace("Aug.", "Août")
gl['JournalLib'] = gl["JournalLib"].str.replace("JANUARY", "Janvier")
gl['JournalLib'] = gl["JournalLib"].str.replace("FEBRUARY", "Février")
gl['JournalLib'] = gl["JournalLib"].str.replace("MARCH", "Mars")
gl['JournalLib'] = gl["JournalLib"].str.replace("APRIL", "Avril")
gl['JournalLib'] = gl["JournalLib"].str.replace("MAY", "Mai")
gl['JournalLib'] = gl["JournalLib"].str.replace("JUNE", "Juin")
gl['JournalLib'] = gl["JournalLib"].str.replace("JULY", "Juillet")
gl['JournalLib'] = gl["JournalLib"].str.replace("SEPTEMBER", "Septembre")
gl['JournalLib'] = gl["JournalLib"].str.replace("AUGUST.", "Août")
gl['JournalLib'] = gl["JournalLib"].str.replace("NOVEMBER.", "Novembre")
gl['JournalLib'] = gl["JournalLib"].str.replace("DECEMBER.", "Décembre")
gl['JournalLib'] = gl["JournalLib"].str.replace("December", "Décembre")
gl['JournalLib'] = gl["JournalLib"].str.replace("Feb.", "Fév.")
gl['JournalLib'] = gl["JournalLib"].str.replace("Mar.", "Mars")
gl['JournalLib'] = gl["JournalLib"].str.replace("Apr.", "Avril")
# NOTE(review): "Aug." already replaced above; these two repeats are no-ops.
gl['JournalLib'] = gl["JournalLib"].str.replace("Aug.", "Août")
gl['JournalLib'] = gl["JournalLib"].str.replace("Aug.", "Août")
gl['JournalLib'] = gl["JournalLib"].str.replace("Reverse ", "Contre-passation ")
gl['JournalLib'] = gl["JournalLib"].str.replace("INTEREST CHARGE", "CHARGE D'INTÉRÊT")
gl['JournalLib'] = gl["JournalLib"].str.replace("-SICK LEAVE PAY", "-Paiement congé maladie")
gl['JournalLib'] = gl["JournalLib"].str.replace("RECLASSEMENTIFICATION", "RECLASSIFICATION")
gl['JournalLib'] = gl["JournalLib"].str.replace("INSTALMENT", "VERSEMENT")
gl['JournalLib'] = gl["JournalLib"].str.replace("FIRST", "1ere")
gl['JournalLib'] = gl["JournalLib"].str.replace("FINE LATE PAY.", "Amende pour retard de paiement")
gl['JournalLib'] = gl["JournalLib"].str.replace("-PATERNITY PAY", "Indemnités de paternité")
gl['JournalLib'] = gl["JournalLib"].str.replace("SOCIAL SECURITY:", "SÉCURITÉ SOCIALE:")
gl['JournalLib'] = gl["JournalLib"].str.replace("Trip from", "Voyage de:")
gl['JournalLib'] = gl["JournalLib"].str.replace(" To ", " à")
gl['JournalLib'] = gl["JournalLib"].str.replace("Shipping", "Livraison")
gl['JournalLib'] = gl["JournalLib"].str.replace("VOXEET INTEGRATION COSTS", "COÛTS D'INTÉGRATION DE VOXEET")
gl['JournalLib'] = gl["JournalLib"].str.replace("INCOME TAX", "IMPÔT SUR LE REVENU")
gl['JournalLib'] = gl["JournalLib"].str.replace('Rideshare', 'Covoiturage')
gl['JournalLib'] = gl["JournalLib"].str.replace('Travel Meals', 'Repas de Travail')
gl['JournalLib'] = gl["JournalLib"].str.replace('Fees', 'Frais')
gl['JournalLib'] = gl["JournalLib"].str.replace('Phone', 'Téléphone')
gl['JournalLib'] = gl["JournalLib"].str.replace("Books", "Abonnements")
gl['JournalLib'] = gl["JournalLib"].str.replace("Subcriptions", "Location Base")
gl['JournalLib'] = gl["JournalLib"].str.replace("Meals", "Repas")
gl['JournalLib'] = gl["JournalLib"].str.replace("Entertainment", "divertissement ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Third Party", "tiers ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Training Fees", "Frais d0 Formation")
gl['JournalLib'] = gl["JournalLib"].str.replace("Conferences/Tradeshows Registratio", "Conférences/Tradeshows Enregistrement")
gl['JournalLib'] = gl["JournalLib"].str.replace("FOR", "POUR")
gl['JournalLib'] = gl["JournalLib"].str.replace("ROUNDING", "ARRONDISSEMENT")
gl['JournalLib'] = gl["JournalLib"].str.replace("STORAGE", "STOCKAGE")
gl['JournalLib'] = gl["JournalLib"].str.replace("VACATION ACCURAL", "Vacances Accumulées")
gl['JournalLib'] = gl["JournalLib"].str.replace("RECEIVABLE ", "Recevables")
gl['JournalLib'] = gl["JournalLib"].str.replace("AFTER PAYOUT ", "APRÈS PAIEMENT")
gl['JournalLib'] = gl["JournalLib"].str.replace("CLEAN UP ", "APUREMENT")
gl['JournalLib'] = gl["JournalLib"].str.replace("EMPLOYEE TRAVEL INSUR ", "ASSURANCE DE VOYAGE DES EMPLOYÉS")
gl['JournalLib'] = gl["JournalLib"].str.replace("CORRECTION OF", "CORRECTION DE")
gl['JournalLib'] = gl["JournalLib"].str.replace("TAXES PAYROLL", "IMPÔTS SUR LA MASSE SALARIALE")
gl['JournalLib'] = gl["JournalLib"].str.replace("ACCOUNT", "COMPTE")
gl['JournalLib'] = gl["JournalLib"].str.replace("TAX", "Impôt")
gl['JournalLib'] = gl["JournalLib"].str.replace("life disab", "Incapacité de vie")
# NOTE(review): 'TAX' was already rewritten to 'Impôt' just above, so the two
# *-TAX rules below can no longer match as written - possible ordering bug.
gl['JournalLib'] = gl["JournalLib"].str.replace("HOUSING TAX","TAXE D'HABITATION")
gl['JournalLib'] = gl["JournalLib"].str.replace("GROSS SALARY","SALAIRE BRUT")
gl['JournalLib'] = gl["JournalLib"].str.replace("Cleaning Services","Nettoyage")
gl['JournalLib'] = gl["JournalLib"].str.replace("Freight","Fret")
gl['JournalLib'] = gl["JournalLib"].str.replace("Membership","adhésion")
gl['JournalLib'] = gl["JournalLib"].str.replace("Air cooling Maintenance","Entretien de refroidissement de l'air")
gl['JournalLib'] = gl["JournalLib"].str.replace("Power on Demand Platform","Plateforme d'energie à la demande")
gl['JournalLib'] = gl["JournalLib"].str.replace("Sanitaire room installation"," Installation de la salle sanitaire")
gl['JournalLib'] = gl["JournalLib"].str.replace("subscription","abonnement")
gl['JournalLib'] = gl["JournalLib"].str.replace("Coffee supplies "," Fournitures de café")
gl['JournalLib'] = gl["JournalLib"].str.replace("Duty and Tax ","Devoir et fiscalité")
gl['JournalLib'] = gl["JournalLib"].str.replace("Electricity ","Electricité ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Lunch vouchers ","Bons déjeuner")
gl['JournalLib'] = gl["JournalLib"].str.replace("Security monitoring","Surveillance de la sécurité")
gl['JournalLib'] = gl["JournalLib"].str.replace("Water", "L'EAU")
gl['JournalLib'] = gl["JournalLib"].str.replace("Statutory Audit", "Audit statutaire")
gl['JournalLib'] = gl["JournalLib"].str.replace(" Meeting room screen installation", "Installation de l'écran de la salle de réunion")
# NOTE(review): duplicate "Water" rules - no-ops after the first occurrence.
gl['JournalLib'] = gl["JournalLib"].str.replace("Water", "L'EAU")
gl['JournalLib'] = gl["JournalLib"].str.replace("Water", "L'EAU")
gl['JournalLib'] = gl["JournalLib"].str.replace("Tax Credit FY 2016", "Crédit d'impôt Exercice 2016")
gl['JournalLib'] = gl["JournalLib"].str.replace("Bank of America Merill Lynch-T&E statement","Déclaration de Merill Lynch")
gl['JournalLib'] = gl["JournalLib"].str.replace("English Translation", "Traduction anglaise")
gl['JournalLib'] = gl["JournalLib"].str.replace("Office Rent", "Location de Bureau")
gl['JournalLib'] = gl["JournalLib"].str.replace("Annual Electrical Verification", "Vérification électrique annuelle ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Health costs ", "Coûts santé")
gl['JournalLib'] = gl["JournalLib"].str.replace("Unlimited-receipt and policy audit", "Vérification illimitée des reçus et audites")
gl['JournalLib'] = gl["JournalLib"].str.replace("Water fountain ", "Fontaine d'eau")
gl['JournalLib'] = gl["JournalLib"].str.replace("Quartely control visit", "Visite de contrôle trimestrielle")
gl['JournalLib'] = gl["JournalLib"].str.replace("Fire extinguishers annual check", "Vérification annuelle des extincteurs")
gl['JournalLib'] = gl["JournalLib"].str.replace("showroom rent", "location de salle d'exposition")
gl['JournalLib'] = gl["JournalLib"].str.replace("AND ACTUAL RECEIV","ET RECETTES RÉELLES")
gl['JournalLib'] = gl["JournalLib"].str.replace("FILING","DÉPÔT")
gl['JournalLib'] = gl["JournalLib"].str.replace("ORDERS","ORDRES")
gl['JournalLib'] = gl["JournalLib"].str.replace("EXCLUDED -DUMMY CREDIT","EXCLU")
gl['JournalLib'] = gl["JournalLib"].str.replace("RELARING TO","RELATIF À")
gl['JournalLib'] = gl["JournalLib"].str.replace("CLEAN UP-","APUREMENT-")
gl['JournalLib'] = gl["JournalLib"].str.replace("2ND INSTALLEMENT","2ème versement")
gl['JournalLib'] = gl["JournalLib"].str.replace("DOUBLE PAYMENT","DOUBLE PAIEMENT")
gl['JournalLib'] = gl["JournalLib"].str.replace("CLEAN UP-","APUREMENT-")
gl['JournalLib'] = gl["JournalLib"].str.replace("DUTIES","DROITS")
gl['JournalLib'] = gl["JournalLib"].str.replace("Previous balance","Solde Précédent")
gl['JournalLib'] = gl["JournalLib"].str.replace("Cash fx","Cash FX")
gl['JournalLib'] = gl["JournalLib"].str.replace("PAYROLL INCOME","REVENU DE PAIE")
gl['JournalLib'] = gl["JournalLib"].str.replace("TELEPHONE CHARGES","Frais de Téléphone")
gl['JournalLib'] = gl["JournalLib"].str.replace("Clearing","Compensation")
gl['JournalLib'] = gl["JournalLib"].str.replace("Hotel","Hôtel")
gl['JournalLib'] = gl["JournalLib"].str.replace("Miscellaneous","Divers")
gl['JournalLib'] = gl["JournalLib"].str.replace("Corporate Card-Out-of-Poc","")
gl['JournalLib'] = gl["JournalLib"].str.replace("Traveling Dolby Empl","Employé itinérant de Dolby")
gl['JournalLib'] = gl["JournalLib"].str.replace("Tools-Equipment-Lab Supplies","Outils-Equipement-Fournitures de laboratoire")
gl['JournalLib'] = gl["JournalLib"].str.replace("rounding","Arrondissement")
gl['JournalLib'] = gl["JournalLib"].str.replace("Building Supplies-Maintenance","Matériaux de construction-Entretien")
gl['JournalLib'] = gl["JournalLib"].str.replace("Expensed Furniture","Mobilier Dépensé")
gl['JournalLib'] = gl["JournalLib"].str.replace("Credit for Charges","Crédit pour frais")
gl['JournalLib'] = gl["JournalLib"].str.replace("Manual P-ment and double payment to be deduct","P-mnt manuel et double paiement à déduire")
gl['JournalLib'] = gl["JournalLib"].str.replace("Employee insurance travel","Assurance de voyage des employés 2019")
gl['JournalLib'] = gl["JournalLib"].str.replace("Rent ","Location ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Lunch vouchers ","Bons déjeuner")
gl['JournalLib'] = gl["JournalLib"].str.replace("Store Room ","Chambre Stocke")
gl['JournalLib'] = gl["JournalLib"].str.replace("Evaluation ","Évaluation ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Charges ","Frais ")
gl['JournalLib'] = gl["JournalLib"].str.replace("On Line ","En ligne ")
gl['JournalLib'] = gl["JournalLib"].str.replace("/Building Supplies/Maintenance","/ Matériaux de construction / Entretien")
gl['JournalLib'] = gl["JournalLib"].str.replace("Music Instruments","Instruments Musicales")
gl['JournalLib'] = gl["JournalLib"].str.replace("/Employee Awards/Recognition", "/ Récompenses des employés / Reconnaissance")
gl['JournalLib'] = gl["JournalLib"].str.replace("/Daily Allowance","/Indemnité journalière")
gl['JournalLib'] = gl["JournalLib"].str.replace("RECLASS ", "RECLASSIFICATION ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Purchase Accounting", "Comptabilité d'achat")
gl['JournalLib'] = gl["JournalLib"].str.replace( "EXPAT ", " Expatrié ")
gl['JournalLib'] = gl["JournalLib"].str.replace("FROM ", "DE ")
gl['JournalLib'] = gl["JournalLib"].str.replace("INVOICE", "FACTURE")
gl['JournalLib'] = gl["JournalLib"].str.replace("CLEANUP", "APUREMENT")
gl['JournalLib'] = gl["JournalLib"].str.replace("Repayment", "Restitution")
gl['JournalLib'] = gl["JournalLib"].str.replace("Office Furniture", "Meubles de bureau")
gl['JournalLib'] = gl["JournalLib"].str.replace("anti-stress treatments", "traitements anti-stress")
gl['JournalLib'] = gl["JournalLib"].str.replace("UK Tax Return", "Décl. d'impôt Royaume-Uni")
gl['JournalLib'] = gl["JournalLib"].str.replace("Office Location", "Location de bureau")
gl['JournalLib'] = gl["JournalLib"].str.replace("Deliver Service", "Service de livraison")
gl['JournalLib'] = gl["JournalLib"].str.replace("Foreign Office Support", "Soutien aux bureaux étrangères")
gl['JournalLib'] = gl["JournalLib"].str.replace("Showroom", "Salle d'exposition")
gl['JournalLib'] = gl["JournalLib"].str.replace("aditional Services", "Services supplémentaires ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Cofee consumption Paris office", "Consommation de café Bureau de Paris")
gl['JournalLib'] = gl["JournalLib"].str.replace("Consultant ", "Expert-conseil")
gl['JournalLib'] = gl["JournalLib"].str.replace("INVOICE", "FACTURE")
gl['JournalLib'] = gl["JournalLib"].str.replace("Rent-", "Location-")
gl['JournalLib'] = gl["JournalLib"].str.replace("Corporate", "Entreprise")
gl['JournalLib'] = gl["JournalLib"].str.replace("COST ", "COÛT ")
gl['JournalLib'] = gl["JournalLib"].str.replace("TRAINING", "Formation")
gl['JournalLib'] = gl["JournalLib"].str.replace("LIFE DISAB", "Invalidité")
gl['JournalLib'] = gl["JournalLib"].str.replace("INSU ", "ASSURANCE ")
gl['JournalLib'] = gl["JournalLib"].str.replace("PATENT AWARD", "BREVET")
# "POUR" appears here because 'FOR' -> 'POUR' already ran above.
gl['JournalLib'] = gl["JournalLib"].str.replace("EQUIVALENT POUR UNUSED VACATION POUR LEAVE", "CONGÉ DE VACANCES INUTILISÉS")
gl['JournalLib'] = gl["JournalLib"].str.replace("SPOT ", "")
gl['JournalLib'] = gl["JournalLib"].str.replace("AIRFARE TRANSFER TO PREPAIDS", "TRANSFERT DE TRANSPORT AÉRIEN À PAYÉ D'AVANCE")
gl['JournalLib'] = gl["JournalLib"].str.replace("WITHHOLDING", "RETRAIT")
gl['JournalLib'] = gl["JournalLib"].str.replace("Clear ", "Reglement ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Clear ", "Reglement ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Rent/", "Location/")
gl['JournalLib'] = gl["JournalLib"].str.replace("Pay ", "Paiement ")
gl['JournalLib'] = gl["JournalLib"].str.replace("PAYMENT", "Paiement ")
gl['JournalLib'] = gl["JournalLib"].str.replace("French Income Tax Return;", "Déclaration de revenus française;")
gl['JournalLib'] = gl["JournalLib"].str.replace("REVESERVICES", "SERVICES")
gl['JournalLib'] = gl["JournalLib"].str.replace("INCLUDED DOUBLE", "DOUBLE INCLUS")
gl['JournalLib'] = gl["JournalLib"].str.replace("Bank", "Banque")
gl['JournalLib'] = gl["JournalLib"].str.replace("/Promotional Expenses", "/Frais de promotion")
gl['JournalLib'] = gl["JournalLib"].str.replace(" ACTIVITY ", " activité ")
gl['JournalLib'] = gl["JournalLib"].str.replace(" DEFINED BENEFIT LIABILITY", "PASSIF À AVANTAGES DÉTERMINÉES")
gl['JournalLib'] = gl["JournalLib"].str.replace("COÛT PLUS ", "Revient Majoré")
gl['JournalLib'] = gl["JournalLib"].str.replace("/Airline Frais", "/Tarifs aériens")
gl['JournalLib'] = gl["JournalLib"].str.replace("/Tools/Equipment/Lab Supplies", "/Outils / Équipement / Fournitures de laboratoire")
gl['JournalLib'] = gl["JournalLib"].str.replace("Rent/", "Location/")
gl['JournalLib'] = gl["JournalLib"].str.replace("Payment Posting", "Paiements")
gl['JournalLib'] = gl["JournalLib"].str.replace("COMMISSION D’ACCUMULATION", "ACCUMULATIONS DE COMISSIONS")
gl['JournalLib'] = gl["JournalLib"].str.replace("ImpôtE", "Impôt")
gl['JournalLib'] = gl["JournalLib"].str.replace("MED.INSU", "MED.ASSURANCE")
gl['JournalLib'] = gl["JournalLib"].str.replace("APPRENTICESHIP_CONTRIBUTIONS_TRUE_UP", "CONTRIBUTIONS À L'APPRENTISSAGE/TRUE UP")
gl['JournalLib'] = gl["JournalLib"].str.replace("NET PAY", "SALAIRE NET")
gl['JournalLib'] = gl["JournalLib"].str.replace("CASH ", "ARGENT ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Repayment ", "Repaiement ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Acct. ", "Comptab. ")
gl['JournalLib'] = gl["JournalLib"].str.replace("ACCR ", "ACC ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Accr ", "Acc.")
gl['JournalLib'] = gl["JournalLib"].str.replace("Cash Balance", "Solde de caisse")
gl['JournalLib'] = gl["JournalLib"].str.replace("RECLASS ", "RECLASSEMENT ")
gl['JournalLib'] = gl["JournalLib"].str.replace("VAT FILING ", "Dépôt de TVA ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Needs to be re-booked due", "KI")
gl['JournalLib'] = gl["JournalLib"].str.replace("reclass from", "reclasser de")
gl['JournalLib'] = gl["JournalLib"].str.replace("RECLASS FROM", "reclasser de")
gl['JournalLib'] = gl["JournalLib"].str.replace("PAYROLL", "PAIE")
gl['JournalLib'] = gl["JournalLib"].str.replace("RECLASS ", "Reclasser")
gl['JournalLib'] = gl["JournalLib"].str.replace("DEDICTION","DEDUCTION")
gl['JournalLib'] = gl["JournalLib"].str.replace("Cash","Argent ")
gl['JournalLib'] = gl["JournalLib"].str.replace("cash ","argent ")
# Clean-up passes for artifacts produced by the earlier blanket rules
# ('RECLASS ' -> 'Reclasser', 'TAX' -> 'Impôt').
gl['JournalLib'] = gl["JournalLib"].str.replace("ReclasserIFICATIO","RECLASSEMENT ")
gl['JournalLib'] = gl["JournalLib"].str.replace("ImpôtS ","Impôts ")
gl['JournalLib'] = gl["JournalLib"].str.replace("Working Repas (Employees Only) ","Repas de travail (employés seulement) ")
gl['JournalLib'] = gl["JournalLib"].str.replace("/Banque Frais","/Frais Bancaires")
gl['JournalLib'] = gl["JournalLib"].str.replace("MED. INS.","ASSURANCE MED.")
gl['JournalLib'] = gl["JournalLib"].str.replace("AJE WIRE LOG TRAN","AJE VERSEMENT")
gl['JournalLib'] = gl["JournalLib"].str.replace("JUN'","JUIN'")
gl['JournalLib'] = gl["JournalLib"].str.replace("Deferred Rent18 rue de Lo","Loyer différé 18 Rue de Lo")
gl['JournalLib'] = gl["JournalLib"].str.replace("Facture - Brut'","Facture - Brute")
gl['JournalLib'] = gl["JournalLib"].str.replace("T&E","VD")
gl['JournalLib'] = gl["JournalLib"].str.replace("/","")
gl['JournalLib'] = gl["JournalLib"].str.replace("Inv","Facture")
gl['JournalLib'] = gl["JournalLib"].str.replace("2019`","2019")
gl['JournalLib'] = gl["JournalLib"].str.replace("-2014V","")
# ---------------------------------------------------------------------------
# Re-declare the same two translation dicts (this overwrites the earlier
# definitions of mapping_Valuation1 / mapping_AA1), then apply the identical
# English -> French cleanup to the PieceRef column.
# ---------------------------------------------------------------------------
mapping_Valuation1 = {" Valuation on": " Évaluation sur"," Valuation on Reverse":" Évaluation sur Contre Passation",
" Reverse Posting":" Contre-Passation d'Ecriture - Conversion de devise sur", " Translation Using":" Conversion de devise sur"}
mapping_AA1 = {"Reclass from": " Reclassification de", "reclass from": " Reclassification de",
"ZEE MEDIA":"ZEE MEDIA Campaignes Numériques",
"TRAINING CONTRI. ER JANUARY '19":"FORMATION CONTRI. ER JANVIER' 19",
"TAX FEES":"Taxes","SOCIAL SECURITY: URSSAF":"SÉCURITÉ SOCIALE: URSSAF","SOCIAL SECURITY: TRAINING CONTRIBUTIONS":"SÉCURITÉ SOCIALE: CONTRIBUTIONS À LA FORMATION",
"SOCIAL SECURITY: APPRENTICESHIP CONTRIBU":"SÉCURITÉ SOCIALE: CONTRIBUTION À L’APPRENTISSAGE","RSM":"SERVICES DE PAIE RSM EF18","RSA":"SERVICES DE PAIE RSA OCT-JAN",
"PRIVATE HEALTH":"SANTÉ PRIVÉE: ASSURANCE MÉDICALE-AXA/","PENSION: PENSION CONTRIBUTIONS - REUNICA":"PENSION: COTISATIONS DE RETRAITE-REUNICA","PENSION: LIFE & DISABILITY INSURANCE - R":"PENSION: ASSURANCE VIE & INVALIDITÉ-R",
"PENSION JANUARY '19":"PENSION JANVIER '19", "ON CALL JANUARY '19":"Disponible Janvier'19",
"NRE + PROJECT INITIATION FEES":"NRE + FRAIS D’INITIATION AU PROJET (PO 750003","NET PAY JANUARY '19":"Payeante Janvier'19","JANUARY'19":"JANVIER'19",
"LUNCH VOUCHER- WITHHOLDING":"BON DÉJEUNER-RETENUE","HOLIDAY BONUS ACCRUAL FY18/19":"CUMUL DES PRIMES DE VACANCES EF18/19",
"GROSS SALARY JANUARY '19":"SALAIRE BRUT JANVIER' 19","EMEA ACCRUAL P8FY19":"P8FY19 D’ACCUMULATION EMEA","COMMISSION RE-ACCRUAL":"COMMISSION RÉ-ACCUMULATION",
"COMMISSION ACCRUAL":"COMMISSION D’ACCUMULATION","MARCH":"MARS","MAY":"MAI","APRIL":"AVRIL","AUDIT FEES":"HONORAIRES D’AUDIT",
"UNSUBMITTED_UNPOSTED BOA ACCRUAL":"Accumulation BOA non soumise non exposée","UNASSIGNED CREDITCARD BOA ACCRUAL":"NON ASSIGNÉ CREDITCARD BOA ACCUMULATION ",
"EMEA ACCRUAL":"ACCUMULATION EMEA","Exhibit Expenses":"Frais d'exposition","Hotel Tax":"Taxe hôtelière","Company Events":"Événements d'entreprise",
"Public Transport":"Transport public", "Agency Booking Fees":"Frais de réservation d'agence","Working Meals (Employees Only)":"Repas de travail (employés seulement)",
"Airfare":"Billet d'avion","Office Supplies":"Fournitures de bureau","Tolls":"Péages",
"write off difference see e-mail attached":"radiation de la différence voir e-mail ci-joint",
"Manual P/ment and double payment to be deduct":"P/ment manuel et double paiement à déduire","FX DIFFERENCE ON RSU":"DIFFERENCE FX SUR RSU",
"DEFINED BENEFIT LIABILITY-TRUE UP":"RESPONSABILITÉ À PRESTATIONS DÉTERMINÉES-TRUE UP","EXTRA RELEASE FOR STORAGE REVERSED":"EXTRA LIBERATION POUR STOCKAGE CONTREPASSATION",
"RECLASS BANK CHARGES TO CORRECT COST CEN":"RECLASSER LES FRAIS BANCAIRES POUR CORRIGER","PAYROLL INCOME TAXES":"IMPÔTS SUR LES SALAIRES",
"TRAINING TAX TRUE UP":"TAXE DE FORMATION",
"FX DIFFERENCE ON STOCK OPTION EXERCISES":"FX DIFFERENCE SUR LES EXERCICES D'OPTIONS STOCK",
"Airline Frais":"Frais de Transport Aérien","Agency Booking Fees":"Frais de Réservation d'Agence","Computer Supplies":"Fournitures informatiques",
"AUDIT FEES":"FRAIS D'AUDIT",
"HOLIDAY BONUS ACCRUAL ":"ACCUMULATION DE BONUS DE VACANCES","TAX FEES":"FRAIS D'IMPÔT",
"SOCIAL SECURITY: APPRENTICESHIP CONTRIBU":"SÉCURITÉ SOCIALE: CONTRIBUITION À L’APPRENTISSAGE",
"SOCIAL SECURITY: TRAINING CONTRIBUTIONS":"SÉCURITÉ SOCIALE: CONTRIBUTIONS À LA FORMATION",
"TRAVEL COST":"FRAIS DE VOYAGE", "HOUSING TAX":"TAXE SUR LE LOGEMENT",
"PAYROLL INCOME TAXES":"IMPÔTS SUR LE REVENU DE LA PAIE","INCOME TAX-PAS":"IMPÔT SUR LE REVENU-PAS",
"IC SETTLEMENT":"Règlement Interentreprises", "VACATION TAKEN":"VACANCES PRISES",
"SOCIAL SECURITY: APPR. CONTR.":"SÉCURITÉ SOCIALE: CONTRIBUTION À L’APPRENTISSAGE",
"POST OF AVRIL DEC IN CORRECT SIGN":"CORRECTION D'ECRITURE AVRIL DEC"}
# Same two-step cleanup, now on PieceRef: bulk dict translation first, then
# the ordered literal substitutions (order-sensitive, as above).
gl = gl.replace({"PieceRef":mapping_Valuation1}, regex=True)
gl = gl.replace({"PieceRef":mapping_AA1}, regex=True)
gl['PieceRef'] = gl["PieceRef"].str.replace('COST-PLUS', 'Revient Majoré')
gl['PieceRef'] = gl["PieceRef"].str.replace('PRITVAE HEALTH: MEDICAL INSURANCE', 'SANTÉ PRIVÉE: ASSURANCE MÉDICALE')
gl['PieceRef'] = gl["PieceRef"].str.replace('MEDICAL INSURANCE', 'ASSURANCE MÉDICALE')
gl['PieceRef'] = gl["PieceRef"].str.replace('UNASSIGNED', 'NON ATTRIBUÉ')
gl['PieceRef'] = gl["PieceRef"].str.replace('Payout', 'Paiement')
gl['PieceRef'] = gl["PieceRef"].str.replace('FRINGE COST', 'COÛT MARGINAL')
gl['PieceRef'] = gl["PieceRef"].str.replace('PROJECT INITIATION', 'LANCEMENT DU PROJET')
gl['PieceRef'] = gl["PieceRef"].str.replace('ACCRUAL', 'ACCUMULATION')
gl['PieceRef'] = gl["PieceRef"].str.replace('CREDITCARD', 'CARTE DE CRÉDIT')
gl['PieceRef'] = gl["PieceRef"].str.replace('ACCR ', 'ACCUM ')
gl['PieceRef'] = gl["PieceRef"].str.replace('VAT ', 'TVA ')
gl['PieceRef'] = gl["PieceRef"].str.replace('SOCIAL SECURITY ', 'SÉCURITÉ SOCIALE')
gl['PieceRef'] = gl["PieceRef"].str.replace('SEPTEMBER', 'SEPT')
gl['PieceRef'] = gl["PieceRef"].str.replace('TAXBACK', 'Reboursement')
gl['PieceRef'] = gl["PieceRef"].str.replace('REPORT', '')
gl['PieceRef'] = gl["PieceRef"].str.replace("Reverse Posting", "Contre Passation d'Ecriture")
gl['PieceRef'] = gl["PieceRef"].str.replace("BASE RENT", "Location Base")
gl['PieceRef'] = gl["PieceRef"].str.replace("Rent ", "Location ")
gl['PieceRef'] = gl["PieceRef"].str.replace("RENT ", "Location ")
gl['PieceRef'] = gl["PieceRef"].str.replace("CLEARING", "compensation ")
gl['PieceRef'] = gl["PieceRef"].str.replace("clearing", "compensation ")
gl['PieceRef'] = gl["PieceRef"].str.replace("BILLING CHARGES", "FRAIS DE FACTURATION ")
gl['PieceRef'] = gl["PieceRef"].str.replace("UNPAID", "NON PAYÉ")
gl['PieceRef'] = gl["PieceRef"].str.replace("PROPERTY TAX", "IMPÔT FONCIER ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Trans. Using", "Conversion sur")
gl['PieceRef'] = gl["PieceRef"].str.replace("SALARIES", "Salaires")
gl['PieceRef'] = gl["PieceRef"].str.replace("Refund", "Remboursement")
gl['PieceRef'] = gl["PieceRef"].str.replace("REFUND", "Remboursement")
gl['PieceRef'] = gl["PieceRef"].str.replace("no invoice", "pas de facture")
gl['PieceRef'] = gl["PieceRef"].str.replace("COST-PLUS SERVICE REVENUE", "Revenus de service Revient Majoré")
gl['PieceRef'] = gl["PieceRef"].str.replace("SETTLEMENT", "RÈGLEMENT ")
gl['PieceRef'] = gl["PieceRef"].str.replace("PURCHASE", "ACHAT")
gl['PieceRef'] = gl["PieceRef"].str.replace("NON-CP SETTLE", "RÈGLEMENT NON-CP")
gl['PieceRef'] = gl["PieceRef"].str.replace("PAID ", " Payé ")
gl['PieceRef'] = gl["PieceRef"].str.replace("FEES ", "Frais")
gl['PieceRef'] = gl["PieceRef"].str.replace("January", "Janvier")
gl['PieceRef'] = gl["PieceRef"].str.replace("February", "Février")
gl['PieceRef'] = gl["PieceRef"].str.replace("March", "Mars")
gl['PieceRef'] = gl["PieceRef"].str.replace("April", "Avril")
gl['PieceRef'] = gl["PieceRef"].str.replace("May", "Mai")
gl['PieceRef'] = gl["PieceRef"].str.replace("June", "Juin")
gl['PieceRef'] = gl["PieceRef"].str.replace("July", "Juillet")
gl['PieceRef'] = gl["PieceRef"].str.replace("September", "Septembre")
gl['PieceRef'] = gl["PieceRef"].str.replace("Aug.", "Août")
gl['PieceRef'] = gl["PieceRef"].str.replace("JANUARY", "Janvier")
gl['PieceRef'] = gl["PieceRef"].str.replace("FEBRUARY", "Février")
gl['PieceRef'] = gl["PieceRef"].str.replace("MARCH", "Mars")
gl['PieceRef'] = gl["PieceRef"].str.replace("APRIL", "Avril")
gl['PieceRef'] = gl["PieceRef"].str.replace("MAY", "Mai")
gl['PieceRef'] = gl["PieceRef"].str.replace("JUNE", "Juin")
gl['PieceRef'] = gl["PieceRef"].str.replace("JULY", "Juillet")
gl['PieceRef'] = gl["PieceRef"].str.replace("SEPTEMBER", "Septembre")
gl['PieceRef'] = gl["PieceRef"].str.replace("AUGUST.", "Août")
gl['PieceRef'] = gl["PieceRef"].str.replace("NOVEMBER.", "Novembre")
gl['PieceRef'] = gl["PieceRef"].str.replace("DECEMBER.", "Décembre")
gl['PieceRef'] = gl["PieceRef"].str.replace("December", "Décembre")
gl['PieceRef'] = gl["PieceRef"].str.replace("Feb.", "Fév.")
gl['PieceRef'] = gl["PieceRef"].str.replace("Mar.", "Mars")
gl['PieceRef'] = gl["PieceRef"].str.replace("Apr.", "Avril")
# NOTE(review): duplicate "Aug." rules - no-ops after the first occurrence.
gl['PieceRef'] = gl["PieceRef"].str.replace("Aug.", "Août")
gl['PieceRef'] = gl["PieceRef"].str.replace("Aug.", "Août")
gl['PieceRef'] = gl["PieceRef"].str.replace("Reverse ", "Contre-passation ")
gl['PieceRef'] = gl["PieceRef"].str.replace("INTEREST CHARGE", "CHARGE D'INTÉRÊT")
gl['PieceRef'] = gl["PieceRef"].str.replace("-SICK LEAVE PAY", "-Paiement congé maladie")
gl['PieceRef'] = gl["PieceRef"].str.replace("RECLASSEMENTIFICATION", "RECLASSIFICATION")
gl['PieceRef'] = gl["PieceRef"].str.replace("INSTALMENT", "VERSEMENT")
gl['PieceRef'] = gl["PieceRef"].str.replace("FIRST", "1ere")
gl['PieceRef'] = gl["PieceRef"].str.replace("FINE LATE PAY.", "Amende pour retard de paiement")
gl['PieceRef'] = gl["PieceRef"].str.replace("-PATERNITY PAY", "Indemnités de paternité")
gl['PieceRef'] = gl["PieceRef"].str.replace("SOCIAL SECURITY:", "SÉCURITÉ SOCIALE:")
gl['PieceRef'] = gl["PieceRef"].str.replace("Trip from", "Voyage de:")
gl['PieceRef'] = gl["PieceRef"].str.replace(" To ", " à")
gl['PieceRef'] = gl["PieceRef"].str.replace("Shipping", "Livraison")
gl['PieceRef'] = gl["PieceRef"].str.replace("VOXEET INTEGRATION COSTS", "COÛTS D'INTÉGRATION DE VOXEET")
gl['PieceRef'] = gl["PieceRef"].str.replace("INCOME TAX", "IMPÔT SUR LE REVENU")
gl['PieceRef'] = gl["PieceRef"].str.replace('Rideshare', 'Covoiturage')
gl['PieceRef'] = gl["PieceRef"].str.replace('Travel Meals', 'Repas de Travail')
gl['PieceRef'] = gl["PieceRef"].str.replace('Fees', 'Frais')
gl['PieceRef'] = gl["PieceRef"].str.replace('Phone', 'Téléphone')
gl['PieceRef'] = gl["PieceRef"].str.replace("Books", "Abonnements")
gl['PieceRef'] = gl["PieceRef"].str.replace("Subcriptions", "Location Base")
gl['PieceRef'] = gl["PieceRef"].str.replace("Meals", "Repas")
gl['PieceRef'] = gl["PieceRef"].str.replace("Entertainment", "divertissement ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Third Party", "tiers ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Training Fees", "Frais d0 Formation")
gl['PieceRef'] = gl["PieceRef"].str.replace("Conferences/Tradeshows Registratio", "Conférences/Tradeshows Enregistrement")
gl['PieceRef'] = gl["PieceRef"].str.replace("FOR", "POUR")
gl['PieceRef'] = gl["PieceRef"].str.replace("ROUNDING", "ARRONDISSEMENT")
gl['PieceRef'] = gl["PieceRef"].str.replace("STORAGE", "STOCKAGE")
gl['PieceRef'] = gl["PieceRef"].str.replace("VACATION ACCURAL", "Vacances Accumulées")
gl['PieceRef'] = gl["PieceRef"].str.replace("RECEIVABLE ", "Recevables")
gl['PieceRef'] = gl["PieceRef"].str.replace("AFTER PAYOUT ", "APRÈS PAIEMENT")
gl['PieceRef'] = gl["PieceRef"].str.replace("CLEAN UP ", "APUREMENT")
gl['PieceRef'] = gl["PieceRef"].str.replace("EMPLOYEE TRAVEL INSUR ", "ASSURANCE DE VOYAGE DES EMPLOYÉS")
gl['PieceRef'] = gl["PieceRef"].str.replace("CORRECTION OF", "CORRECTION DE")
gl['PieceRef'] = gl["PieceRef"].str.replace("TAXES PAYROLL", "IMPÔTS SUR LA MASSE SALARIALE")
gl['PieceRef'] = gl["PieceRef"].str.replace("ACCOUNT", "COMPTE")
gl['PieceRef'] = gl["PieceRef"].str.replace("TAX", "Impôt")
gl['PieceRef'] = gl["PieceRef"].str.replace("life disab", "Incapacité de vie")
gl['PieceRef'] = gl["PieceRef"].str.replace("HOUSING TAX","TAXE D'HABITATION")
gl['PieceRef'] = gl["PieceRef"].str.replace("GROSS SALARY","SALAIRE BRUT")
gl['PieceRef'] = gl["PieceRef"].str.replace("Cleaning Services","Nettoyage")
gl['PieceRef'] = gl["PieceRef"].str.replace("Freight","Fret")
gl['PieceRef'] = gl["PieceRef"].str.replace("Membership","adhésion")
gl['PieceRef'] = gl["PieceRef"].str.replace("Air cooling Maintenance","Entretien de refroidissement de l'air")
gl['PieceRef'] = gl["PieceRef"].str.replace("Power on Demand Platform","Plateforme d'energie à la demande")
gl['PieceRef'] = gl["PieceRef"].str.replace("Sanitaire room installation"," Installation de la salle sanitaire")
gl['PieceRef'] = gl["PieceRef"].str.replace("subscription","abonnement")
gl['PieceRef'] = gl["PieceRef"].str.replace("Coffee supplies "," Fournitures de café")
gl['PieceRef'] = gl["PieceRef"].str.replace("Duty and Tax ","Devoir et fiscalité")
gl['PieceRef'] = gl["PieceRef"].str.replace("Electricity ","Electricité ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Lunch vouchers ","Bons déjeuner")
gl['PieceRef'] = gl["PieceRef"].str.replace("Security monitoring","Surveillance de la sécurité")
gl['PieceRef'] = gl["PieceRef"].str.replace("Water", "L'EAU")
gl['PieceRef'] = gl["PieceRef"].str.replace("Statutory Audit", "Audit statutaire")
gl['PieceRef'] = gl["PieceRef"].str.replace(" Meeting room screen installation", "Installation de l'écran de la salle de réunion")
# NOTE(review): duplicate "Water" rules - no-ops after the first occurrence.
gl['PieceRef'] = gl["PieceRef"].str.replace("Water", "L'EAU")
gl['PieceRef'] = gl["PieceRef"].str.replace("Water", "L'EAU")
gl['PieceRef'] = gl["PieceRef"].str.replace("Tax Credit FY 2016", "Crédit d'impôt Exercice 2016")
gl['PieceRef'] = gl["PieceRef"].str.replace("Bank of America Merill Lynch-T&E statement","Déclaration de Merill Lynch")
gl['PieceRef'] = gl["PieceRef"].str.replace("English Translation", "Traduction anglaise")
gl['PieceRef'] = gl["PieceRef"].str.replace("Office Rent", "Location de Bureau")
gl['PieceRef'] = gl["PieceRef"].str.replace("Annual Electrical Verification", "Vérification électrique annuelle ")
gl['PieceRef'] = gl["PieceRef"].str.replace("Health costs ", "Coûts santé")
gl['PieceRef'] = gl["PieceRef"].str.replace("Unlimited-receipt and policy audit", "Vérification illimitée des reçus et audites")
gl['PieceRef'] = gl["PieceRef"].str.replace("Water fountain ", "Fontaine d'eau")
gl['PieceRef'] = gl["PieceRef"].str.replace("Quartely control visit", "Visite de contrôle trimestrielle")
gl['PieceRef'] = gl["PieceRef"].str.replace("Fire extinguishers annual check", "Vérification annuelle des extincteurs")
gl['PieceRef'] = gl["PieceRef"].str.replace("showroom rent", "location de salle d'exposition")
gl['PieceRef'] = gl["PieceRef"].str.replace("AND ACTUAL RECEIV","ET RECETTES RÉELLES")
gl['PieceRef'] = gl["PieceRef"].str.replace("FILING","DÉPÔT")
gl['PieceRef'] = gl["PieceRef"].str.replace("ORDERS","ORDRES")
gl['PieceRef'] = gl["PieceRef"].str.replace("EXCLUDED -DUMMY CREDIT","EXCLU")
gl['PieceRef'] = gl["PieceRef"].str.replace("RELARING TO","RELATIF À")
gl['PieceRef'] = gl["PieceRef"].str.replace("CLEAN UP-","APUREMENT-")
gl['PieceRef'] = gl["PieceRef"].str.replace("2ND INSTALLEMENT","2ème versement")
gl['PieceRef'] = gl["PieceRef"].str.replace("DOUBLE PAYMENT","DOUBLE PAIEMENT")
gl['PieceRef'] = gl["PieceRef"].str.replace("CLEAN UP-","APUREMENT-")
gl['PieceRef'] = gl["PieceRef"].str.replace("DUTIES","DROITS")
gl['PieceRef'] = gl["PieceRef"].str.replace("Previous balance","Solde Précédent")
gl['PieceRef'] = gl["PieceRef"].str.replace("Cash fx","Cash FX")
gl['PieceRef'] = gl["PieceRef"].str.replace("PAYROLL INCOME","REVENU DE PAIE")
gl['PieceRef'] = gl["PieceRef"].str.replace("TELEPHONE CHARGES","Frais de Téléphone")
gl['PieceRef'] = gl["PieceRef"].str.replace("Clearing","Compensation")
gl['PieceRef'] = gl["PieceRef"].str.replace("Hotel","Hôtel")
gl['PieceRef'] = gl["PieceRef"].str.replace("Miscellaneous","Divers")
gl['PieceRef'] = gl["PieceRef"].str.replace("Corporate Card-Out-of-Poc","")
gl['PieceRef'] = gl["PieceRef"].str.replace("Traveling Dolby Empl","Employé itinérant de Dolby")
gl['PieceRef'] = gl["PieceRef"].str.replace("Tools-Equipment-Lab Supplies","Outils-Equipement-Fournitures de laboratoire")
gl['PieceRef'] = gl["PieceRef"].str.replace("rounding","Arrondissement")
gl['PieceRef'] = gl["PieceRef"].str.replace("Building Supplies-Maintenance","Matériaux de construction-Entretien")
gl['PieceRef'] = 
gl["PieceRef"].str.replace("Expensed Furniture","Mobilier Dépensé") gl['PieceRef'] = gl["PieceRef"].str.replace("Credit for Charges","Crédit pour frais") gl['PieceRef'] = gl["PieceRef"].str.replace("Manual P-ment and double payment to be deduct","P-mnt manuel et double paiement à déduire") gl['PieceRef'] = gl["PieceRef"].str.replace("Employee insurance travel","Assurance de voyage des employés 2019") gl['PieceRef'] = gl["PieceRef"].str.replace("Rent ","Location ") gl['PieceRef'] = gl["PieceRef"].str.replace("Lunch vouchers ","Bons déjeuner") gl['PieceRef'] = gl["PieceRef"].str.replace("Store Room ","Chambre Stocke") gl['PieceRef'] = gl["PieceRef"].str.replace("Evaluation ","Évaluation ") gl['PieceRef'] = gl["PieceRef"].str.replace("Charges ","Frais ") gl['PieceRef'] = gl["PieceRef"].str.replace("On Line ","En ligne ") gl['PieceRef'] = gl["PieceRef"].str.replace("/Building Supplies/Maintenance","/ Matériaux de construction / Entretien") gl['PieceRef'] = gl["PieceRef"].str.replace("Music Instruments","Instruments Musicales") gl['PieceRef'] = gl["PieceRef"].str.replace("/Employee Awards/Recognition", "/ Récompenses des employés / Reconnaissance") gl['PieceRef'] = gl["PieceRef"].str.replace("/Daily Allowance","/Indemnité journalière") gl['PieceRef'] = gl["PieceRef"].str.replace("RECLASS ", "RECLASSIFICATION ") gl['PieceRef'] = gl["PieceRef"].str.replace("Purchase Accounting", "Comptabilité d'achat") gl['PieceRef'] = gl["PieceRef"].str.replace( "EXPAT ", " Expatrié ") gl['PieceRef'] = gl["PieceRef"].str.replace("FROM ", "DE ") gl['PieceRef'] = gl["PieceRef"].str.replace("INVOICE", "FACTURE") gl['PieceRef'] = gl["PieceRef"].str.replace("CLEANUP", "APUREMENT") gl['PieceRef'] = gl["PieceRef"].str.replace("Repayment", "Restitution") gl['PieceRef'] = gl["PieceRef"].str.replace("Office Furniture", "Meubles de bureau") gl['PieceRef'] = gl["PieceRef"].str.replace("anti-stress treatments", "traitements anti-stress") gl['PieceRef'] = gl["PieceRef"].str.replace("UK Tax Return", 
"Décl. d'impôt Royaume-Uni") gl['PieceRef'] = gl["PieceRef"].str.replace("Office Location", "Location de bureau") gl['PieceRef'] = gl["PieceRef"].str.replace("Deliver Service", "Service de livraison") gl['PieceRef'] = gl["PieceRef"].str.replace("Foreign Office Support", "Soutien aux bureaux étrangères") gl['PieceRef'] = gl["PieceRef"].str.replace("Showroom", "Salle d'exposition") gl['PieceRef'] = gl["PieceRef"].str.replace("aditional Services", "Services supplémentaires ") gl['PieceRef'] = gl["PieceRef"].str.replace("Cofee consumption Paris office", "Consommation de café Bureau de Paris") gl['PieceRef'] = gl["PieceRef"].str.replace("Consultant ", "Expert-conseil") gl['PieceRef'] = gl["PieceRef"].str.replace("INVOICE", "FACTURE") gl['PieceRef'] = gl["PieceRef"].str.replace("Rent-", "Location-") gl['PieceRef'] = gl["PieceRef"].str.replace("Corporate", "Entreprise") gl['PieceRef'] = gl["PieceRef"].str.replace("COST ", "COÛT ") gl['PieceRef'] = gl["PieceRef"].str.replace("TRAINING", "Formation") gl['PieceRef'] = gl["PieceRef"].str.replace("LIFE DISAB", "Invalidité") gl['PieceRef'] = gl["PieceRef"].str.replace("INSU ", "ASSURANCE ") gl['PieceRef'] = gl["PieceRef"].str.replace("PATENT AWARD", "BREVET") gl['PieceRef'] = gl["PieceRef"].str.replace("EQUIVALENT POUR UNUSED VACATION POUR LEAVE", "CONGÉ DE VACANCES INUTILISÉS") gl['PieceRef'] = gl["PieceRef"].str.replace("SPOT ", "") gl['PieceRef'] = gl["PieceRef"].str.replace("AIRFARE TRANSFER TO PREPAIDS", "TRANSFERT DE TRANSPORT AÉRIEN À PAYÉ D'AVANCE") gl['PieceRef'] = gl["PieceRef"].str.replace("WITHHOLDING", "RETRAIT") gl['PieceRef'] = gl["PieceRef"].str.replace("Clear ", "Reglement ") gl['PieceRef'] = gl["PieceRef"].str.replace("Clear ", "Reglement ") gl['PieceRef'] = gl["PieceRef"].str.replace("Rent/", "Location/") gl['PieceRef'] = gl["PieceRef"].str.replace("Pay ", "Paiement ") gl['PieceRef'] = gl["PieceRef"].str.replace("PAYMENT", "Paiement ") gl['PieceRef'] = gl["PieceRef"].str.replace("French Income Tax Return;", 
"Déclaration de revenus française;") gl['PieceRef'] = gl["PieceRef"].str.replace("REVESERVICES", "SERVICES") gl['PieceRef'] = gl["PieceRef"].str.replace("INCLUDED DOUBLE", "DOUBLE INCLUS") gl['PieceRef'] = gl["PieceRef"].str.replace("Bank", "Banque") gl['PieceRef'] = gl["PieceRef"].str.replace("/Promotional Expenses", "/Frais de promotion") gl['PieceRef'] = gl["PieceRef"].str.replace(" ACTIVITY ", " activité ") gl['PieceRef'] = gl["PieceRef"].str.replace(" DEFINED BENEFIT LIABILITY", "PASSIF À AVANTAGES DÉTERMINÉES") gl['PieceRef'] = gl["PieceRef"].str.replace("COÛT PLUS ", "Revient Majoré") gl['PieceRef'] = gl["PieceRef"].str.replace("/Airline Frais", "/Tarifs aériens") gl['PieceRef'] = gl["PieceRef"].str.replace("/Tools/Equipment/Lab Supplies", "/Outils / Équipement / Fournitures de laboratoire") gl['PieceRef'] = gl["PieceRef"].str.replace("Rent/", "Location/") gl['PieceRef'] = gl["PieceRef"].str.replace("Payment Posting", "Paiements") gl['PieceRef'] = gl["PieceRef"].str.replace("COMMISSION D’ACCUMULATION", "ACCUMULATIONS DE COMISSIONS") gl['PieceRef'] = gl["PieceRef"].str.replace("ImpôtE", "Impôt") gl['PieceRef'] = gl["PieceRef"].str.replace("MED.INSU", "MED.ASSURANCE") gl['PieceRef'] = gl["PieceRef"].str.replace("APPRENTICESHIP_CONTRIBUTIONS_TRUE_UP", "CONTRIBUTIONS À L'APPRENTISSAGE/TRUE UP") gl['PieceRef'] = gl["PieceRef"].str.replace("NET PAY", "SALAIRE NET") gl['PieceRef'] = gl["PieceRef"].str.replace("CASH ", "ARGENT ") gl['PieceRef'] = gl["PieceRef"].str.replace("Repayment ", "Repaiement ") gl['PieceRef'] = gl["PieceRef"].str.replace("Acct. ", "Comptab. 
") gl['PieceRef'] = gl["PieceRef"].str.replace("ACCR ", "ACC ") gl['PieceRef'] = gl["PieceRef"].str.replace("Accr ", "Acc.") gl['PieceRef'] = gl["PieceRef"].str.replace("Cash Balance", "Solde de caisse") gl['PieceRef'] = gl["PieceRef"].str.replace("RECLASS ", "RECLASSEMENT ") gl['PieceRef'] = gl["PieceRef"].str.replace("VAT FILING ", "Dépôt de TVA ") gl['PieceRef'] = gl["PieceRef"].str.replace("Needs to be re-booked due", "KI") gl['PieceRef'] = gl["PieceRef"].str.replace("reclass from", "reclasser de") gl['PieceRef'] = gl["PieceRef"].str.replace("RECLASS FROM", "reclasser de") gl['PieceRef'] = gl["PieceRef"].str.replace("PAYROLL", "PAIE") gl['PieceRef'] = gl["PieceRef"].str.replace("RECLASS ", "Reclasser") gl['PieceRef'] = gl["PieceRef"].str.replace("DEDICTION","DEDUCTION") gl['PieceRef'] = gl["PieceRef"].str.replace("Cash","Argent ") gl['PieceRef'] = gl["PieceRef"].str.replace("cash ","argent ") gl['PieceRef'] = gl["PieceRef"].str.replace("ReclasserIFICATIO","RECLASSEMENT ") gl['PieceRef'] = gl["PieceRef"].str.replace("ImpôtS ","Impôts ") gl['PieceRef'] = gl["PieceRef"].str.replace("Working Repas (Employees Only) ","Repas de travail (employés seulement) ") gl['PieceRef'] = gl["PieceRef"].str.replace("/Banque Frais","/Frais Bancaires") gl['PieceRef'] = gl["PieceRef"].str.replace("MED. 
INS.","ASSURANCE MED.") gl['PieceRef'] = gl["PieceRef"].str.replace("AJE WIRE LOG TRAN","AJE VERSEMENT") gl['PieceRef'] = gl["PieceRef"].str.replace("JUN'","JUIN'") gl['PieceRef'] = gl["PieceRef"].str.replace("Deferred Rent18 rue de Lo","Loyer différé 18 Rue de Lo") gl['PieceRef'] = gl["PieceRef"].str.replace("Facture - Brut'","Facture - Brute") gl['PieceRef'] = gl["PieceRef"].str.replace("T&E","VD") gl['PieceRef'] = gl["PieceRef"].str.replace("/","") gl['PieceRef'] = gl["PieceRef"].str.replace("Inv","Facture") gl['PieceRef'] = gl["PieceRef"].str.replace("RECUR DEF RENT","LOCATION DIFFÉRÉE RECUR") gl['PieceRef'] = gl["PieceRef"].str.replace(" NaT ","") gl['JournalLib'] = gl["JournalLib"].str.replace(" NaT ","") gl['EcritureLib'] = gl["EcritureLib"].str.replace(" NaT ","") gl['PieceRef'] = gl["PieceRef"].str.replace(" NAN ","") gl['JournalLib'] = gl["JournalLib"].str.replace(" NAN ","") gl['EcritureLib'] = gl["EcritureLib"].str.replace(" NAN ","") gl['PieceRef'] = gl["PieceRef"].str.replace(" nan ","") gl['JournalLib'] = gl["JournalLib"].str.replace(" nan ","") gl['EcritureLib'] = gl["EcritureLib"].str.replace(" nan ","") gl['PieceRef'] = gl["PieceRef"].str.replace(" nannan ","") gl['JournalLib'] = gl["JournalLib"].str.replace(" nannan ","") gl['EcritureLib'] = gl["EcritureLib"].str.replace(" nannan ","") gl.loc[gl["JournalLib"].str.isnumeric(),'JournalLib'] = gl['JournalCode'] gl['JournalLib'] = gl['JournalLib'].replace(codes) gl['JournalLib'] = gl["JournalLib"].str.replace("-2014123456789","-2014V") gl['JournalLib'] = gl["JournalLib"].str.replace("T/&E","VD") gl['EcritureLib'] = gl["EcritureLib"].str.replace("T/&E","VD") gl['DocDate'] = gl['Document Date'] gl.loc[gl["PieceRef"].isnull(),'PieceRef'] = gl["JournalLib"].map(str) + " " + gl.DocDate.dt.strftime('%Y%m%d').astype(str) gl.loc[gl["EcritureLib"].str.isnumeric(),'EcritureLib'] = gl['JournalLib'].map(str) + gl['EcritureNum'].map(str) gl['Document Date'] = gl['DocDate'] del gl['DocDate'] gl['EcritureLib'] = 
gl['EcritureLib'].apply(lambda x: x.upper()) gl['Credit'] = gl['Credit'].abs() gl = gl.sort_values('EcritureNum') return gl def save_results(df, output): del df['Amount in doc. curr.'] del df['Assignment'] del df['Document Date'] del df['Reference'] del df['Text'] del df['Posting Date'] del df['Document Number'] del df['Document Type'] del df['Document currency'] del df['G/L Account'] del df['Local Currency'] del df['Local currency 2'] del df['Offsetting acct no.'] writer = pd.ExcelWriter(output, engine='xlsxwriter', datetime_format='yyyymmdd', date_format='yyyymmdd') df.to_excel(writer, index = False,sheet_name = ('Sheet 1'), columns =['JournalCode','JournalLib','EcritureNum','EcritureDate','CompteNum', 'CompteLib','CompAuxNum','CompAuxLib','PieceRef','PieceDate','EcritureLib', 'Debit','Credit','EcritureLet','DateLet','ValidDate','MontantDevise','Idevise']) workbook = writer.book worksheet = writer.sheets['Sheet 1'] worksheet.set_column('A:AV', 40) writer.save() if __name__ == '__main__': args = parse_args() gl_items = args.GL parked = args.Parked output_file = args.Choose_File_Name output_df = combine(gl_items,parked) print("Reading data and combining with parked and deleted items") print("Separating Debits and Credits") print("Mapping Vendors") output_df_transformed = transform(output_df) output_df_translated = translate(output_df_transformed) print("Translating to French") print("Mapping French Accounts") print("Filling in blanks") save_results(output_df_translated,output_file) z = output_df_translated['Debit'].sum(axis = 0,skipna = True) y = output_df_translated['Credit'].sum(axis = 0, skipna = True) h = z - y if h != 0: print("WARNING: Debits and Credits are not balanced!")
75.99647
246
0.653315
0
0
0
0
1,353
0.015609
0
0
54,440
0.628035
acd798e93b86e3f13ffd93fb336c588b276e8edf
25,479
py
Python
kits21/annotation/postprocessing.py
bakry230/kits21
7961ec1210d7fa18de5ef1d406609025313baf59
[ "MIT" ]
null
null
null
kits21/annotation/postprocessing.py
bakry230/kits21
7961ec1210d7fa18de5ef1d406609025313baf59
[ "MIT" ]
null
null
null
kits21/annotation/postprocessing.py
bakry230/kits21
7961ec1210d7fa18de5ef1d406609025313baf59
[ "MIT" ]
null
null
null
"""Code for turning user delineations into dense segmentations.""" import json import numpy as np import nibabel as nib from PIL import Image, ImageDraw from numpy.core.fromnumeric import cumsum import torch import torch.nn.functional from scipy import signal from skimage import measure import cv2 #pylint: disable=no-member def load_json(json_path): with json_path.open() as f: return json.loads(f.read()) def write_json(json_path, data): with json_path.open("w") as f: return f.write(json.dumps(data, indent=2)) def get_containing_box(dln, shape): annotated_frames = set([]) maxs = [0, 0] mins = [np.inf, np.inf] max_sz = 0 for ann in dln["annotations"]: annotated_frames.add(ann["frame"]) for pt in ann["spatial_payload"]: if pt[0] > maxs[0]: maxs[0] = pt[0] if pt[1] > maxs[1]: maxs[1] = pt[1] if pt[0] < mins[0]: mins[0] = pt[0] if pt[1] < mins[1]: mins[1] = pt[1] if ann["line_size"] > max_sz: max_sz = ann["line_size"] afrms = sorted(list(annotated_frames)) last = afrms[0] min_step = np.inf for afrm in afrms[1:]: if afrm - last < min_step: min_step = afrm - last last = afrm abs_zmin = 0 abs_zmax = shape[0] - 1 return { "xmin": max(0, int(np.floor(mins[0] - max_sz))), "xmax": min(shape[2] - 1, int(np.ceil(maxs[0] + max_sz))), "ymin": max(0, int(np.floor(mins[1] - max_sz))), "ymax": min(shape[1] - 1, int(np.ceil(maxs[1] + max_sz))), "zmin": max(abs_zmin, min(afrms) - min_step), "zmax": min(abs_zmax, max(afrms) + min_step), "step": min_step, "xdim": shape[2], "ydim": shape[1], "zdim": shape[0] } def get_cropped_scan(cbox, img_nib): return img_nib.get_fdata()[ cbox["zmin"]:cbox["zmax"] + 1, cbox["ymin"]:cbox["ymax"] + 1, cbox["xmin"]:cbox["xmax"] + 1 ] def generate_cropped_drawing_interior(cbox, dln): ret = np.zeros(( cbox["zmax"] - cbox["zmin"] + 1, cbox["ymax"] - cbox["ymin"] + 1, cbox["xmax"] - cbox["xmin"] + 1 ), dtype=np.int) for i in range(ret.shape[0]): with Image.new("L", (ret.shape[2]*10, ret.shape[1]*10)) as im: draw = ImageDraw.Draw(im) drew = False for stroke in 
dln["annotations"]: if stroke["deprecated"]: continue if i + cbox["zmin"] == stroke["frame"]: drew = True draw.line( [ ( int(round((x[0] - cbox["xmin"])*10)), int(round((x[1] - cbox["ymin"])*10)) ) for x in stroke["spatial_payload"] ], fill=128, width=int(round(stroke["line_size"]*10))+4, joint="curve" ) srt = stroke["spatial_payload"][0] draw.ellipse( [ ( int(round((srt[0] - cbox["xmin"] - stroke["line_size"]/2)*10))-2, int(round((srt[1] - cbox["ymin"] - stroke["line_size"]/2)*10))-2 ), ( int(round((srt[0] - cbox["xmin"] + stroke["line_size"]/2)*10))+2, int(round((srt[1] - cbox["ymin"] + stroke["line_size"]/2)*10))+2 ) ], fill=128 ) end = stroke["spatial_payload"][-1] draw.ellipse( [ ( int(round((end[0] - cbox["xmin"] - stroke["line_size"]/2)*10))-2, int(round((end[1] - cbox["ymin"] - stroke["line_size"]/2)*10))-2 ), ( int(round((end[0] - cbox["xmin"] + stroke["line_size"]/2)*10))+2, int(round((end[1] - cbox["ymin"] + stroke["line_size"]/2)*10))+2 ) ], fill=128 ) if drew: ImageDraw.floodfill(im, (0,0), 128, thresh=63.5) rszd = im.resize((ret.shape[2], ret.shape[1]), Image.BILINEAR) ret[i,:,:] = np.less(np.array(rszd), 63.9).astype(np.int) return ret def get_contour(bin_seg): if bin_seg is None: return None contours, hierarchy = cv2.findContours(bin_seg.astype(np.uint8)*255, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) return contours[0] def distance(p1, p2): return (p1[0][0] - p2[0][0])*(p1[0][0] - p2[0][0]) + (p1[0][1] - p2[0][1])*(p1[0][1] - p2[0][1]) def find_nearest_neighbors_slow_v2(lg_cntr, sm_cntr): matches = np.zeros_like(lg_cntr) step = sm_cntr.shape[0]/lg_cntr.shape[0] mini = None mind = np.inf for i in range(lg_cntr.shape[0]): candidate_matches = np.zeros_like(lg_cntr) offset = i*step for j in range(lg_cntr.shape[0]): candidate_matches[j] = sm_cntr[int(np.round(offset + j*step)) % sm_cntr.shape[0]] dist = np.square(lg_cntr - candidate_matches).sum() if dist < mind: mini = i matches = candidate_matches.copy() mind = dist return matches def 
draw_filled_contour(ind, bef_i, aft_i, drw_c, bef_bin, aft_bin, float_contour): blown_up = np.zeros((drw_c.shape[1]*10, drw_c.shape[2]*10), dtype=np.uint8) points = np.round(float_contour*10).astype(np.int32) + 1 cv2.fillPoly(blown_up, pts=[points], color=128) drw_c[ind,:,:] = np.logical_or( drw_c[ind,:,:], np.logical_or( np.greater(cv2.resize(blown_up, (drw_c.shape[2], drw_c.shape[1]), cv2.INTER_LINEAR), 32), np.multiply(bef_bin, aft_bin) ) ) def get_group(istr, bef_to_aft, aft_to_bef): bef_grp = set([istr]) aft_grp = set([]) bef_ln = len(bef_grp) aft_ln = len(aft_grp) while True: for ai in aft_grp: for atb in aft_to_bef[ai]: if atb["ovr_sz"] > 0: bef_grp.add(str(atb["ind"])) for bi in bef_grp: for bta in bef_to_aft[bi]: if bta["ovr_sz"] > 0: aft_grp.add(str(bta["ind"])) if len(bef_grp) != bef_ln or len(aft_grp) != aft_ln: bef_ln = len(bef_grp) aft_ln = len(aft_grp) else: break return list(bef_grp), list(aft_grp) def splice_contour(spliced, stretches, cntr, cur_sz, ctr_ind): # Get nearest pair mini = None minj = None mind = np.inf for i in range(cur_sz): for j in range(cntr.shape[0]): dst = distance(spliced[i], cntr[j]) if dst < mind: mini = i minj = j mind = dst ret_sp = spliced.copy() ret_sp[mini+1:mini+cntr.shape[0]+1] = cntr ret_sp[mini+cntr.shape[0]+1:cur_sz+cntr.shape[0]] = spliced[mini+1:cur_sz] ret_st = stretches.copy() ret_st[mini+1:mini+cntr.shape[0]+1] = ctr_ind*np.ones((cntr.shape[0], 1)) ret_st[mini+cntr.shape[0]+1:cur_sz+cntr.shape[0]] = stretches[mini+1:cur_sz] return ret_sp, ret_st def splice_contours(cntrs): lengths = [cr.shape[0] for cr in cntrs] stretches = -1*np.ones( (sum(lengths),1), dtype=np.int32 ) spliced = np.zeros( (sum(lengths),) + cntrs[0].shape[1:], dtype=cntrs[0].dtype ) spliced[0:cntrs[0].shape[0]] = cntrs[0].copy() stretches[0:cntrs[0].shape[0]] = np.zeros((cntrs[0].shape[0], 1)) for i in range(1, len(cntrs)): spliced, stretches = splice_contour(spliced, stretches, cntrs[i], sum(lengths[:i]), i) return spliced, stretches def 
slice_matches(matches, splice_inds): ret = [] for i in range(np.max(splice_inds)+1): ret += [matches[splice_inds == i,:].reshape((-1,1,2))] return ret def interpolate_merge_association(bef_grp, aft_grp, bef_lbl, aft_lbl, drw_c, bef_i, aft_i, step): # Get composites for each tot_bef_bin = np.zeros_like(bef_lbl) for lbl in bef_grp: tot_bef_bin = np.logical_or( tot_bef_bin, np.equal(bef_lbl, int(lbl)) ) tot_aft_bin = np.zeros_like(aft_lbl) for lbl in aft_grp: tot_aft_bin = np.logical_or( tot_aft_bin, np.equal(aft_lbl, int(lbl)) ) # Get individual values bef_bins = [ np.equal(bef_lbl, int(x)) for x in bef_grp ] aft_bins = [ np.equal(aft_lbl, int(x)) for x in aft_grp ] bef_cntrs = [ get_contour(bef_bin) for bef_bin in bef_bins ] aft_cntrs = [ get_contour(aft_bin) for aft_bin in aft_bins ] if len(bef_grp) > len(aft_grp): nonref_cntrs = bef_cntrs spliced_nonref, splice_inds = splice_contours(bef_cntrs) ref_cntrs = aft_cntrs start = aft_i inc = -1 else: nonref_cntrs = aft_cntrs spliced_nonref, splice_inds = splice_contours(aft_cntrs) ref_cntrs = bef_cntrs start = bef_i inc = 1 for ref_cntr in ref_cntrs: matches = find_nearest_neighbors_slow_v2(ref_cntr, spliced_nonref) rev_matches = find_nearest_neighbors_slow_v2(spliced_nonref, ref_cntr) sliced_matches = slice_matches(rev_matches, splice_inds) for i in range(1, int(np.ceil((aft_i - bef_i)/2))): draw_filled_contour( start + i*inc, bef_i, aft_i, drw_c, tot_bef_bin, tot_aft_bin, i/step*matches + (step - i)/step*ref_cntr ) for nonref_frag, ref_frag in zip(nonref_cntrs, sliced_matches): for i in range(int(np.ceil((aft_i - bef_i)/2)), aft_i - bef_i): draw_filled_contour( start + i*inc, bef_i, aft_i, drw_c, tot_bef_bin, tot_aft_bin, i/step*nonref_frag + (step - i)/step*ref_frag ) def interpolate_simple_association(bef_bin, aft_bin, drw_c, bef_i, aft_i, bef_cnt, aft_cnt, step): # cnt <- center # cntr <- contour bef_cntr = get_contour(bef_bin) aft_cntr = get_contour(aft_bin) if bef_cntr is None: start = bef_i inc = 1 ref = 
bef_cntr bef_cntr = np.array([ [bef_cnt] ]) bef_bin = np.zeros_like(aft_bin) elif aft_cntr is None: start = aft_i inc = -1 ref = aft_cntr aft_cntr = np.array([ [aft_cnt] ]) aft_bin = np.zeros_like(bef_bin) if bef_cntr.shape[0] > aft_cntr.shape[0]: start = bef_i inc = 1 ref = bef_cntr matches = find_nearest_neighbors_slow_v2(bef_cntr, aft_cntr) else: start = aft_i inc = -1 ref = aft_cntr matches = find_nearest_neighbors_slow_v2(aft_cntr, bef_cntr) for i in range(1, aft_i - bef_i): draw_filled_contour( start + i*inc, bef_i, aft_i, drw_c, bef_bin, aft_bin, i/step*matches + (step - i)/step*ref ) def interpolate_step(bef_i, aft_i, drw_c, step): # Label connected components in each bef_lbl = measure.label(drw_c[bef_i, :, :], background=0) aft_lbl = measure.label(drw_c[aft_i, :, :], background=0) # Associate connected components based on proximity and overlap num_bef = np.max(bef_lbl) num_aft = np.max(aft_lbl) aft_cvg = [False for _ in range(num_aft)] bef_to_aft = {} aft_to_bef = {} # Iterate over all pairs of blobs for i in range(1, num_bef+1): bef_bin = np.equal(bef_lbl, i).astype(np.int) bef_cnt_x, bef_cnt_y = np.argwhere(bef_bin == 1).sum(0)/bef_bin.sum() bef_covered = False istr = "{}".format(i) for j in range(1, num_aft+1): aft_bin = np.equal(aft_lbl, j).astype(np.int) # Get size of overlap ovr_sz = np.multiply(bef_bin, aft_bin).sum() # Get metrics describing blob proximity aft_cnt_x, aft_cnt_y = np.argwhere(aft_bin == 1).sum(0)/aft_bin.sum() cnt_dsp = [aft_cnt_y - bef_cnt_y, aft_cnt_x - bef_cnt_x] cnt_dst_sq = cnt_dsp[0]**2 + cnt_dsp[1]**2 if ovr_sz > 0 or cnt_dst_sq < 5**2: jstr = "{}".format(j) if istr not in bef_to_aft: bef_to_aft[istr] = [] bef_to_aft[istr] += [{ "ind": j, "ovr_sz": int(ovr_sz), "cnt_dst_sq": cnt_dst_sq }] if jstr not in aft_to_bef: aft_to_bef[jstr] = [] aft_to_bef[jstr] += [{ "ind": i, "ovr_sz": int(ovr_sz), "cnt_dst_sq": cnt_dst_sq }] bef_covered = True aft_cvg[j-1] = True if not bef_covered: interpolate_simple_association( bef_bin, None, 
drw_c, bef_i, aft_i, [bef_cnt_y, bef_cnt_x], [bef_cnt_y, bef_cnt_x], step ) for j, ac in enumerate(aft_cvg): if not ac: aft_bin = np.equal(aft_lbl, j+1).astype(np.int) aft_cnt_x, aft_cnt_y = np.argwhere(aft_bin == 1).sum(0)/aft_bin.sum() interpolate_simple_association( None, aft_bin, drw_c, bef_i, aft_i, [aft_cnt_y, aft_cnt_x], [aft_cnt_y, aft_cnt_x], step ) # If each only has one candidate, that's easy for istr in bef_to_aft: if len(bef_to_aft[istr]) == 1 and len(aft_to_bef[str(bef_to_aft[istr][0]["ind"])]) == 1: bef_bin = np.equal(bef_lbl, int(istr)).astype(np.int) aft_bin = np.equal(aft_lbl, bef_to_aft[istr][0]["ind"]).astype(np.int) aft_cnt_x, aft_cnt_y = np.argwhere(aft_bin == 1).sum(0)/aft_bin.sum() bef_cnt_x, bef_cnt_y = np.argwhere(bef_bin == 1).sum(0)/bef_bin.sum() interpolate_simple_association( bef_bin, aft_bin, drw_c, bef_i, aft_i, [bef_cnt_y, bef_cnt_x], [aft_cnt_y, aft_cnt_x], step ) else: # More complex decision... strict_bta = [x for x in bef_to_aft[istr] if x["ovr_sz"] > 0] strict_atb = [] for k in range(len(strict_bta)): strict_atb += [ x for x in aft_to_bef[str(strict_bta[k]["ind"])] if x["ovr_sz"] > 0 ] handled = False if len(strict_bta) == 1: if len(strict_atb) == 1: handled = True bef_bin = np.equal(bef_lbl, int(istr)).astype(np.int) aft_bin = np.equal(aft_lbl, strict_bta[0]["ind"]).astype(np.int) aft_cnt_x, aft_cnt_y = np.argwhere(aft_bin == 1).sum(0)/aft_bin.sum() bef_cnt_x, bef_cnt_y = np.argwhere(bef_bin == 1).sum(0)/bef_bin.sum() interpolate_simple_association( bef_bin, aft_bin, drw_c, bef_i, aft_i, [bef_cnt_y, bef_cnt_x], [aft_cnt_y, aft_cnt_x], step ) if not handled: # Need to do a group merge bef_grp, aft_grp = get_group(istr, bef_to_aft, aft_to_bef) interpolate_merge_association( bef_grp, aft_grp, bef_lbl, aft_lbl, drw_c, bef_i, aft_i, step ) return drw_c def interpolate_drawings(drw_c, step, arb_bdry=False): # Get inclusive start and end frames start = 0 while start < drw_c.shape[0]: if np.sum(drw_c[start]) > 0: break else: start += 
1 end = drw_c.shape[0] - 1 while end > start: if np.sum(drw_c[end]) > 0: break else: end -= 1 if arb_bdry: start += step end -= step while start < end + step + 1: drw_c = interpolate_step(max(start - step, 0), min(start, drw_c.shape[0] - 1), drw_c, step) start += step return drw_c def get_blur_kernel_d(affine): kerx = signal.gaussian(5, std=1/np.abs(affine[0,2])).reshape(5, 1) kerxy = np.outer(kerx, kerx).reshape(1, 5, 5) kerz = signal.gaussian(5, std=1/np.abs(affine[2,0])).reshape(5, 1, 1) kerxyz = np.outer(kerz, kerxy) kerxyz /= np.sum(kerxyz) return torch.from_numpy(kerxyz.reshape(1,1,5,5,5)).to("cuda:0") def get_threshold(region_type): # This seems to work -- no need to adjust based on region now that ureter is gone return -30 def find_hilum_in_slice(thresh, side): # TODO use custom if available thresh = thresh.astype(np.uint8) ( nb_components, output, stats, centroids ) = cv2.connectedComponentsWithStats(thresh, connectivity=4) sizes = stats[:, -1] max_label = 0 max_size = 0 for i in range(1, nb_components): if sizes[i] > max_size: max_label = i max_size = sizes[i] thresh[output != max_label] = 0 centroid = np.array(tuple(centroids[max_label])) contours, _ = cv2.findContours( thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE ) if len(contours) == 0: return None primary_contour = contours[0] hull = cv2.convexHull(primary_contour, returnPoints=False) defects = cv2.convexityDefects(primary_contour, hull) # Choose from defects distances = [] scores = [] criteria = [] depths = [] if defects is None: return None for i in range(defects.shape[0]): s, e, f, d = defects[i, 0] start = np.array(tuple(primary_contour[s][0])) end = np.array(tuple(primary_contour[e][0])) furthest = np.array(tuple(primary_contour[f][0])) defect_center = (start + end)/2 depth = np.linalg.norm(furthest - defect_center) centroid_offset = centroid - defect_center distance = np.linalg.norm(start - end, ord=2) # print(centroid, defect_center, centroid_offset, distance) if side == "left": score = 
1*centroid_offset[0] + centroid_offset[1] elif side == "right": score = -1*centroid_offset[0] + centroid_offset[1] distance = np.linalg.norm(start - end, ord=2) scores = scores + [score] distances = distances + [distance] depths = depths + [depth] criteria = criteria + [int(score>0)*(distance+3*depth)] if np.sum(criteria) > 1e-2: winner = np.argmax(criteria) s, e, f, d = defects[winner, 0] start = tuple(primary_contour[s][0]) end = tuple(primary_contour[e][0]) hlm = [start, end] else: hlm = None return hlm def apply_hilum_to_slice(thresholded_c, blur_c, threshold, ind, hlm): if hlm is None: return cv2.line(thresholded_c[ind], hlm[0], hlm[1], 1, 2) abuse_slc = thresholded_c[ind].copy() mask = np.zeros((thresholded_c.shape[1]+2, thresholded_c.shape[2]+2), np.uint8) cv2.floodFill(abuse_slc, mask, (0,0), 1) thresholded_c[ind] = np.logical_and( (np.equal(abuse_slc, 0) | thresholded_c[ind]).astype(thresholded_c[ind].dtype), np.greater(blur_c[ind], threshold) ) # TODO allow for custom hilums to be specified in dln # Polygons will be allowed for logged-in users def add_renal_hilum(thresholded_c, blr_c, threshold, lzn, side, cbox, custom_hilums): first_hilum_slice = None last_hilum_slice = None for ann in lzn["annotations"]: if ann["spatial_type"] == "whole-image" and not ann["deprecated"]: bound = None for cp in ann["classification_payloads"]: if cp["confidence"] > 0.5: if cp["class_id"] == 7: bound = "sup" elif cp["class_id"] == 8: bound = "inf" if bound is None: continue frame = int(ann["frame"]) if bound == "sup": if first_hilum_slice is None or frame < first_hilum_slice: first_hilum_slice = frame - cbox["zmin"] elif bound == "inf": if last_hilum_slice is None or frame > last_hilum_slice: last_hilum_slice = frame - cbox["zmin"] for ind in range(thresholded_c.shape[0]): if "slice_{}".format(ind) in custom_hilums: for hlm in custom_hilums["slice_{}".format(ind)]: apply_hilum_to_slice(thresholded_c, blr_c, threshold, ind, hlm) elif ( ( first_hilum_slice is not None and ind 
>= first_hilum_slice ) and ( last_hilum_slice is not None and ind <= last_hilum_slice ) ): # TODO send dln here and use custom hilum if possible hlm = find_hilum_in_slice(thresholded_c[ind].copy(), side) apply_hilum_to_slice(thresholded_c, blr_c, threshold, ind, hlm) else: if first_hilum_slice is None: print("First hilum slice could not be determined") if last_hilum_slice is None: print("Last hilum slice could not be determined") return thresholded_c def get_side(cbox): if cbox["xmin"] + cbox["xmax"] > cbox["xdim"]: return "left" return "right" def generate_segmentation(region_type, cropped_img, cropped_drw, step=1, affine=None, lzn=None, cbox=None, custom_hilums={}): # Interpolate drawings cropped_drw = interpolate_drawings(cropped_drw, step) # Send tensors to GPU img_d = torch.from_numpy(cropped_img).to("cuda:0") drw_d = torch.from_numpy(cropped_drw).to("cuda:0") # Apply a 3d blur convolution blur_kernel_d = get_blur_kernel_d(affine) blr_d = torch.nn.functional.conv3d( img_d.reshape((1,1)+cropped_img.shape), blur_kernel_d, stride=1, padding=2 ).reshape(cropped_img.shape) # Apply threshold threshold = get_threshold(region_type) thresholded_d = torch.logical_and( torch.greater(blr_d, threshold), torch.greater(drw_d, 0) ).int() # If region is kidney, add hilum, redraw, and get new threshold thresholded_c = thresholded_d.to("cpu").numpy() blr_c = blr_d.to("cpu").numpy() if region_type == "kidney": side = get_side(cbox) thresholded_c = add_renal_hilum(thresholded_c, blr_c, threshold, lzn, side, cbox, custom_hilums) # Bring result back to cpu memory return thresholded_c def inflate_seg_to_image_size(cbox, cropped_seg): seg_np = np.zeros((cbox["zdim"], cbox["ydim"], cbox["xdim"]), dtype=np.int) seg_np[ cbox["zmin"]:cbox["zmax"] + 1, cbox["ymin"]:cbox["ymax"] + 1, cbox["xmin"]:cbox["xmax"] + 1, ] = cropped_seg return seg_np def get_custom_hilums(meta, cbox): ret = {} if "custom_hilums" not in meta: return ret for ch in meta["custom_hilums"]: if ch["slice_index"] < 
cbox["zmin"] or ch["slice_index"] > cbox["zmax"]: continue dct_key = "slice_{}".format(ch["slice_index"] - cbox["zmin"]) if dct_key not in ret: ret[dct_key] = [] for hlm in ch["hilums"]: ret[dct_key] += [ [ ( hlm[0][0] - cbox["xmin"], hlm[0][1] - cbox["ymin"] ), ( hlm[1][0] - cbox["xmin"], hlm[1][1] - cbox["ymin"] ) ] ] return ret def delineation_to_seg(region_type, image_path, delineation_path, meta, localization_path=None): # Read and parse delination and (maybe) localization from file lzn = None if region_type == "kidney": assert localization_path is not None lzn = load_json(localization_path) dln = load_json(delineation_path) # Read CT scan img_nib = nib.load(str(image_path)) # Crop image to the smallest possible box for memory/computational efficiency cbox = get_containing_box(dln, img_nib.shape) cropped_img = get_cropped_scan(cbox, img_nib) # Generate the drawing made by the annotator cropped_drw = generate_cropped_drawing_interior(cbox, dln) # Get any custom hilums within the containing box custom_hilums = get_custom_hilums(meta, cbox) # Apply heuristics to infer segmentation based on drawing and image cropped_seg = generate_segmentation( region_type, cropped_img, cropped_drw, cbox["step"], img_nib.affine, lzn, cbox, custom_hilums ) # Undo cropping to get final segmentation seg = inflate_seg_to_image_size(cbox, cropped_seg) # Return the seg in nifti format return nib.Nifti1Image(seg.astype(np.uint8), img_nib.affine)
33.972
125
0.548059
0
0
0
0
0
0
0
0
2,537
0.099572
acd7b158a80820d009b00a99ec78dd68591f2b96
458
py
Python
Programs/Evernote/PackMemo/PackMemo.py
Psiphonc/EasierLife
ad9143a6362d70489ef4b36651ce58a3cc1d0fa3
[ "MIT" ]
203
2016-04-02T07:43:47.000Z
2022-01-05T11:41:03.000Z
Programs/Evernote/PackMemo/PackMemo.py
Psiphonc/EasierLife
ad9143a6362d70489ef4b36651ce58a3cc1d0fa3
[ "MIT" ]
4
2016-05-13T11:20:09.000Z
2018-09-23T01:12:07.000Z
Programs/Evernote/PackMemo/PackMemo.py
Psiphonc/EasierLife
ad9143a6362d70489ef4b36651ce58a3cc1d0fa3
[ "MIT" ]
169
2016-04-26T03:20:04.000Z
2022-03-09T18:36:19.000Z
from EvernoteController import EvernoteController from Memo import Memo MEMO_NAME = 'Memo' MEMO_DIR = 'Memo' MEMO_STORAGE_DIR = 'S-Memo' def f(fn, *args, **kwargs): try: fn(*args, **kwargs) except: pass m = Memo() e = EvernoteController() f(e.create_notebook, MEMO_DIR) f(e.create_notebook, MEMO_STORAGE_DIR) f(e.move_note, MEMO_DIR+'/'+MEMO_NAME, MEMO_STORAGE_DIR) e.create_note('Memo', m.raw_memo(), MEMO_DIR)
22.9
57
0.676856
0
0
0
0
0
0
0
0
29
0.063319
acd8698ede161e1968a17fca40892216abee09bb
128
py
Python
files/build_trigger.py
vexingcodes/vexing.codes-infra
ace315c7fb868f37914573aca353b5454ba7433c
[ "MIT" ]
null
null
null
files/build_trigger.py
vexingcodes/vexing.codes-infra
ace315c7fb868f37914573aca353b5454ba7433c
[ "MIT" ]
null
null
null
files/build_trigger.py
vexingcodes/vexing.codes-infra
ace315c7fb868f37914573aca353b5454ba7433c
[ "MIT" ]
null
null
null
import boto3 def handler(event, _): boto3.client('codebuild').start_build( projectName=event['Records'][0]['customData'])
25.6
50
0.71875
0
0
0
0
0
0
0
0
32
0.25
acd881563c9bf3502bbf14ea75fc78f8d4ff68b7
6,173
py
Python
process/scene/menu.py
Koishilll/pyFuujinrokuDestiny
2b904e15c93195d50e74a66e5bd40d33c3c64a92
[ "MIT" ]
3
2021-06-13T00:22:21.000Z
2021-06-13T11:58:35.000Z
process/scene/menu.py
Koishilll/pyFuujinrokuDestiny
2b904e15c93195d50e74a66e5bd40d33c3c64a92
[ "MIT" ]
null
null
null
process/scene/menu.py
Koishilll/pyFuujinrokuDestiny
2b904e15c93195d50e74a66e5bd40d33c3c64a92
[ "MIT" ]
2
2021-06-14T08:09:42.000Z
2021-06-15T11:41:15.000Z
# menu.py # 维护暂停界面 import pygame from pygame.locals import * import sys from utility import globe from process.scene import menu_confirm from PIL import Image, ImageFilter class Pause_Menu(object): # 暂停页面 def __init__(self): self.button_rect = [] self.rs = globe.destiny.rsManager.image self.pause_title = self.rs["menu_title"] self.confirm_title = self.rs["confirm_title"] self.button_rect.append([100, 220]) # Resume_Start self.button_rect.append([100, 260]) # To_Title_Start self.button_rect.append([90, 300]) # Retry_Start self.image = [] self.image.append(self.rs["Resume_Start"]) # index: 0 self.image.append(self.rs["To_Title_Start"]) # index: 1 self.image.append(self.rs["Retry_Start"]) # index: 2 self.index = 3 globe.scene_menu_choose = False # 按键状态 # 为全局变量是便于二级菜单对状态的重置 def replace(self): if self.index ^ 3: self.image[self.index].set_alpha(90) self.button_rect[self.index][0] += 5 self.index = 3 def update(self): if not globe.scene_menu_choose: for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.quit() sys.exit() if event.type == KEYDOWN: if event.key == pygame.K_F4 and event.mod == pygame.KMOD_LALT: pygame.quit() sys.exit() if event.key == K_UP: if self.index == 3: self.index = 0 self.button_rect[self.index][0] -= 5 # 按键微移 self.image[self.index].set_alpha(1000) # 增大透明度, 突出显示 globe.destiny.msManager.play_SE("select") elif self.index != 0: self.index -= 1 self.button_rect[self.index][0] -= 5 # 按键微移 self.image[self.index].set_alpha(1000) # 增大透明度, 突出显示 self.button_rect[self.index + 1][0] += 5 # 按键微移 self.image[self.index + 1].set_alpha(90) # 重置透明度 globe.destiny.msManager.play_SE("select") else: self.index = 2 self.button_rect[self.index][0] -= 5 self.image[self.index].set_alpha(1000) self.button_rect[0][0] += 5 self.image[0].set_alpha(90) globe.destiny.msManager.play_SE("select") if event.key == K_DOWN: if self.index == 3: self.index = 2 self.button_rect[self.index][0] -= 5 # 按键微移 self.image[self.index].set_alpha(1000) # 增大透明度, 突出显示 
globe.destiny.msManager.play_SE("select") elif self.index != 2: self.index += 1 self.button_rect[self.index][0] -= 5 self.image[self.index].set_alpha(1000) self.button_rect[self.index - 1][0] += 5 self.image[self.index - 1].set_alpha(90) globe.destiny.msManager.play_SE("select") else: self.index = 0 self.button_rect[self.index][0] -= 5 self.image[self.index].set_alpha(1000) self.button_rect[2][0] += 5 self.image[2].set_alpha(90) globe.destiny.msManager.play_SE("select") if event.key == K_z and self.index ^ 3: globe.scene_menu_choose = True globe.destiny.msManager.play_SE("select") if event.key == K_ESCAPE: self.replace() globe.destiny.msManager.unpause() globe.destiny.back() else: if self.index == 0: # 返回游戏 self.replace() globe.destiny.back() globe.destiny.msManager.unpause() if self.index == 1: # 选择了选单第一项, 跳转到确认页面 self.replace() globe.scene_menu_flag = 1 globe.destiny.call(menu_confirm.Scene_Menu_Confirm) if self.index == 2: # 选择了选单第二项, 跳转到确认页面 self.replace() globe.scene_menu_flag = 2 globe.destiny.call(menu_confirm.Scene_Menu_Confirm) def draw(self, screen): screen.blit(self.pause_title, (160, 140)) for i in range(0, 3): screen.blit(self.image[i], self.button_rect[i]) def start(self): pass def stop(self): pass class Scene_Menu(object): # 游戏背景糊化类 def __init__(self): self.rs = globe.destiny.rsManager self.menu = Pause_Menu() self.count = 0 self.fade = pygame.Surface(globe.destiny.screen.get_size()) self.imgtmp = globe.destiny.screen.subsurface(Rect(30, 14, 388, 452)).copy() # 使用枕头库模糊化游戏窗口 for i in range(0, 3): # 转换PyGame图像为pillow图像 raw_str = pygame.image.tostring(self.imgtmp, "RGBA", False) image = Image.frombytes("RGBA", self.imgtmp.get_size(), raw_str) imgblur = image.filter(ImageFilter.BLUR) # 转换pillow图像为PyGame图像 raw_str = imgblur.tobytes("raw", "RGBA") imgblur_pygame = pygame.image.fromstring(raw_str, imgblur.size, "RGBA") self.imgtmp = imgblur_pygame globe.game_active_bg_blured = imgblur_pygame def update(self): self.menu.update() def draw(self, screen): 
screen.blit(self.imgtmp, (30, 14)) self.menu.draw(screen) def start(self): pass def stop(self): pass
39.06962
84
0.491657
6,276
0.970465
0
0
0
0
0
0
750
0.115973
acd8f0f4ddc149972cebacdb480ed672f1f25c59
602
py
Python
setup.py
LSSTDESC/sn_simulation
927d6034bde1b729e96f581804c2bbca93d73548
[ "BSD-3-Clause" ]
null
null
null
setup.py
LSSTDESC/sn_simulation
927d6034bde1b729e96f581804c2bbca93d73548
[ "BSD-3-Clause" ]
null
null
null
setup.py
LSSTDESC/sn_simulation
927d6034bde1b729e96f581804c2bbca93d73548
[ "BSD-3-Clause" ]
2
2020-04-22T08:20:02.000Z
2021-03-06T18:40:52.000Z
from setuptools import setup # get the version here pkg_vars = {} with open("version.py") as fp: exec(fp.read(), pkg_vars) setup( name='sn_simulation', version= pkg_vars['__version__'], description='Simulations for supernovae', url='http://github.com/lsstdesc/sn_simulation', author='Philippe Gris', author_email='philippe.gris@clermont.in2p3.fr', license='BSD', packages=['sn_simulator', 'sn_simu_wrapper'], python_requires='>=3.5', zip_safe=False, install_requires=[ 'sn_tools>=0.1', 'sn_stackers>=0.1', 'dustmaps' ], )
23.153846
51
0.644518
0
0
0
0
0
0
0
0
266
0.44186
acd9840ea3a2e447f7b6fdbc4e46bfc1b2de17cc
1,059
py
Python
test/src/testing/universal/uart.py
Jcc99/Adafruit_Blinka
41f8155bab83039ed9d45276addd3d501e83f3e6
[ "MIT" ]
294
2018-06-30T19:08:27.000Z
2022-03-26T21:08:47.000Z
test/src/testing/universal/uart.py
Jcc99/Adafruit_Blinka
41f8155bab83039ed9d45276addd3d501e83f3e6
[ "MIT" ]
421
2018-06-30T20:54:46.000Z
2022-03-31T15:08:37.000Z
test/src/testing/universal/uart.py
Jcc99/Adafruit_Blinka
41f8155bab83039ed9d45276addd3d501e83f3e6
[ "MIT" ]
234
2018-07-23T18:49:16.000Z
2022-03-28T16:59:48.000Z
import gc from unittest import TestCase from testing import await_true gc.collect() class TestGPSInteractive(TestCase): def test_read_value(self): import adafruit_blinka adafruit_blinka.patch_system() # needed before adafruit_gps imports time import microcontroller.pin gc.collect() import busio gc.collect() import adafruit_gps gc.collect() # configure the last available UART (first uart often for REPL) uartId, uartTx, uartRx = microcontroller.pin.uartPorts[0] uart = busio.UART(uartTx, uartRx, baudrate=9600, timeout=3000) gps = adafruit_gps.GPS(uart) gps.send_command("PMTK314,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0") gps.send_command("PMTK220,1000") def try_fix(): gps.update() return gps.has_fix await_true("GPS fix", try_fix) self.assertTrue(gps.satellites is not None) self.assertTrue(-90 <= gps.latitude < 90) self.assertTrue(-180 <= gps.longitude < 180)
25.214286
81
0.636449
971
0.916903
0
0
0
0
0
0
174
0.164306
acda34b0ee62fcb18b11289e0bca2b9de4cb05fb
678
py
Python
atividade064.py
henrikysena/Atividades_CursosEmVideo_Python
2890c67ea4be2cde6f42c2489d4c96b47d7b597e
[ "MIT" ]
null
null
null
atividade064.py
henrikysena/Atividades_CursosEmVideo_Python
2890c67ea4be2cde6f42c2489d4c96b47d7b597e
[ "MIT" ]
null
null
null
atividade064.py
henrikysena/Atividades_CursosEmVideo_Python
2890c67ea4be2cde6f42c2489d4c96b47d7b597e
[ "MIT" ]
null
null
null
"""Exercício Python 64: Crie um programa que leia vários números inteiros pelo teclado. O programa só vai parar quando o usuário digitar o valor 999, que é a condição de parada. No final, mostre quantos números foram digitados e qual foi a soma entre eles (desconsiderando o flag (999)).""" q = 0 s = 0 print('[Digite 999 para parar o programa]') # se colocar o flag[999] antes do whale, quando ele for 'acionado' ele não entrará no algoritmo n = int(input('Digite um número: ')) while n != 999: q = q + 1 s = s + n n = int(input('Digite um número: ')) print('') print('Foram digitados {} números;'.format(q)) print('A soma dos números foi de {}.'.format(s))
27.12
95
0.687316
0
0
0
0
0
0
0
0
539
0.777778
acda4ed5bc757596386a66d5ba62a7fb43391ce1
25,202
pyw
Python
AutoElectsysConfig.pyw
gousaiyang/AutoElectsys
9db0580f0b323b7f8f83f3ae79ebaac10b6deca7
[ "MIT" ]
3
2019-09-20T21:20:40.000Z
2019-09-22T12:42:39.000Z
AutoElectsysConfig.pyw
gousaiyang/AutoElectsys
9db0580f0b323b7f8f83f3ae79ebaac10b6deca7
[ "MIT" ]
1
2019-09-22T12:38:58.000Z
2019-09-27T05:53:05.000Z
AutoElectsysConfig.pyw
gousaiyang/AutoElectsys
9db0580f0b323b7f8f83f3ae79ebaac10b6deca7
[ "MIT" ]
null
null
null
import base64 import contextlib import os import re import sys import tkinter as tk from tkinter import messagebox, ttk from AutoElectsysUtil import (config_file_name, course_rounds, file_read_json, file_read_lines, file_write_json, file_write_lines, first_categories, general_validation, is_positive_int, pswd_file_name, remove_utf8_bom) os.chdir(os.path.dirname(os.path.realpath(sys.argv[0]))) default_pswd_choice = 0 default_captcha = 0 default_relogin_interval = 60 default_teacher_row = 1 default_round = 2 default_first_category = 3 default_sleep = 2000 class AutoElectsysConfig: def __init__(self): self.init_window() self.init_color() self.init_coords() self.init_widgets() self.init_tips() self.load() self.show_status() def init_window(self): self.window = tk.Tk() self.window.title('AutoElectsys 配置设置') self.window.geometry('552x400') self.window.resizable(False, False) self.window.protocol('WM_DELETE_WINDOW', self.ask_quit) def init_color(self): self.login_color = 'White' self.course_color = 'White' self.misc_color = 'White' self.info_color = 'RoyalBlue' self.success_color = 'LimeGreen' self.error_color = 'Crimson' self.style = ttk.Style() self.style.configure('Login.TRadiobutton', background=self.login_color) self.style.configure('Login.TLabel', background=self.login_color) self.style.configure('Login.TCheckbutton', background=self.login_color) self.style.configure('Course.TLabel', background=self.course_color) self.style.configure('Course.TCheckbutton', background=self.course_color) self.style.configure('Misc.TRadiobutton', background=self.misc_color) self.style.configure('Misc.TLabel', background=self.misc_color) self.style.configure('Info_Login.TLabel', foreground=self.info_color, background=self.login_color) self.style.configure('Info_Course.TLabel', foreground=self.info_color, background=self.course_color) self.style.configure('Info_Misc.TLabel', foreground=self.info_color, background=self.misc_color) self.style.configure('Success.TLabel', 
foreground=self.success_color) self.style.configure('Error.TLabel', foreground=self.error_color) def init_coords(self): self.tab_x = 15 self.tab_y = 30 self.tab_w = 520 self.tab_h = 280 self.x1 = 15 self.yd = 33 self.y1 = 15 self.y2 = self.y1 + self.yd self.y3 = self.y1 + self.yd * 2 self.y4 = self.y1 + self.yd * 3 self.y5 = self.y1 + self.yd * 4 self.y6 = self.y1 + self.yd * 5 self.y7 = self.y1 + self.yd * 6 self.w1 = 14 self.w2 = 8 self.w3 = 7 self.w4 = 22 self.login_x2 = self.x1 + 18 self.login_x3 = self.x1 + 82 self.login_x4 = self.x1 + 150 self.course_x2 = self.x1 + 70 self.misc_x2 = self.x1 + 70 self.misc_x3 = self.x1 + 140 self.bottom_x1 = 180 self.bottom_x2 = 280 self.bottom_y = 350 self.tip_x = 275 self.status_x = 276 self.status_y = 7 def init_widgets(self): self.tab = ttk.Notebook(self.window, width=self.tab_w, height=self.tab_h) self.tab.place(x=self.tab_x, y=self.tab_y) self.login_frame = tk.Frame(self.tab, background=self.login_color) self.tab.add(self.login_frame, text='登录设置') self.pswd_choice_v = tk.IntVar() self.pswd_chrome_radio = ttk.Radiobutton(self.login_frame, text='使用 Chrome 保存的用户名和密码', variable=self.pswd_choice_v, value=1, command=self.on_pswd, style='Login.TRadiobutton') self.pswd_chrome_radio.place(x=self.x1, y=self.y1) self.pswd_custom_radio = ttk.Radiobutton(self.login_frame, text='使用自定义的用户名和密码', variable=self.pswd_choice_v, value=0, command=self.on_pswd, style='Login.TRadiobutton') self.pswd_custom_radio.place(x=self.x1, y=self.y2) self.clear_pswd_button = ttk.Button(self.login_frame, text='清除账号信息', command=self.on_clear) self.clear_pswd_button.place(x=self.tip_x, y=self.y4) self.user_label = ttk.Label(self.login_frame, text='用户名:', style='Login.TLabel') self.user_label.place(x=self.login_x2, y=self.y3) self.user_v = tk.StringVar() self.user_entry = ttk.Entry(self.login_frame, textvariable=self.user_v, width=self.w1) self.user_entry.place(x=self.login_x3, y=self.y3) self.pass_label = ttk.Label(self.login_frame, text='密码:', 
style='Login.TLabel') self.pass_label.place(x=self.login_x2, y=self.y4) self.pass_v = tk.StringVar() self.pass_entry = ttk.Entry(self.login_frame, textvariable=self.pass_v, width=self.w1, show='*') self.pass_entry.place(x=self.login_x3, y=self.y4) self.captcha_v = tk.IntVar() self.captcha_check = ttk.Checkbutton(self.login_frame, text='自动识别验证码', variable=self.captcha_v, command=self.on_captcha, style='Login.TCheckbutton') self.captcha_check.place(x=self.x1, y=self.y5) self.relogin_v = tk.IntVar() self.relogin_check = ttk.Checkbutton(self.login_frame, text='自动重新登录', variable=self.relogin_v, command=self.on_relogin, style='Login.TCheckbutton') self.relogin_check.place(x=self.x1, y=self.y6) self.per_label = ttk.Label(self.login_frame, text='时间间隔:', style='Login.TLabel') self.per_label.place(x=self.login_x2, y=self.y7) self.relogin_interval_v = tk.StringVar() self.relogin_interval = ttk.Entry(self.login_frame, textvariable=self.relogin_interval_v, width=self.w2) self.relogin_interval.place(x=self.login_x3, y=self.y7) self.min_label = ttk.Label(self.login_frame, text='分钟', style='Login.TLabel') self.min_label.place(x=self.login_x4, y=self.y7) self.course_frame = tk.Frame(self.tab, background=self.course_color) self.tab.add(self.course_frame, text='课程设置') self.course_id_label = ttk.Label(self.course_frame, text='课程代码:', style='Course.TLabel') self.course_id_label.place(x=self.x1, y=self.y1) self.course_id_v = tk.StringVar() self.course_id_entry = ttk.Entry(self.course_frame, textvariable=self.course_id_v, width=self.w1) self.course_id_entry.place(x=self.course_x2, y=self.y1) self.teacher_row_label = ttk.Label(self.course_frame, text='教师行数:', style='Course.TLabel') self.teacher_row_label.place(x=self.x1, y=self.y2) self.teacher_row_v = tk.StringVar() self.teacher_row_entry = ttk.Entry(self.course_frame, textvariable=self.teacher_row_v, width=self.w1) self.teacher_row_entry.place(x=self.course_x2, y=self.y2) self.round_label = ttk.Label(self.course_frame, text='选课轮次:', 
style='Course.TLabel') self.round_label.place(x=self.x1, y=self.y3) self.round_v = tk.StringVar() self.round_combo = ttk.Combobox(self.course_frame, values=course_rounds, textvariable=self.round_v, state='readonly', width=self.w1) self.round_combo.bind('<<ComboboxSelected>>', self.on_round) self.round_combo.place(x=self.course_x2, y=self.y3) self.autolocate_v = tk.IntVar() self.autolocate_check = ttk.Checkbutton(self.course_frame, text='自动定位选课页面', variable=self.autolocate_v, command=self.on_locate, style='Course.TCheckbutton') self.autolocate_check.place(x=self.x1, y=self.y4) self.first_cat_label = ttk.Label(self.course_frame, text='一级分类:', style='Course.TLabel') self.first_cat_label.place(x=self.x1, y=self.y5) self.first_cat_v = tk.StringVar() self.first_cat_combo = ttk.Combobox(self.course_frame, values=first_categories, textvariable=self.first_cat_v, state='readonly', width=self.w1) self.first_cat_combo.bind('<<ComboboxSelected>>', self.on_first_cat) self.first_cat_combo.place(x=self.course_x2, y=self.y5) self.second_cat_label = ttk.Label(self.course_frame, text='二级分类:', style='Course.TLabel') self.second_cat_label.place(x=self.x1, y=self.y6) self.second_cat_v = tk.StringVar() self.second_cat_entry = ttk.Entry(self.course_frame, textvariable=self.second_cat_v, width=self.w4) self.second_cat_entry.place(x=self.course_x2, y=self.y6) self.misc_frame = tk.Frame(self.tab, background=self.misc_color) self.tab.add(self.misc_frame, text='其他设置') self.sleep_label = ttk.Label(self.misc_frame, text='刷新间隔:', style='Misc.TLabel') self.sleep_label.place(x=self.x1, y=self.y1) self.sleep_v = tk.StringVar() self.sleep_entry = ttk.Entry(self.misc_frame, textvariable=self.sleep_v, width=self.w2) self.sleep_entry.place(x=self.misc_x2, y=self.y1) self.ms_label = ttk.Label(self.misc_frame, text='毫秒', style='Misc.TLabel') self.ms_label.place(x=self.misc_x3, y=self.y1) self.ok_button = ttk.Button(self.window, text='确定', command=self.store) self.ok_button.place(x=self.bottom_x1, 
y=self.bottom_y) self.window.bind('<Control-s>', self.store) self.cancel_button = ttk.Button(self.window, text='取消', command=self.ask_quit) self.cancel_button.place(x=self.bottom_x2, y=self.bottom_y) self.window.bind('<Escape>', self.ask_quit) def init_tips(self): self.pswd_tip_label = ttk.Label(self.login_frame, style='Info_Login.TLabel') self.captcha_tip_label = ttk.Label(self.login_frame, text='自动识别 jAccount 登录页面的验证码。', style='Info_Login.TLabel') self.captcha_tip_label.place(x=self.tip_x, y=self.y5) self.relogin_tip_label = ttk.Label(self.login_frame, text='每隔一段时间自动重新登录,\n' '刷新 Cookie 以防止页面过期。\n' '需开启自动定位选课页面功能。', style='Info_Login.TLabel') self.relogin_tip_label.place(x=self.tip_x, y=self.y6) self.course_id_tip_label = ttk.Label(self.course_frame, text='欲选课程的课程代码,可在网页上查知。', style='Info_Course.TLabel') self.course_id_tip_label.place(x=self.tip_x, y=self.y1) self.teacher_row_tip_label = ttk.Label(self.course_frame, text='欲选教师或时间段在内层页面中第几行。', style='Info_Course.TLabel') self.teacher_row_tip_label.place(x=self.tip_x, y=self.y2) self.round_tip_label = ttk.Label(self.course_frame, text='“其他”项用于二专选课等。\n' '此模式下不支持自动定位选课页面。', style='Info_Course.TLabel') self.round_tip_label.place(x=self.tip_x, y=self.y3) self.autolocate_tip_label = ttk.Label(self.course_frame, text='按照课程的类型,自动进入所在的页面。', style='Info_Course.TLabel') self.autolocate_tip_label.place(x=self.tip_x, y=self.y4) self.second_cat_tip_label = ttk.Label(self.course_frame, style='Info_Course.TLabel') self.second_cat_tip_label.place(x=self.tip_x, y=self.y6) self.sleep_time_tip_label = ttk.Label(self.misc_frame, text='设置自动刷新选课页面的频率。\n' '若提示“请勿频繁刷新本页面”,\n' '请将此值调大。', style='Info_Misc.TLabel') self.sleep_time_tip_label.place(x=self.tip_x, y=self.y1) def on_pswd(self): if self.pswd_choice_v.get(): new_state = 'disabled' new_text = '将加载您的 Chrome 用户文件,\n您的插件以及保存的密码会被\nChrome 载入。' new_y = self.y1 else: new_state = 'normal' new_text = '将以新用户身份打开 Chrome,\n不会载入您的插件和保存的密码。\n请在左侧填写 jAccount 账号信息。' new_y = self.y2 
self.user_label.config(state=new_state) self.user_entry.config(state=new_state) self.pass_label.config(state=new_state) self.pass_entry.config(state=new_state) self.pswd_tip_label.config(text=new_text) self.pswd_tip_label.place(x=self.tip_x, y=new_y) def on_clear(self): self.user_v.set('') self.pass_v.set('') def on_captcha(self): if self.captcha_v.get(): self.captcha_tip_label.config(state='normal') if self.autolocate_v.get(): self.relogin_check.config(state='normal') else: self.captcha_tip_label.config(state='disabled') self.relogin_check.config(state='disabled') self.relogin_v.set(0) self.relogin_tip_label.config(state='disabled') self.on_relogin() def on_relogin(self): if self.relogin_v.get(): new_state = 'normal' if self.relogin_interval_v.get().strip() == '': self.relogin_interval_v.set(str(default_relogin_interval)) else: new_state = 'disabled' self.relogin_tip_label.config(state=new_state) self.per_label.config(state=new_state) self.relogin_interval.config(state=new_state) self.min_label.config(state=new_state) def on_locate(self): if self.autolocate_v.get(): self.autolocate_tip_label.config(state='normal') self.first_cat_label.config(state='normal') self.first_cat_combo.config(state='normal') self.first_cat_combo.config(state='readonly') self.on_first_cat() if self.captcha_v.get(): self.relogin_check.config(state='normal') else: self.autolocate_tip_label.config(state='disabled') self.first_cat_label.config(state='disabled') self.first_cat_combo.config(state='disabled') self.second_cat_label.config(state='disabled') self.second_cat_entry.config(state='disabled') self.second_cat_tip_label.config(state='disabled') self.relogin_check.config(state='disabled') self.relogin_v.set(0) self.relogin_tip_label.config(state='disabled') self.on_relogin() def on_round(self, event=None): if self.round_v.get() == course_rounds[0]: self.autolocate_v.set(0) self.autolocate_check.config(state='disabled') self.autolocate_tip_label.config(state='disabled') 
self.round_tip_label.place(x=self.tip_x, y=self.y3) self.on_locate() else: self.autolocate_check.config(state='normal') self.round_tip_label.place_forget() def on_first_cat(self, event=None): s = self.first_cat_v.get() if s in (first_categories[0], first_categories[4]): new_state = 'disabled' new_text = '' else: new_state = 'normal' if s == first_categories[1]: new_text = '填写课程所属的模块名称,\n如“个性化教育”等。\n请填写网页上显示的全称,不要简写。' elif s == first_categories[2]: new_text = '填写课程所属的模块名称,\n如“人文学科”“社会科学”等。\n请填写网页上显示的全称,不要简写。' else: new_text = '填写开课院系名称。\n请填写网页上显示的全称,不要简写。\n注:默认选择本年级,跨年级选课\n请勿使用自动定位功能。' self.second_cat_label.config(state=new_state) self.second_cat_entry.config(state=new_state) self.second_cat_tip_label.config(state=new_state, text=new_text) def load(self): self.config_file_exists = os.path.isfile(config_file_name) try: remove_utf8_bom(config_file_name) except Exception: messagebox.showerror('错误', '无法写入配置文件!') self.quit() self.config_file_valid = True try: config = file_read_json(config_file_name) except Exception: self.config_file_valid = False try: cf_pswd_choice = config['Login']['password_saved'] general_validation(isinstance(cf_pswd_choice, bool)) self.pswd_choice_v.set(cf_pswd_choice) except Exception: self.config_file_valid = False self.pswd_choice_v.set(default_pswd_choice) try: cf_captcha = config['Login']['auto_captcha'] general_validation(isinstance(cf_captcha, bool)) self.captcha_v.set(cf_captcha) except Exception: self.config_file_valid = False self.captcha_v.set(default_captcha) try: cf_relogin_interval = config['Login']['relogin_interval'] general_validation(isinstance(cf_relogin_interval, int) and cf_relogin_interval >= 0) if cf_relogin_interval == 0: self.relogin_interval_v.set('') self.relogin_v.set(0) else: self.relogin_interval_v.set(str(cf_relogin_interval)) self.relogin_v.set(1) except Exception: self.config_file_valid = False self.relogin_interval_v.set('') self.relogin_v.set(0) self.relogin_tip_label.config(state='disabled') try: cf_course_id = 
config['CourseInfo']['course_id'] general_validation(isinstance(cf_course_id, str) and re.fullmatch('[A-Za-z0-9]*', cf_course_id)) self.course_id_v.set(cf_course_id) except Exception: self.config_file_valid = False self.course_id_v.set('') try: cf_teacher_row = config['CourseInfo']['teacher_row'] general_validation(isinstance(cf_teacher_row, int) and cf_teacher_row > 0) self.teacher_row_v.set(str(cf_teacher_row)) except Exception: self.config_file_valid = False self.teacher_row_v.set(str(default_teacher_row)) try: cf_round = config['CourseLocate']['round'] general_validation(isinstance(cf_round, int) and cf_round in range(0, 7)) self.round_v.set(course_rounds[cf_round]) except Exception: self.config_file_valid = False self.round_v.set(course_rounds[default_round]) try: cf_auto_locate = config['CourseLocate']['auto_locate'] general_validation(isinstance(cf_auto_locate, bool)) self.autolocate_v.set(cf_auto_locate) except Exception: self.config_file_valid = False self.autolocate_v.set(0) self.autolocate_tip_label.config(state='disabled') try: cf_first_category = config['CourseLocate']['first_category'] general_validation(isinstance(cf_first_category, int) and cf_first_category in (1, 2, 3, 4, 5)) self.first_cat_v.set(first_categories[cf_first_category - 1]) if cf_first_category in (2, 3, 4): try: cf_second_category = config['CourseLocate']['second_category'] general_validation(isinstance(cf_second_category, str)) self.second_cat_v.set(cf_second_category) except Exception: self.config_file_valid = False self.second_cat_v.set('') except Exception: self.config_file_valid = False self.first_cat_v.set(first_categories[default_first_category - 1]) try: cf_sleep = config['Miscellaneous']['sleep_time'] general_validation(isinstance(cf_sleep, int) and cf_sleep > 0) self.sleep_v.set(str(cf_sleep)) except Exception: self.config_file_valid = False self.sleep_v.set(str(default_sleep)) try: username, password = file_read_lines(pswd_file_name) password = 
base64.b85decode(password).decode() except Exception: username = '' password = '' self.user_v.set(username) self.pass_v.set(password) self.on_pswd() self.on_relogin() self.on_captcha() self.on_first_cat() self.on_locate() self.on_round() def show_status(self): if self.config_file_exists: if self.config_file_valid: self.status_label = ttk.Label(self.window, text='成功读取配置文件', style='Success.TLabel') else: self.status_label = ttk.Label(self.window, text='配置文件格式错误或值无效,将自动修正', style='Error.TLabel') else: self.status_label = ttk.Label(self.window, text='配置文件不存在,将创建新的配置文件', style='Error.TLabel') self.status_label.place(x=self.status_x, y=self.status_y, anchor=tk.N) def store(self, event=None): config = { 'Login': {}, 'CourseInfo': {}, 'CourseLocate': {}, 'Miscellaneous': {} } config['Login']['password_saved'] = bool(self.pswd_choice_v.get()) config['Login']['auto_captcha'] = bool(self.captcha_v.get()) if self.relogin_v.get(): cf_relogin_interval = self.relogin_interval_v.get().strip() if cf_relogin_interval == '': messagebox.showwarning('错误', '请设置自动重新登录的间隔时间!') return if not is_positive_int(cf_relogin_interval): messagebox.showwarning('错误', '自动重新登录间隔时间应为正整数!') return else: cf_relogin_interval = '0' config['Login']['relogin_interval'] = int(cf_relogin_interval) cf_course_id = self.course_id_v.get().strip() if not re.fullmatch('[A-Za-z0-9]*', cf_course_id): messagebox.showwarning('错误', '课程代码只能包含字母和数字!') return config['CourseInfo']['course_id'] = cf_course_id cf_teacher_row = self.teacher_row_v.get().strip() if cf_teacher_row == '': messagebox.showwarning('错误', '请填写教师行数!') return if not is_positive_int(cf_teacher_row): messagebox.showwarning('错误', '教师行数应为正整数!') return config['CourseInfo']['teacher_row'] = int(cf_teacher_row) config['CourseLocate']['round'] = course_rounds.index(self.round_v.get()) config['CourseLocate']['auto_locate'] = bool(self.autolocate_v.get()) config['CourseLocate']['first_category'] = first_categories.index(self.first_cat_v.get()) + 1 
config['CourseLocate']['second_category'] = self.second_cat_v.get().strip() cf_sleep = self.sleep_v.get().strip() if cf_sleep == '': messagebox.showwarning('错误', '请设置选课页面刷新间隔!') return if not is_positive_int(cf_sleep): messagebox.showwarning('错误', '选课页面刷新间隔应为正整数!') return config['Miscellaneous']['sleep_time'] = int(cf_sleep) if not messagebox.askokcancel('保存', '确定要保存当前配置吗?'): return try: file_write_json(config_file_name, config, indent=4, ensure_ascii=False) except Exception: messagebox.showerror('错误', '无法写入配置文件!') self.quit() username = self.user_v.get() password = self.pass_v.get() if username == '' and password == '': with contextlib.suppress(Exception): os.remove(pswd_file_name) else: try: password = base64.b85encode(password.encode()).decode() file_write_lines(pswd_file_name, (username, password)) except Exception: messagebox.showerror('错误', '无法写入账号信息文件!') self.quit() self.quit() def __call__(self): self.window.mainloop() def quit(self): self.window.quit() def ask_quit(self, event=None): if messagebox.askokcancel('退出', '确定要退出吗?'): self.quit() def main(): aec = AutoElectsysConfig() aec() if __name__ == '__main__': main()
41.314754
113
0.59281
25,818
0.971624
0
0
0
0
0
0
4,042
0.152115
acda9d18d103c562e1efc9480f044e24f35cf4ad
1,746
py
Python
src/masonite/presets/React.py
cercos/masonite
f7f220efa7fae833683e9f07ce13c3795a87d3b8
[ "MIT" ]
1,816
2018-02-14T01:59:51.000Z
2022-03-31T17:09:20.000Z
src/masonite/presets/React.py
cercos/masonite
f7f220efa7fae833683e9f07ce13c3795a87d3b8
[ "MIT" ]
340
2018-02-11T00:27:26.000Z
2022-03-21T12:00:24.000Z
src/masonite/presets/React.py
cercos/masonite
f7f220efa7fae833683e9f07ce13c3795a87d3b8
[ "MIT" ]
144
2018-03-18T00:08:16.000Z
2022-02-26T01:51:58.000Z
"""React Preset""" import shutil import os from .Preset import Preset from ..utils.filesystem import make_directory from ..utils.location import resources_path, views_path class React(Preset): """ Configure the front-end scaffolding for the application to use ReactJS Will also remove Vue as Vue and React are a bit mutally exclusive """ key = "react" packages = { "react": "^17.0.2", "react-dom": "^17.0.2", "@babel/preset-react": "^7.16.5", } removed_packages = ["vue", "vue-loader"] def install(self): """Install the preset""" self.update_packages(dev=True) self.update_webpack_mix() self.update_js() self.add_components() self.update_css() self.create_view() self.remove_node_modules() def add_components(self): """Copy example React component into application (delete example Vue component if it exists)""" # make components directory if does not exists make_directory(resources_path("js/components/Example.js")) # delete Vue components if exists vue_files = [ resources_path("js/components/HelloWorld.vue"), resources_path("js/App.vue"), ] for vue_file in vue_files: if os.path.exists(vue_file): os.remove(vue_file) # add Vue components shutil.copyfile( self.get_template_path("Example.js"), resources_path("js/components/Example.js"), ) def create_view(self): """Copy an example app view with assets included.""" shutil.copyfile( self.get_template_path("app.html"), views_path("app_react.html") )
28.622951
86
0.612829
1,570
0.899198
0
0
0
0
0
0
675
0.386598
acde47f7cdaa571a2abde3c7a22b637d977809a6
1,445
py
Python
apps/credit_card/models.py
code-yeongyu/backend
cafad5a1cae47ab86ca71028379b72837ea4543d
[ "MIT" ]
1
2021-07-09T01:27:16.000Z
2021-07-09T01:27:16.000Z
apps/credit_card/models.py
code-yeongyu/backend
cafad5a1cae47ab86ca71028379b72837ea4543d
[ "MIT" ]
10
2021-07-08T04:26:55.000Z
2021-07-20T14:01:58.000Z
apps/credit_card/models.py
code-yeongyu/pangpang-eats-backend
cafad5a1cae47ab86ca71028379b72837ea4543d
[ "MIT" ]
3
2021-07-08T04:06:59.000Z
2021-10-02T04:32:16.000Z
from django.db import models from django.core.validators import MinLengthValidator from apps.user.models import User from pangpangeats.settings import AUTH_USER_MODEL from apps.common.models import BaseModel from apps.common.validators import numeric_validator class CreditCard(BaseModel): owner: User = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.CASCADE, null=False) owner_first_name = models.CharField(max_length=5, null=False, blank=False) owner_last_name = models.CharField(max_length=5, null=False, blank=False) alias = models.CharField(max_length=100, null=True, blank=True) card_number = models.CharField( validators=(MinLengthValidator(16), ), max_length=16, null=False, blank=False, ) cvc = models.CharField( validators=( MinLengthValidator(3), numeric_validator, ), max_length=3, null=False, blank=False, ) # both should be a future than now, but not validate them on the model, but validate them in the serializer expiry_year = models.PositiveSmallIntegerField(null=False) expiry_month = models.PositiveSmallIntegerField(null=False) def __str__(self): # pragma: no cover CARD_NUMBER = self.card_number[:4] + "-****" * 3 return f"{self.owner_last_name}{self.owner_first_name} {CARD_NUMBER}"
39.054054
111
0.673356
1,182
0.817993
0
0
0
0
0
0
194
0.134256
acdeb5274d2c50da87f81181774349ab7a26267b
13,450
py
Python
simpleoncall/views.py
simpleoncall/simpleoncall
ffc247045c7ce357871899c84fdfc61f4add06a9
[ "MIT" ]
1
2016-01-11T21:37:44.000Z
2016-01-11T21:37:44.000Z
simpleoncall/views.py
simpleoncall/simpleoncall
ffc247045c7ce357871899c84fdfc61f4add06a9
[ "MIT" ]
48
2015-01-04T16:04:20.000Z
2015-01-25T20:53:49.000Z
simpleoncall/views.py
simpleoncall/simpleoncall
ffc247045c7ce357871899c84fdfc61f4add06a9
[ "MIT" ]
null
null
null
import contextlib import datetime import json import StringIO from django.contrib.auth import logout as logout_user from django.contrib import messages from django.core.urlresolvers import reverse from django.core.exceptions import ObjectDoesNotExist from django.http import HttpResponseRedirect, JsonResponse, Http404, HttpResponse, HttpResponseBadRequest from django.db.models import Count from django.shortcuts import render from django.template.loader import render_to_string from django.utils import timezone from django.utils.http import urlencode, urlquote from django_ical import feedgenerator from simpleoncall.forms.auth import AuthenticationForm, RegistrationForm from simpleoncall.forms.account import EditAccountForm, ChangePasswordForm from simpleoncall.forms.schedule import TeamScheduleForm from simpleoncall.forms.team import CreateTeamForm, SelectTeamForm, InviteTeamForm from simpleoncall.decorators import require_authentication, require_selected_team from simpleoncall.models import APIKey, TeamMember, TeamInvite, User, TeamSchedule from simpleoncall.models import Alert, EventStatus, NotificationSetting, NotificationType @require_authentication() @require_selected_team() def dashboard(request): end = timezone.now() start = end - datetime.timedelta(hours=12) date_added__range = (start, end) alerts = Alert.objects.filter(team=request.team, date_added__range=date_added__range).order_by('-date_added')[:10] alert_statuses = Alert.objects.filter( team=request.team, date_added__range=date_added__range ).values('status').annotate(total=Count('status')) alert_times = Alert.objects.filter( team=request.team, date_added__range=date_added__range ).values('date_added').annotate(total=Count('date_added')).order_by('-date_added') alert_timeseries = {} while start <= end: bucket = start - datetime.timedelta(minutes=start.minute % 60, seconds=start.second, microseconds=start.microsecond) alert_timeseries[bucket.strftime('%s')] = 0 start += datetime.timedelta(minutes=60) for 
alert in alert_times: added = alert['date_added'] bucket = added - datetime.timedelta(minutes=added.minute % 60, seconds=added.second, microseconds=added.microsecond) alert_timeseries[bucket.strftime('%s')] += alert['total'] context = { 'title': 'Dashboard', 'alerts': alerts, 'statuses': dict((a['status'], a['total']) for a in alert_statuses), 'timeseries': json.dumps(alert_timeseries), } return render(request, 'dashboard.html', context) def login(request): if request.user.is_authenticated(): return HttpResponseRedirect(reverse('dashboard')) context = { 'login_form': AuthenticationForm(), 'register_form': RegistrationForm(), 'login': True, 'title': 'Login', } return render(request, 'login.html', context) def register(request): if request.user.is_authenticated(): return HttpResponseRedirect(reverse('dashboard')) context = { 'login_form': AuthenticationForm(), 'register_form': RegistrationForm(), 'register': True, 'title': 'Register', 'next': urlquote(request.GET.get('next')), } return render(request, 'login.html', context) def logout(request): logout_user(request) return HttpResponseRedirect(reverse('login')) @require_authentication() @require_selected_team() def settings(request): api_keys = APIKey.objects.filter(team=request.team) members = TeamMember.objects.filter(team=request.team) context = { 'title': '%s Settings' % (request.team.name, ), 'api_keys': api_keys, 'members': members, 'invite_team_form': InviteTeamForm(), } return render(request, 'settings.html', context) @require_authentication() def account(request): alerts = request.user.get_notification_settings() if not alerts: alerts = [ NotificationSetting(id=0, type=NotificationType.EMAIL, time=0) ] context = { 'title': 'Account', 'edit_account_form': EditAccountForm(instance=request.user), 'change_password_form': ChangePasswordForm(instance=request.user), 'alerts': alerts, } return render(request, 'account.html', context) @require_authentication() @require_selected_team() def alerts(request): alert_count 
= Alert.objects.filter(team=request.team).count() alerts = Alert.objects.filter(team=request.team).order_by('-date_updated')[:10] context = { 'title': 'Alerts', 'alert_count': alert_count, 'alerts': alerts, } return render(request, 'alerts.html', context) @require_authentication() @require_selected_team() def schedule(request): schedule = request.team.get_active_schedule() oncall = None if schedule: oncall = schedule.get_currently_on_call() context = { 'title': 'Schedule', 'schedule': schedule, 'oncall': oncall, } return render(request, 'schedule.html', context) @require_authentication(require_team=False) def create_team(request): create_team_form = CreateTeamForm(request.POST or None) if create_team_form.is_valid(): team = create_team_form.save(request) messages.success(request, 'New team %s created' % team.name) return HttpResponseRedirect(reverse('dashboard')) context = { 'title': 'Create New Team', 'create_team_form': create_team_form, } return render(request, 'team/create.html', context) @require_authentication() def select_team(request): select_team_form = SelectTeamForm(request.POST or None, request.user) if select_team_form.is_valid(): team = select_team_form.save(request) messages.success(request, 'Team changed to %s' % team.name) return HttpResponseRedirect(reverse('dashboard')) context = { 'title': 'Select Team', 'select_team_form': select_team_form, } return render(request, 'team/select.html', context) def invite_accept(request): code = request.GET.get('code') email = request.GET.get('email') if not code or not email: return HttpResponseRedirect(reverse('dashboard')) invite = TeamInvite.objects.get(invite_code=code, email=email) if not invite: return HttpResponseRedirect(reverse('dashboard')) user = User.objects.get(email=email) if user: try: team_member = TeamMember.objects.get(team=invite.team, user=user) except ObjectDoesNotExist: team_member = None if team_member: messages.warning(request, 'already a member of team %s' % (invite.team.name, )) else: 
team_member = TeamMember(team=invite.team, user=user) team_member.save() messages.success(request, 'added to team %s' % (invite.team.name, )) else: args = { 'code': code, 'email': email, } next = '%s?%s' % (reverse('invite-accept'), urlencode(args)) redirect = '%s?next=%s' % (reverse('register'), urlquote(next)) return HttpResponseRedirect(redirect) return HttpResponseRedirect(reverse('dashboard')) @require_authentication() @require_selected_team() def alert_ack(request, alert_id): alert = Alert.objects.get(id=alert_id) if not alert: messages.error(request, 'Alert %s was not found' % (alert_id, )) elif alert.status == EventStatus.ACKNOWLEDGED: messages.warning(request, 'Alert %s already acknowledged' % (alert_id, )) else: alert.status = EventStatus.ACKNOWLEDGED alert.save(user=request.user) messages.success(request, 'Alert %s was acknowledged' % (alert_id, )) return HttpResponseRedirect(reverse('alerts')) @require_authentication() @require_selected_team() def alert_resolve(request, alert_id): alert = Alert.objects.get(id=alert_id, team=request.team) if not alert: messages.error(request, 'Alert %s was not found' % (alert_id, )) elif alert.status == EventStatus.RESOLVED: messages.warning(request, 'Alert %s already resolved' % (alert_id, )) else: alert.status = EventStatus.RESOLVED alert.save(user=request.user) messages.success(request, 'Alert %s was resolved' % (alert_id, )) return HttpResponseRedirect(reverse('alerts')) @require_authentication() @require_selected_team() def alert_view(request, alert_id): alert = Alert.objects.get(id=alert_id, team=request.team) if not alert: messages.error(request, 'Alert %s was not found' % (alert_id, )) return HttpResponseRedirect(reverse('dashboard')) context = { 'title': alert.title, 'event': alert, } return render(request, 'alert.html', context) @require_authentication() @require_selected_team() def edit_schedule(request): msg = None schedule_id = None if 'schedule_id' in request.POST: schedule_id = 
int(request.POST['schedule_id']) dummy_schedule = TeamSchedule(team=request.team) data = None if schedule_id else request.POST or None new_schedule_form = TeamScheduleForm(request.team, data, instance=dummy_schedule) saved = False if request.method == 'POST' and not schedule_id: if new_schedule_form.is_valid(): new_schedule_form.save() saved = True msg = 'New Schedule Added' schedule_forms = [] for schedule in request.team.get_schedules(): data = None if schedule.id == schedule_id: data = request.POST schedule_form = TeamScheduleForm(request.team, data, instance=schedule) if data and schedule_form.is_valid(): schedule_form.save() msg = 'Schedule Updated' schedule_forms.append(schedule_form) if msg: messages.success(request, msg) context = { 'title': 'Edit Schedule', 'active_schedule': request.team.get_active_schedule(), 'schedule_forms': schedule_forms, 'new_schedule_form': new_schedule_form, 'hidden_schedule_form': not saved or request.method != 'POST', } return render(request, 'edit_schedule.html', context) @require_authentication() @require_selected_team() def delete_schedule(request): id = request.GET.get('id') if id: schedule = TeamSchedule.objects.get(team=request.team, id=id) schedule.delete() messages.success(request, 'Schedule %s Deleted' % (schedule.name, )) else: messages.error(request, 'Unknown Schedule Id') return HttpResponseRedirect(reverse('edit-schedule')) @require_authentication() @require_selected_team() def partial(request, partial): context = { 'request': request, 'user': request.user, 'team': request.team, } html = render_to_string('partials/%s.html' % (partial, ), context) return JsonResponse({'html': html}) @require_authentication() @require_selected_team() def team_calendar(request): schedule = request.team.get_active_schedule() if not schedule: return Http404('Unkown Calendar') feed = feedgenerator.ICal20Feed( title='Team %s On-Call Schedule %s' % (request.team.name, schedule.name), link=request.build_absolute_uri(request.path), 
description='Team %s On-Call Schedule %s' % (request.team.name, schedule.name), language='en', subtitle='Generated by SimpleOnCall', author_email='service@simpleoncall.com', author_link='http://simpleoncall.com', author_name='SimpleOnCall', feed_url=request.build_absolute_uri(request.path) ) now = timezone.now() starting_time = datetime.datetime(now.year, now.month, now.day, schedule.starting_time, tzinfo=timezone.utc) next_start_time = None currently_oncall = None for i in xrange(90): now = starting_time + datetime.timedelta(days=i) oncall = schedule.get_currently_on_call(now) if next_start_time is None: next_start_time = now currently_oncall = oncall elif currently_oncall.id != oncall.id: feed.add_item( title='%s On-Call' % (oncall.get_display_name(), ), link=request.build_absolute_uri(reverse('schedule')), description='%s On-Call' % (currently_oncall.get_display_name(), ), start_datetime=next_start_time, end_datetime=now ) next_start_time = now currently_oncall = oncall feed.add_item( title='%s On-Call' % (oncall.get_display_name(), ), link=request.build_absolute_uri(reverse('schedule')), description='%s On-Call' % (currently_oncall.get_display_name(), ), start_datetime=next_start_time, end_datetime=now ) results = None with contextlib.closing(StringIO.StringIO()) as output: feed.write(output, 'utf-8') results = output.getvalue() if results is not None: return HttpResponse(results, content_type='text/calendar; charset=utf-8') return HttpResponseBadRequest('Could not generate iCal at this time')
34.311224
118
0.667658
0
0
0
0
10,252
0.76223
0
0
1,778
0.132193
acdf3e55cd2c8792fb197a984b6a2c5d5f92f167
1,302
py
Python
scraper/scraper3.py
SebChw/IsMusicANaturalLanguage
9cb245f9bea6c0f93863920fceeea867efa73ded
[ "MIT" ]
null
null
null
scraper/scraper3.py
SebChw/IsMusicANaturalLanguage
9cb245f9bea6c0f93863920fceeea867efa73ded
[ "MIT" ]
null
null
null
scraper/scraper3.py
SebChw/IsMusicANaturalLanguage
9cb245f9bea6c0f93863920fceeea867efa73ded
[ "MIT" ]
null
null
null
from bs4 import BeautifulSoup import requests import os """ This script download all songs with given genre from midiworld.com """ genre_name = input( "type in genre name (lowercase, no space, no special characters): ") # Just in case someone don't respect the rules. genre_name = genre_name.lower() genre_name = genre_name.strip() genre_name = "".join(genre_name.split(" ")) folder = os.path.join("genresDataset", genre_name, "midiworld") if not os.path.isdir(folder): os.mkdir(folder) #Here I was lazy, the biggest genre on that page has 38 pages so I've done it that way. #If there is no page we will not get any answer, and just run the loop withouth doing anything. for i in range(1, 38): URL = f"https://www.midiworld.com/search/{i}/?q={genre_name}" page = requests.get(URL) soup = BeautifulSoup(page.content, "html.parser") results = soup.find_all("li") for r in results: link = r.find("a") if link: if "download" in link: link = link['href'] song_title = r.text.split("-")[0].strip() print(f"Downloading: {song_title}") song = requests.get(link) with open(os.path.join(folder, song_title + ".mid"), "wb") as f: f.write(song.content)
35.189189
95
0.635177
0
0
0
0
0
0
0
0
533
0.40937
ace23c42ac6cd6ba7edc32ab86732664871dda3e
23,446
py
Python
imageset-viewer.py
its-jd/imageset-viewer
53998bbcdfe1aad91664ff791c489b3f59a501f9
[ "MIT" ]
53
2018-05-17T06:12:07.000Z
2022-03-28T10:41:24.000Z
imageset-viewer.py
its-jd/imageset-viewer
53998bbcdfe1aad91664ff791c489b3f59a501f9
[ "MIT" ]
7
2020-06-09T15:33:32.000Z
2021-11-12T14:20:54.000Z
imageset-viewer.py
its-jd/imageset-viewer
53998bbcdfe1aad91664ff791c489b3f59a501f9
[ "MIT" ]
18
2019-05-30T15:22:40.000Z
2022-01-06T15:34:30.000Z
#!/usr/bin/env python # coding: utf-8 __author__ = 'Zhuo Zhang' __copyright__ = 'Copyright 2017-2020, Zhuo Zhang' __license__ = 'MIT' __version__ = '0.5' __email__ = 'imzhuo@foxmail.com' __status__ = 'Development' __description__ = 'Tkinter based GUI, visualizing PASCAL VOC object detection annotation' """ Changelog: - 2020-06-16 11:39 v0.5 Support specifying ignore and not ignore class names. Better logger. Fix MacOS font. - 2020-06-13 00:48 v0.4 API change: add class name mapping dict, mapping xml class name to shown class name. Based on this, ImageNet2012 and self-defined VOC format style dataset labels can show. Supported image extension: bmp, jpg, jpeg, png and their upper cases. - 2020-06-09 23:14 v0.3 User select saving directory(optional) for picking up interested images. By pressing left control button, selected image is saved. - 2020-06-02 16:40 v0.2 User choose image and annotation folders separately. Better UI layout. Colorful boxes and class name text. - 2020-06-01 14:44 v0.1 Draw object class name. Add license. Polish meta info. Adjust UI. - 2017.10.22 22:36 v0.0 Created project. Dependencies: Python, Tkinter(GUI), opencv(image processing), lxml(annotation parsing). 
You may need this: pip install --upgrade image pillow lxml numpy """ from PIL import Image, ImageTk, ImageFont, ImageDraw # pillow module import os import cv2 from lxml import etree import numpy as np import random import colorsys import shutil import platform import matplotlib.font_manager as fm # to create font import six import logging from natsort import natsorted import time if six.PY3: import tkinter as tk from tkinter.filedialog import askdirectory else: import Tkinter as tk from tkFileDialog import askdirectory def draw_text(im, text, text_org, color=(0,0,255,0), font=None): """ Draw text on OpenCV's Image (ndarray) Implemented by: ndarray -> pil's image -> draw text -> ndarray Note: OpenCV puttext's drawback: font too large, no anti-alias, can't show Chinese chars @param im: opencv loaded image @param text: text(string) to be put. support Chinese @param font: font, e.g. ImageFont.truetype('C:/Windows/Fonts/msyh.ttc', font_size) Example Usage: font_size = 20 font = ImageFont.truetype('C:/Windows/Fonts/msyh.ttc', font_size) text_org = (256, 256) im = draw_text(im, "object", text_org, font) """ im_pil = Image.fromarray(im) draw = ImageDraw.Draw(im_pil) draw.text(text_org, text, font=font, fill=color) return np.array(im_pil) class BndBox(object): def __init__(self, x1=0, y1=0, x2=0, y2=0, cls_name=None): self.x1 = x1 self.y1 = y1 self.x2 = x2 self.y2 = y2 self.cls_name = cls_name # class name class PascalVOC2007XML: def __init__(self, xml_pth): # TODO: validate xml_pth's content self.tree = etree.parse(xml_pth) self.boxes = [] def get_boxes(self): if len(self.boxes) == 0: for obj in self.tree.xpath('//object'): box = BndBox() for item in obj.getchildren(): if (item.tag=='name'): box.cls_name = item.text elif (item.tag=='bndbox'): coords = [int(float(_.text)) for _ in item.getchildren()] box.x1, box.y1, box.x2, box.y2 = coords self.boxes.append(box) return self.boxes def get_color_table(num_cls=20): hsv_tuples = [(x*1.0 / num_cls, 1., 1.) 
for x in range(num_cls)] colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples)) colors = list(map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)), colors)) random.seed(42) random.shuffle(colors) random.seed(None) return colors class VOCViewer(tk.Tk): def __init__(self, im_dir=None, anno_dir=None, save_dir=None, max_width=None, max_height=None, box_thick=1, name_mapping=None, ignore_names=None, not_ignore_names=None): """ @param im_dir: the directory which contains images, e.g. "JPEGImages" @param max_width: max image width when image is displayed @param max_height: max image height when image is displayed @param box_thick: thickness of bounding box @param name_mapping: dict of: class name in XML => class name to be viewed @param ignore_names: list of class names that will be ignored on viewer @param not_ignore_names: list of all class names to be viewed @note `ignore_names` and `not_ignore_names` shouldn't be setting at the same time @note loading image: tk doesn't support directly load image. Pillow module is required as intermidiate stuff. 
""" #super().__init__() # not working for Python2 tk.Tk.__init__(self) self.init_logger() self.init_layout(im_dir, anno_dir, save_dir, max_width, max_height, box_thick) self.init_dataset(name_mapping, ignore_names, not_ignore_names) def init_logger(self): logger = logging.getLogger() logger.setLevel(logging.WARN) formatter = logging.Formatter( '%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S') time_line = time.strftime('%Y%m%d%H%M', time.localtime(time.time())) logfile = os.getcwd() + '/view-' + time_line + '.log' # print to file via FileHandler fh = logging.FileHandler(logfile) fh.setLevel(logging.DEBUG) fh.setFormatter(formatter) # print to screen via StreamHandler ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) ch.setFormatter(formatter) # add two Handler logger.addHandler(ch) logger.addHandler(fh) self.logger = logger def should_ignore(self, cls_name): if self.ignore_names is not None: if cls_name in self.ignore_names: return True else: return False if self.not_ignore_names is not None: if cls_name in self.not_ignore_names: return False return True return False def init_dataset(self, name_mapping, ignore_names, not_ignore_names): if (ignore_names is not None and not_ignore_names is not None): self.logger.fatal("ignore_names and not_ignore_names can't be setting at the same time") self.name_mapping = dict() if name_mapping is not None: self.name_mapping = name_mapping self.ignore_names = None if ignore_names is not None: self.ignore_names = ignore_names self.not_ignore_names = None if not_ignore_names is not None: self.not_ignore_names = not_ignore_names self.color_table = get_color_table() self.class_to_ind = dict() for cls_name in self.name_mapping.keys(): next_ind = len(self.class_to_ind) self.class_to_ind[cls_name] = next_ind self.supported_im_ext = ['bmp', 'BMP', 'png', 'PNG', 'jpg', 'JPG', 'jpeg', 'JPEG', 'jpe', 'jif', 'jfif', 'jfi'] def get_color_by_cls_name(self, cls_name): ind = 
self.class_to_ind[cls_name] return self.color_table[ind] def init_layout(self, im_dir, anno_dir, save_dir, max_width, max_height, box_thick): # custom settings self.max_width = max_width self.max_height = max_height self.box_thick = box_thick self.bg = '#34373c' self.fg = '#f2f2f2' # MacOSX's tk is wired and I don't want tkmacosx if platform.system()=='Darwin': self.bg, self.fg = self.fg, self.bg # set title, window size and background self.title('ImageSet Viewer ' + __version__) self.width = (int)(0.6 * self.winfo_screenwidth()) self.height = (int)(0.6 * self.winfo_screenheight()) self.geometry('%dx%d+200+100' % (self.width, self.height)) self.configure(bg=self.bg) self.minsize(800, 600) # Setting top level widget's row & column weight, # children widgets won't stretch-and-fill-in until setting this weight # ref: https://blog.csdn.net/acaic/article/details/80963688 self.rowconfigure(0,weight=1) self.columnconfigure(0,weight=1) # Top Level Layout: main_frame & side_frame main_frame_width = (int)(0.8*self.width) main_frame = tk.LabelFrame(self, bg=self.bg, width=main_frame_width) main_frame.grid(row=0, column=0, padx=10, pady=10, sticky=tk.NSEW) side_frame = tk.LabelFrame(self, bg=self.bg) side_frame.grid(row=0, column=1, padx=10, pady=10, sticky=tk.NSEW) # main_frame: directory_frame & image_frame main_frame.rowconfigure(0, weight=20) main_frame.rowconfigure(1, weight=80) main_frame.columnconfigure(0, weight=1) directory_frame = tk.LabelFrame(main_frame, bg=self.bg) directory_frame.grid(row=0, column=0, sticky=tk.NSEW) image_frame_height = (int)(0.7*self.height) image_frame = tk.LabelFrame(main_frame, height=image_frame_height, bg=self.bg) image_frame.grid(row=1, column=0, sticky=tk.NSEW) # keep widgets size stay, instead of change when switching to another image # ref: https://zhidao.baidu.com/question/1643979034294549180.html image_frame.grid_propagate(0) # image_frame image_frame.rowconfigure(0, weight=1) image_frame.columnconfigure(0, weight=1) self.surface = 
self.get_surface_image() # Surface image # self.surface = self.cv_to_tk(cv2.imread('surface.jpg')) # Use image file self.image_label = tk.Label(image_frame, image=self.surface, bg=self.bg, fg=self.fg,compound='center') self.image_label.grid(row=0, column=0, sticky=tk.NSEW) #self.image_label.bind('<Configure>', self.changeSize) #TODO # side_frame side_frame.rowconfigure(0, weight=5) side_frame.rowconfigure(1, weight=95) image_names_label = tk.Label(side_frame, text="Image Files", bg=self.bg, fg=self.fg) image_names_label.grid(row=0, column=0) self.scrollbar = tk.Scrollbar(side_frame, orient=tk.VERTICAL) self.listbox = tk.Listbox(side_frame, yscrollcommand=self.scrollbar.set) self.listbox.grid(row=1, column=0, sticky=tk.NS) # directory_frame directory_frame.rowconfigure(0, weight=5) directory_frame.rowconfigure(1, weight=5) directory_frame.rowconfigure(2, weight=5) directory_frame.columnconfigure(0, weight=1) directory_frame.columnconfigure(1, weight=9) # im_dir button choose_im_dir_btn = tk.Button(directory_frame, text='Image Directory', command=self.select_image_directory, bg=self.bg, fg=self.fg) choose_im_dir_btn.grid(row=0, column=0, sticky=tk.NSEW) self.im_dir = tk.StringVar() im_dir_entry = tk.Entry(directory_frame, text=self.im_dir, state='readonly') im_dir_entry.grid(row=0, column=1, sticky=tk.NSEW) self.im_names = [] if im_dir is not None: self.im_dir.set(im_dir) self.im_names = [_ for _ in os.listdir(self.im_dir.get())] self.im_names = natsorted(self.im_names) for im_name in self.im_names: self.listbox.insert(tk.END, im_name) self.listbox.bind('<<ListboxSelect>>', self.callback) # more key binds see https://www.cnblogs.com/muziyunxuan/p/8297536.html self.listbox.bind('<Control_L>', self.save_image) self.scrollbar.config(command=self.listbox.yview) self.scrollbar.grid(row=1, column=1, sticky=tk.NS) # anno_dir button choose_anno_dir_bn = tk.Button(directory_frame, text='Annotation Directory', command=self.select_annotation_directory, bg=self.bg, fg=self.fg) 
choose_anno_dir_bn.grid(row=1, column=0, sticky=tk.NSEW) self.anno_dir = tk.StringVar() anno_dir_entry = tk.Entry(directory_frame, text=self.anno_dir, state='readonly') anno_dir_entry.grid(row=1, column=1, sticky=tk.NSEW) if anno_dir is not None: self.anno_dir.set(anno_dir) # copy (save) dir button choose_save_dir_btn = tk.Button(directory_frame, text='Copy Save Directory', command=self.select_save_directory, bg=self.bg, fg=self.fg) choose_save_dir_btn.grid(row=2, column=0, sticky=tk.NSEW) self.save_dir = tk.StringVar() save_dir_entry = tk.Entry(directory_frame, text=self.save_dir, state='readonly') save_dir_entry.grid(row=2, column=1, sticky=tk.NSEW) if save_dir is not None: self.save_dir.set(save_dir) def callback(self, event=None): im_id = self.listbox.curselection() if im_id: im_id = im_id[0] self.logger.info('im_id is {:d}'.format(im_id)) im_name = self.listbox.get(im_id) im_ext = im_name.split('.')[-1] if im_ext in self.supported_im_ext: im_pth = os.path.join(self.im_dir.get(), im_name).replace('\\', '/') self.tkim = self.get_tkim(im_pth) self.image_label.configure(image=self.tkim) #self.logger.debug(im_pth) def save_image(self, event): """Save (copy) current displayed (original, no box) image to specified saving directory. This is binding to left-control key now. Useful for manually picking up images. """ im_id = self.listbox.curselection() if im_id: im_name = self.listbox.get(im_id) im_ext = im_name.split('.')[-1] if im_ext in self.supported_im_ext: im_pth = os.path.join(self.im_dir.get(), im_name).replace('\\', '/') save_pth = os.path.join(self.save_dir.get(), im_name).replace('\\', '/') shutil.copyfile(im_pth, save_pth) self.logger.info('Save(copy) to {:s}'.format(save_pth)) #self.logger.debug(im_pth) def get_tkim(self, im_pth): """ Load image and annotation, draw on image, and convert to image. When necessary, image resizing is utilized. 
""" im = cv2.imread(im_pth) self.logger.info('Image file is: {:s}'.format(im_pth)) im_ht, im_wt, im_dt = im.shape if self.max_width is None or self.max_width >= im_wt: show_width = im_wt else: show_width = self.max_width if self.max_height is None or self.max_height >= im_ht: show_height = im_ht else: show_height = self.max_height scale_width = im_wt * 1.0 / show_width scale_height = im_ht * 1.0 / show_height if show_width!=im_wt or show_height!=im_ht: im = cv2.resize(im, (show_width, show_height)) self.logger.info('doing resize, show_width={:d}, im_wt={:d}, show_height={:d}, im_ht={:d}'.format(show_width, im_wt, show_height, im_ht)) # xml_pth = im_pth.replace('JPEGImages', 'Annotations').replace('.jpg', '.xml').replace('.png', '.xml') # We don't assume a standard PASCAL VOC dataset directory. # User should choose image and annotation folder seperately. im_head = '.'.join(im_pth.split('/')[-1].split('.')[:-1]) xml_pth = self.anno_dir.get() + '/' + im_head + '.xml' if os.path.exists(xml_pth): self.logger.info('XML annotation file is {:s}'.format(xml_pth)) boxes = self.parse_xml(xml_pth) for box in boxes: if self.should_ignore(box.cls_name): continue if box.cls_name not in self.name_mapping.keys(): self.name_mapping[box.cls_name] = box.cls_name next_ind = len(self.class_to_ind) self.class_to_ind[box.cls_name] = next_ind xmin = int(box.x1/scale_width) ymin = int(box.y1/scale_height) xmax = int(box.x2/scale_width) ymax = int(box.y2/scale_height) color = self.get_color_by_cls_name(box.cls_name) cv2.rectangle(im, pt1=(xmin, ymin), pt2=(xmax, ymax), color = color, thickness=self.box_thick) font_size = 16 font = self.get_font(font_size) tx = xmin ty = ymin-20 if(ty<0): ty = ymin+10 tx = xmin+10 text_org = (tx, ty) show_text = self.name_mapping[box.cls_name] self.logger.debug('box.cls_name is:' + box.cls_name) self.logger.debug('show_text:' + show_text) im = draw_text(im, show_text, text_org, color, font) else: self.logger.warning("XML annotation file {:s} doesn't 
exist".format(xml_pth)) return self.cv_to_tk(im) @staticmethod def cv_to_tk(im): """Convert OpenCV's (numpy) image to Tkinter-compatible photo image""" im = im[:, :, ::-1] # bgr => rgb return ImageTk.PhotoImage(Image.fromarray(im)) @staticmethod def get_font(font_size): font_pth = None if platform.system()=='Windows': font_pth = 'C:/Windows/Fonts/msyh.ttc' elif (platform.system()=='Linux'): font_pth = fm.findfont(fm.FontProperties(family='DejaVu Mono')) else: font_pth = '/Library/Fonts//Songti.ttc' return ImageFont.truetype(font_pth, font_size) def get_surface_image(self): """Return surface image, which is ImageTK type""" im = np.ndarray((256, 256, 3), dtype=np.uint8) for y in range(256): for x in range(256): im[y, x, :] = (60, 55, 52) # #34373c(RGB)'s BGR split im = cv2.resize(im, ((int)(self.width*0.6), (int)(self.height*0.6))) font_size = 30 font = self.get_font(font_size) text_org = (self.width*0.16, self.height*0.26) text = 'ImageSet Viewer' im = draw_text(im, text, text_org, color=(255, 255, 255, 255), font=font) return self.cv_to_tk(im) def parse_xml(self, xml_pth): anno = PascalVOC2007XML(xml_pth) return anno.get_boxes() def select_image_directory(self): im_dir = askdirectory() self.listbox.delete(0, len(self.im_names)-1) # delete all elements self.fill_im_names(im_dir) def select_annotation_directory(self): anno_dir = askdirectory() self.anno_dir.set(anno_dir) # TODO: validate anno_dir def select_save_directory(self): save_dir = askdirectory() self.save_dir.set(save_dir) # the directory to save(copy) select images def fill_im_names(self, im_dir): if im_dir is not None: self.im_dir.set(im_dir) # Get natural order of image file names self.im_names = [_ for _ in os.listdir(im_dir)] self.im_names = natsorted(self.im_names) for im_name in self.im_names: self.listbox.insert(tk.END, im_name) def example1(): """The simplest example: don't specify any parameters. 
Choose imd dir and xml dir in GUI """ app = VOCViewer() app.mainloop() def example2(): """Specify directories & drawing related settings """ app = VOCViewer(im_dir = '/Users/chris/data/VOC2007/JPEGImages', # image directory anno_dir = '/Users/chris/data/VOC2007/Annotations', # XML directory save_dir = '/Users/chris/data/VOC2007/save', # Picking images saving directory max_width = 1000, # max allowed shown image width is 1000 max_height = 800, # max allowed shown image height is 800 box_thick = 2, # bounding box thickness ) app.mainloop() def example3(): """Specify name mapping """ # category mapping dict: key for class name in XML, # value for shown class name in displayed image # note: you can make key=val if it is understandable voc_mapping = { '__background__': '背景', 'aeroplane': '飞机', 'bicycle': '自行车', 'bird': '鸟', 'boat': '船', 'bottle': '瓶子', 'bus': '公交车', 'car': '汽车', 'cat': '猫', 'chair': '椅子', 'cow': '牛', 'diningtable': '餐桌', 'dog': '狗', 'horse': '马', 'motorbike': '摩托车', 'person': '人', 'pottedplant': '盆栽', 'sheep': '绵羊', 'sofa': '沙发', 'train': '火车', 'tvmonitor': '显示器' } app = VOCViewer(im_dir = '/Users/chris/data/VOC2007/JPEGImages', # image directory anno_dir = '/Users/chris/data/VOC2007/Annotations', # XML directory save_dir = '/Users/chris/data/VOC2007/save', # Picking images saving directory max_width = 1000, # max allowed shown image width is 1000 max_height = 800, # max allowed shown image height is 800 box_thick = 2, # bounding box thickness name_mapping = voc_mapping #!! ) app.mainloop() def example4(): """Specify ignore_names / not_ignore_names You can specify either ignore_names or not_ignore_names. But can't specify neither. 
""" app = VOCViewer(im_dir = '/Users/chris/data/VOC2007/JPEGImages', # image directory anno_dir = '/Users/chris/data/VOC2007/Annotations', # XML directory save_dir = '/Users/chris/data/VOC2007/save', # Picking images saving directory max_width = 1000, # max allowed shown image width is 1000 max_height = 800, # max allowed shown image height is 800 box_thick = 2, # bounding box thickness not_ignore_names = ['person'] ) app.mainloop() def example5(): """ Take ImageNet2012 as example. You can imitate this and show your own PASCAL-VOC-Style-Labeled imageset """ fin = open('imagenet_cls_cn.txt', encoding='UTF-8') lines = [_.strip() for _ in fin.readlines()] fin.close() ilsvrc2012_cls_dict = dict() for item in lines: item = item.split(' ') digit_cls_name = item[0] literal_cls_name = ' '.join(item[1:]) ilsvrc2012_cls_dict[digit_cls_name] = literal_cls_name app = VOCViewer(im_dir = 'D:/data/ILSVRC2012/ILSVRC2012_img_train/n01440764', # image directory anno_dir = 'D:/data/ILSVRC2012/ILSVRC2012_bbox_train_v2/n01440764', # XML directory save_dir = None, # not specified saving direcotry max_width = 1000, # max allowed shown image width is 1000 max_height = 800, # max allowed shown image height is 800 box_thick = 2, # bounding box thickness name_mapping = ilsvrc2012_cls_dict ) app.mainloop() if __name__ == '__main__': example1() #example2() #example3() #example4() #example5()
38.753719
149
0.612258
16,402
0.697245
0
0
609
0.025888
0
0
7,416
0.315253
ace2b1a29a3abb15aedb474de4948707e3d81eeb
416
py
Python
erpnext_feature_board/hook_events/review_request.py
akurungadam/erpnext_feature_board
8c99b4dfaa79d86d8e8b46fa1bf235d0bfa471e0
[ "MIT" ]
15
2021-05-31T16:29:22.000Z
2021-12-02T20:18:32.000Z
erpnext_feature_board/hook_events/review_request.py
akurungadam/erpnext_feature_board
8c99b4dfaa79d86d8e8b46fa1bf235d0bfa471e0
[ "MIT" ]
18
2021-06-01T07:39:08.000Z
2021-07-14T09:02:35.000Z
erpnext_feature_board/hook_events/review_request.py
akurungadam/erpnext_feature_board
8c99b4dfaa79d86d8e8b46fa1bf235d0bfa471e0
[ "MIT" ]
6
2021-06-01T07:19:53.000Z
2021-12-28T20:06:25.000Z
import frappe def delete_approved_build_requests(): """ Scheduled hook to delete approved Review Requests for changing site deployments. """ approved_build_requests = frappe.get_all( "Review Request", filters={ "request_type": ["in", ["Build", "Upgrade", "Delete"]], "request_status": "Approved", }, ) for request in approved_build_requests: frappe.delete_doc("Review Request", request.name)
21.894737
81
0.71875
0
0
0
0
0
0
0
0
190
0.456731
ace31bc75f6c304a3efff0e6911ae27ee2b4ecee
1,064
py
Python
DataProcessor/dev_set_partition.py
cherry979988/feedforward-RE
546a608a8cb5b35c475e577995df70a89affa15e
[ "MIT" ]
1
2019-08-25T00:44:27.000Z
2019-08-25T00:44:27.000Z
DataProcessor/dev_set_partition.py
cherry979988/feedforward-RE
546a608a8cb5b35c475e577995df70a89affa15e
[ "MIT" ]
null
null
null
DataProcessor/dev_set_partition.py
cherry979988/feedforward-RE
546a608a8cb5b35c475e577995df70a89affa15e
[ "MIT" ]
null
null
null
__author__ = 'QinyuanYe' import sys import random from shutil import copyfile # split the original train set into # 90% train-set (train_split.json) and 10% dev-set (dev.json) if __name__ == "__main__": random.seed(1234) if len(sys.argv) != 3: print 'Usage:feature_generation.py -DATA -ratio' exit(1) dataset = sys.argv[1] ratio = float(sys.argv[2]) dir = 'data/source/%s' % sys.argv[1] original_train_json = dir + '/train.json' train_json = dir + '/train_split.json' dev_json = dir + '/dev.json' if 'TACRED' in dataset or 'Sub' in dataset: print '%s has a provided dev set, skip splitting' % dataset copyfile(original_train_json, train_json) exit(0) fin = open(original_train_json, 'r') lines = fin.readlines() dev_size = int(ratio * len(lines)) random.shuffle(lines) dev = lines[:dev_size] train_split = lines[dev_size:] fout1 = open(dev_json, 'w') fout1.writelines(dev) fout2 = open(train_json, 'w') fout2.writelines(train_split)
24.744186
67
0.640038
0
0
0
0
0
0
0
0
283
0.265977
ace6e11dd2c37cb4d2255b5a7148639ad40c246e
717
py
Python
NetCatKS/Logger/api/implementers/__init__.py
dimddev/NetCatKS-CP
2d9e72b2422e344569fd4eb154866b98e9707561
[ "BSD-2-Clause" ]
null
null
null
NetCatKS/Logger/api/implementers/__init__.py
dimddev/NetCatKS-CP
2d9e72b2422e344569fd4eb154866b98e9707561
[ "BSD-2-Clause" ]
null
null
null
NetCatKS/Logger/api/implementers/__init__.py
dimddev/NetCatKS-CP
2d9e72b2422e344569fd4eb154866b98e9707561
[ "BSD-2-Clause" ]
null
null
null
__author__ = 'dimd' from twisted.python import log from zope.interface import implementer from NetCatKS.Logger.api.interfaces import ILogger GLOBAL_DEBUG = True @implementer(ILogger) class Logger(object): def __init__(self): pass def debug(self, msg): if GLOBAL_DEBUG is True: log.msg('[ ====== DEBUG ]: {}'.format(msg)) def info(self, msg): log.msg('[ ++++++ INFO ]: {}'.format(msg)) def warning(self, msg): log.msg('[ !!!!!! WARNING ]: {}'.format(msg)) def error(self, msg): log.msg('[ ------ ERROR ]: {}'.format(msg)) def critical(self, msg): log.msg('[ @@@@@@ CRITICAL ]: {}'.format(msg)) __all__ = [ 'Logger' ]
19.378378
55
0.563459
500
0.69735
0
0
522
0.728033
0
0
128
0.178522
ace7ab1c03480ac4b4f41e3fb954c1d488666de5
247
py
Python
trustpayments/models/failure_category.py
TrustPayments/python-sdk
6fde6eb8cfce270c3612a2903a845c13018c3bb9
[ "Apache-2.0" ]
2
2020-01-16T13:24:06.000Z
2020-11-21T17:40:17.000Z
postfinancecheckout/models/failure_category.py
pfpayments/python-sdk
b8ef159ea3c843a8d0361d1e0b122a9958adbcb4
[ "Apache-2.0" ]
4
2019-10-14T17:33:23.000Z
2021-10-01T14:49:11.000Z
postfinancecheckout/models/failure_category.py
pfpayments/python-sdk
b8ef159ea3c843a8d0361d1e0b122a9958adbcb4
[ "Apache-2.0" ]
2
2019-10-15T14:17:10.000Z
2021-09-17T13:07:09.000Z
# coding: utf-8 from enum import Enum, unique @unique class FailureCategory(Enum): TEMPORARY_ISSUE = "TEMPORARY_ISSUE" INTERNAL = "INTERNAL" END_USER = "END_USER" CONFIGURATION = "CONFIGURATION" DEVELOPER = "DEVELOPER"
17.642857
39
0.688259
189
0.765182
0
0
197
0.797571
0
0
78
0.315789
ace8a1d4982694d6c21b44de13de44e692abcb9d
20,146
py
Python
func_moead.py
dynamic-sevn/moead_svm
0f119d5c0b840d1897b7c8067c4563285fd70031
[ "BSD-2-Clause" ]
1
2021-07-31T08:54:49.000Z
2021-07-31T08:54:49.000Z
func_moead.py
dynamic-sevn/moead_svm
0f119d5c0b840d1897b7c8067c4563285fd70031
[ "BSD-2-Clause" ]
null
null
null
func_moead.py
dynamic-sevn/moead_svm
0f119d5c0b840d1897b7c8067c4563285fd70031
[ "BSD-2-Clause" ]
null
null
null
# -*- coding: utf-8 -*- """ Author: Xi Lin <xi.lin@my.cityu.edu.hk> website: http://www.cs.cityu.edu.hk/~xilin4/ github: This code is a demo for this paper: A Decomposition based Multiobjective Evolutionary Algorithm with Classification Xi Lin, Qingfu Zhang, Sam Kwong Proceedings of the 2016 IEEE Congress on Evolutionary Computation (CEC16) Vancouver, Canada, July 2016 """ import os import sys import copy import numpy as np import scipy from sklearn import svm from test_instances import * path = os.path.abspath(os.path.dirname(sys.argv[0])) class Problem: """Multi-objective Problem name: MoP name dim: dimension of decision space nobj:dimension of objective space domain: domain for decision variable """ def __init__(self,name,dim,nobj,domain = None): self.name = name self.dim = dim self.nobj = nobj if domain is None: self.domain = np.tile(np.array([0,1]),(self.dim,1)) def evaluate(self,x): #evaluate objective values for given decision variable return testfunc(self,x) class Params: """Parameters for MOEA/D popsize: population size niche: neighbourhood size dmethod: decomposition method iteration: number of maximum iterations in each run; not used in this demo stop_nfeval: number of maximum function evaluations in each run updatedprob: probability that parent solutions are selected from the neighbourhood, but not the whole population updatednb: maximum number of current solutions which would be replaced by each new solution F, CR: parameters for DE operator """ def __init__(self,popsize,niche,dmethod,iteration, stop_nfeval,updateprob,updatenb,F,CR): self.popsize = popsize self.niche = niche self.dmethod = dmethod self.iteration = iteration self.stop_nfeval = stop_nfeval self.updateprob = updateprob self.updatenb = updatenb self.F = F self.CR = CR class Subproblem: """Subproblem in MOEA/D weight: decomposition weight neighbour: index of neighbours curpoint: Individual Class current best solution subpoint: Individual Class current sub-best solution, for classification 
training """ def __init__(self,weight,mop,params): self.weight = weight self.neighbour = np.full(params.niche,np.nan) self.curpoint = np.full(mop.dim,np.nan) self.subpoint = np.full(mop.dim,np.nan) class Individual: """Solution in MOEA/D parameter: decision variable value: objective value """ def __init__(self,parameter): self.parameter = parameter self.value = float('Inf') def init_params(ins): """Initialize parameters for test instance The given parameters in this function are the same as in the paper: A Decomposition based Multiobjective Evolutionary Algorithm with Classification Xi Lin, Qingfu Zhang, Sam Kwong IEEE World Congress on Computational Intelligence(IEEE WCCI), Vancouver, Canada, July 2016 Parameters ---------- ins: name for test instance Returns ------- dim: dimension of decision space nobj: dimenson of objective space popsize: population size niche: neighbourhood size stop_nfeval: number of maximum function evaluations in each run """ if ins in ['ZDT1','ZDT2','ZDT3']: dim = 30 nobj = 2 popsize = 300 niche = 30 stop_nfeval = 100000 if ins in ['ZDT4','ZDT6']: dim = 10 nobj = 2 popsize = 300 niche = 30 stop_nfeval = 100000 if ins in ['DTLZ1']: dim = 10 nobj = 3 popsize = 595 niche = 50 stop_nfeval = 100000 if ins in ['DTLZ2']: dim = 30 nobj = 3 popsize = 595 niche = 50 stop_nfeval = 100000 if ins in ['UF1','UF2','UF3','UF4','UF5','UF6','UF7']: dim = 30 nobj = 2 popsize = 600 niche = 30 stop_nfeval = 300000 if ins in ['UF8','UF9','UF10']: dim = 30 nobj = 3 popsize = 595 niche = 50 stop_nfeval = 300000 return dim, nobj, popsize, niche, stop_nfeval def init_point(mop): """Initialize a solution with randomly generated decision variable Parameters ---------- mop: Problem Class multi-objective problem to be sloved Returns ------- point: Individual Class a solution with randomly generated decision variable, which is not evaluated yet """ lowend = mop.domain[:,0] span = mop.domain[:,1] - lowend para = lowend + span * np.random.rand(mop.dim) point = 
Individual(para) return point def init_subproblem_classification(mop,params): """Initialize all subproblems and ideal point for MOEA/D-SVM Parameters ---------- mop: Problem Class multi-objective problem to be sloved params: Params Class parameters for moea/d Returns ------- subproblems: Subproblem Class all subproblems initialized accroding to mop and params idealpoint: estimated idealpoint for Tchebycheff decomposition """ #load already genereted weights vector in weight file weights = np.loadtxt(path + "/weight/W%dD_%d.dat"%(mop.nobj,params.popsize)) idealpoint = np.ones(mop.nobj) * float('Inf') subproblems = [] #initialize Subproblem Class for each weight vetor for i in range(params.popsize): sub = Subproblem(weights[i],mop,params) subproblems.append(sub) #distmat[i,j] is the distance btw sub[i] and sub[j], distmat[i,i] = nan distmat = np.full([params.popsize, params.popsize],np.nan) #initialize current best/sub-best point for each subproblem and idealpoint for i in range(params.popsize): for j in range(i+1,params.popsize): a = subproblems[i].weight b = subproblems[j].weight distmat[i,j] = np.linalg.norm(a - b) distmat[j,i] = distmat[i,j] #calculate the neighbourhood for each subproblem subproblems[i].neighbour = distmat[i,].argsort()[0:params.niche] subproblems[i].curpoint = init_point(mop) subproblems[i].curpoint.value = mop.evaluate( subproblems[i].curpoint.parameter) subproblems[i].subpoint = init_point(mop) subproblems[i].subpoint.value = mop.evaluate( subproblems[i].subpoint.parameter) idealpoint = np.minimum.reduce([idealpoint, subproblems[i].curpoint.value, subproblems[i].subpoint.value]) #swap(curpoint,subpoint) if g_i(subpoint) < g_i(curpoint) #where g_i() is value function for the i-th subproblem for i in range(params.popsize): curvalue = subobjective_vec(subproblems[i].weight, subproblems[i].curpoint.value.reshape(1,-1), idealpoint,params.dmethod) subvalue = subobjective_vec(subproblems[i].weight, subproblems[i].subpoint.value.reshape(1,-1), 
idealpoint,params.dmethod) if subvalue < curvalue: subproblems[i].curpoint, subproblems[i].subpoint = subproblems[i].subpoint, subproblems[i].curpoint return (subproblems, idealpoint) def init_subproblem(mop,params): """Initialize all subproblems and ideal point for MOEA/D Parameters ---------- mop: Problem Class multi-objective problem to be sloved params: Params Class parameters for moea/d Returns ------- subproblems: Subproblem Class all subproblems initialized accroding to mop and params idealpoint: estimated idealpoint for Tchebycheff decomposition """ weights = np.loadtxt(path + "/weight/W%dD_%d.dat"%(mop.nobj,params.popsize)) idealpoint = np.ones(mop.nobj) * float('Inf') subproblems = [] #initialize Subproblem Class for each weight vetor for i in range(params.popsize): sub = Subproblem(weights[i],mop,params) subproblems.append(sub) #distmat[i,j] is the distance btw sub[i] and sub[j], distmat[i,i] = nan distmat = np.full([params.popsize, params.popsize],np.nan) #initialize current best/sub-best point for each subproblem and idealpoint for i in range(params.popsize): for j in range(i+1,params.popsize): a = subproblems[i].weight b = subproblems[j].weight distmat[i,j] = np.linalg.norm(a - b) distmat[j,i] = distmat[i,j] subproblems[i].neighbour = distmat[i,].argsort()[0:params.niche] subproblems[i].curpoint = init_point(mop) subproblems[i].curpoint.value = mop.evaluate( subproblems[i].curpoint.parameter) idealpoint = np.minimum(idealpoint,subproblems[i].curpoint.value) return (subproblems, idealpoint) def terminate(n,params): """Decide on whether to terminate current algo run or not Parameters ---------- n: number of total evaluations have been conducted in current run params: Params Class parameters for moea/d Returns ------- boolean expression True if number of total evaluations exceed params.stop_nfeval """ return n >= params.stop_nfeval def genetic_op(index,updateneighbour,mop, params,subproblems,ptype): """Generated a new solutions for the index-th 
subproblem Parameters ---------- index: subproblem index updateneighbour: boolean expression whether parent solutions are selected from the neighbourhood or not mop: Problem Class multi-objective problem to be sloved params: Params Class parameters for moea/d subproblems: Subproblem Class all subproblems ptype: the type of generated solutions, always "current" in this demo Returns ------- newpoint: Individual Class a new generated solution """ #select parents parents_index = mate_select(index,updateneighbour, subproblems,params,2) #generate a new solution using DE crossover newpoint = de_crossover(index,parents_index,subproblems, params.F,params.CR,mop,ptype) #mutate new solution mutate(newpoint,mop,1.0/mop.dim,20) return newpoint def mate_select(index,updateneighbour, subproblems,params,size): """Select parents for new solution generation Parameters ---------- index: subproblem index updateneighbour: boolean expression whether parents are selected from the neighbourhood or not subproblems: Subproblem Class all subproblems params: Params Class parameters for moea/d size: number of parents selected Returns ------- selected_list: List, len(List) = size list of selected parents' indexes """ selected_list = [] #decide on whether parents are selected from the neighbourhood or not if(updateneighbour): selindex = subproblems[index].neighbour else: selindex = range(params.popsize) #select list of selected parents' indexes while len(selected_list) < size: r = np.random.rand(1)[0] parent = selindex[np.int(np.floor(len(selindex)*r))] if (not parent in selected_list): selected_list.append(parent) return selected_list def de_crossover(index,parents_index,subproblems,F,CR,mop,ptype): """Generate a new solution using DE crossover Parameters ---------- index: subproblem index parents_index: List list of selected parents' indexes subproblems: Subproblem Class all subproblems F,CR: DE parameters mop: Problem Class multi-objective problem to be sloved ptype: the type of generated 
solutions, always "current" in this demo Returns ------- newpoint: Individual Class a new generated solution """ #initialize new solution with randomly generated decision variable newpoint = init_point(mop) #decide the decision variable using DE crossover if ptype == 'current': x1 = subproblems[index].curpoint.parameter x2 = subproblems[parents_index[0]].curpoint.parameter x3 = subproblems[parents_index[1]].curpoint.parameter cross = x1 + F * (x2 - x3) newpoint.parameter = np.copy(subproblems[index].curpoint.parameter) crossindex = np.random.rand(mop.dim) < CR newpoint.parameter[crossindex] = cross[crossindex] for i in range(mop.dim): r1 = np.random.rand(1)[0] if r1 < CR: newpoint.parameter[i] = cross[i] #handle the boundary lowerbound = mop.domain[i,0] upperbound = mop.domain[i,1] if newpoint.parameter[i] < lowerbound: r2 = np.random.rand(1)[0] newpoint.parameter[i] = lowerbound + r2*(x1[i] - lowerbound) if newpoint.parameter[i] > upperbound: r2 = np.random.rand(1)[0] newpoint.parameter[i] = upperbound - r2*(upperbound - x1[i]) return newpoint def mutate(newpoint,mop,rate,eta): """Mutate new generated solution Parameters ---------- index: subproblem index mop: Problem Class multi-objective problem to be sloved rate,eta: mutation parameters Returns ------- newpoint is mutable, hence no return is needed """ #polynomial mutate for i in range(mop.dim): r1 = np.random.rand(1)[0] if(r1 < rate): y = newpoint.parameter[i] yl = mop.domain[i,0] yu = mop.domain[i,1] r2 = np.random.rand(1)[0] if(r2 < 0.5): sigma = (2 * r2) ** (1.0/(eta + 1)) - 1 else: sigma = 1 - (2 - 2*r2) ** (1.0/(eta + 1)) newpoint.parameter[i] = y + sigma * (yu - yl) if newpoint.parameter[i] > yu: newpoint.parameter[i] = yu if newpoint.parameter[i] < yl: newpoint.parameter[i] = yl def subobjective_vec(weight,value,idealpoint,dmethod): """Calculate the value of subproblem with given weight, value and idealpoint Parameters ---------- weight: weight vector value: objective value idealpoint: idealpoint 
dmethod: decomposition method; in this demo, dmethod is always 'tc' which stands for Tchebycheff decomposition Returns ------- mutated_newpoint: Individual Class a new generated solution """ if dmethod is 'tc': new_weight = np.copy(weight) new_weight[new_weight == 0.0] = 0.0001 absdiff = np.abs(value - idealpoint) return np.amax(new_weight * absdiff,axis = 1) def update_vec(index,updateneighbour,newpoint, mop,params,subproblems,idealpoint): """Updated current population using new generated solutions Parameters ---------- index: subproblem index updateneighbour: boolean expression whether parent solutions are selected from the neighbourhood or not newpoint: Individual Class a new generated solution mop: Problem Class multi-objective problem to be sloved params: Params Class parameters for moea/d subproblems: Subproblem Class all subproblems idealpoint: estimated idealpoint Returns ------- is_updated: 1 if at least one current solution was replaced 0 otherwise """ is_updated = 0 #Classes subproblems[k] k = 1,2,..., is mutable, hence no return is needed if(updateneighbour): updateindex = np.array(subproblems[index].neighbour) else: updateindex = np.array(range(params.popsize)) np.random.shuffle(updateindex) weight_vec = np.array([subproblems[k].weight for k in updateindex]) oldvalue_vec = np.array( [subproblems[k].curpoint.value for k in updateindex]) oldobj_vec = subobjective_vec(weight_vec,oldvalue_vec, idealpoint,params.dmethod) newobj_vec = subobjective_vec(weight_vec,newpoint.value, idealpoint,params.dmethod) #contain maximum(not always) 2 elementc replaceindex = updateindex[newobj_vec < oldobj_vec][:2] for k in replaceindex: subproblems[k].subpoint = subproblems[k].curpoint subproblems[k].curpoint = newpoint is_updated = 1 return is_updated def calculateigd(truepf, pf): """Calculate IGD value for truepf and pf Parameters ---------- truepf: "true" pf value pf: estimated pf value Returns ------- igd: IGD value """ Y = scipy.spatial.distance.cdist(truepf, pf, 
'euclidean') mindist = np.min(Y, axis=1) igd = np.mean(mindist) return igd def trainSVMmodel(subproblems,params,gamma=1,C=100): """Train SVM Classification model Parameters ---------- subproblems: Subproblem Class all subproblems params: Params Class parameters for moea/d gamma, C: kernel parameters In this demo, the kernel we use is always RBF kernel with fixed gamma = 1 and C = 100. Returns ------- classifier: trained SVM classifier """ #curpoints as positive samples, and subpoints as negative samples curX = np.array([subproblems[k].curpoint.parameter for k in range(params.popsize)]) subX = np.array([subproblems[k].subpoint.parameter for k in range(params.popsize)]) trainX = np.concatenate((curX, subX)) trainLabel = np.concatenate((np.ones(params.popsize), np.zeros(params.popsize))) classifier = svm.SVC(gamma = gamma, C = C) classifier.fit(trainX, trainLabel) return classifier
30.570561
112
0.563983
2,385
0.118374
0
0
0
0
0
0
9,264
0.459797
ace927c4fc1e25bbda7ec5e5f7a33fa84304d5ec
9,388
py
Python
videoServer.py
Hugoargui/eyeDetector
8c0361f90dacc2e5d8262cca40b34165fdda841a
[ "MIT" ]
null
null
null
videoServer.py
Hugoargui/eyeDetector
8c0361f90dacc2e5d8262cca40b34165fdda841a
[ "MIT" ]
null
null
null
videoServer.py
Hugoargui/eyeDetector
8c0361f90dacc2e5d8262cca40b34165fdda841a
[ "MIT" ]
3
2015-04-11T15:23:22.000Z
2021-02-09T07:19:07.000Z
## MIT LICENSE #Copyright (c) 2014 Hugo Arguinariz. #http://www.hugoargui.com # #Permission is hereby granted, free of charge, to any person #obtaining a copy of this software and associated documentation #files (the "Software"), to deal in the Software without #restriction, including without limitation the rights to use, #copy, modify, merge, publish, distribute, sublicense, and/or sell #copies of the Software, and to permit persons to whom the #Software is furnished to do so, subject to the following #conditions: #The above copyright notice and this permission notice shall be #included in all copies or substantial portions of the Software. #THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, #EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES #OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND #NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT #HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, #WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR #OTHER DEALINGS IN THE SOFTWARE. ## This module requires the SimpleWebSocketServer module by Opiate ## http://opiate.github.io/SimpleWebSocketServer/ ## That software is also distributed under MIT license ## I am in not the author of SimpleWebSocketServer.py ####################################################################################################!/ ## videoServer.py ## Inputs: NONE ## Outputs: NONE ## Non standard modules: eyeDetector, SimpleWebSocketServer ####################################################################################################!/usr/bin/env python ## This module runs on the server side ## It is is expected to continuously run on the background ## This is not a web server, a web app will need a real web server (Apache?) 
running in parallel ## On the client side (website) the browser is expected to open a WebSocket to this server ## The browser can capture webcam images from the user using Javascript + WebRTC ## The browser then sends several video frames per second to this server via the WebRTC socket ## For each video frame, this server uses the eyeDetector module to detect the eyes on the image ## This is done in 3 steps: ## A) The received image is decoded (it had been encoded by the client javascript before sending it over websocket ## B) The eyes are detected on the image. ## ## This returns: Eye coordinates (int X, int Y) ## Image modified to include green rectangles around the person eyes ## C) The new image is encoded to a format suitable to be sent back to the client via websockets ## Once the video frames have been processed, the data can be sent back to the browser via the same websocket connection ## In addition to the eye coordinates (X, Y) ## The image from step C can be sent too. ## This last step is optional, it may be enough to send only the eye coordinate variables (X, Y) ## This coordinates could be used on the client side to draw the exact same rectangles ## If the image is not going to be sent, step C should be removed in order to improve performace. 
#################################################################################################### #################################################################################################### import signal, sys, ssl, logging import time from SimpleWebSocketServer import WebSocket, SimpleWebSocketServer, SimpleSSLWebSocketServer from optparse import OptionParser import cv2 import numpy as np import base64 ## Import custom packages import eyeDetector import clientAnimation try: import simplejson as json except: import json logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG) ################################################################################################## class VideoServer(WebSocket): ############################################################################################## def handleMessage(self): ## STEP A # Handle incoming video frame if self.data is None: self.data = '' decImg = None ## Image after being decoded procImg = None ## Image with rectangles around the eyes encImg = None ## Image encoded in a format suitable to be sent over websocket # ################################################# # Try processing the frame try: ######################################### # Decode image # The image should have been received from the client in binary form img = str(self.data) img = np.fromstring(img, dtype=np.uint8) decImg = eyeDetector.decodeImage(img) if (decImg == None): print self.address, 'ERROR: Could not decode image. System time: '+ str(time.clock()) if ( decImg != None): ## STEP B ## Nothing wrong, detect eyes in the image procImg, eyesX, eyesY = eyeDetector.detectEyes(decImg) else: # Neither None nor !None... no image in the first place! print self.address, 'ERROR: Could not find an image to process! 
'+ str(time.clock()) ######################################### # Encode image to send it back if (procImg != None): ## STEP C retval, encImg = eyeDetector.encodeImage(procImg) if False == retval: print self.address, ('ERROR: Could not encode image!'+ str(time.clock())) else: encImg = base64.b64encode(encImg) else: print self.address, 'ERROR: Could not find an image to encode!' except Exception as n: print 'OpenCV catch fail' + str(n) # ################################################# # Try sending the frame back to the client try: if (encImg != None): # eyesX and eyesY are of numpy.int type, which is not json serializable # We get them back to normal python int eyesX = np.asscalar(np.int16(eyesX)) eyesY = np.asscalar(np.int16(eyesY)) #jsonize all data to send ## If we don't wish to send encImage it should be removed from here out = {'frame': encImg, 'eyesX': eyesX, 'eyesY': eyesY} jsonMessage = json.dumps(out, default=lambda obj: obj.__dict__) message = encImg else: print self.address, 'ERROR: Something went wrong, NOT sending any image. '+ str(time.clock()) self.sendMessage( jsonMessage ) except Exception as n: print n ############################################################################################## def handleConnected(self): ## Incoming websocket connection from a browser ## Several connections can be handled at the same time from different browsers print self.address, 'Video Server: Connection received from client at system time: '+ str(time.clock()) ############################################################################################## def handleClose(self): ## The client closed the connection with the server print self.address, 'Video Server: Connection closed at system time: '+ str(time.clock()) ################################################################################################## if __name__ == "__main__": print ' ' print 'Video server waiting for requests. 
System time: '+ str(time.clock()) print '*****************************************************************' ## When launched from command line we parse OPTIONAL input arguments ## The defaults will work just fine most times ## The http port used by websocket connections is set by --port parser = OptionParser(usage="usage: %prog [options]", version="%prog 1.0") parser.add_option("--host", default='', type='string', action="store", dest="host", help="hostname (localhost)") parser.add_option("--port", default=8090, type='int', action="store", dest="port", help="port (8000)") parser.add_option("--example", default='VideoServer', type='string', action="store", dest="example", help="VideoServer, others") parser.add_option("--ssl", default=0, type='int', action="store", dest="ssl", help="ssl (1: on, 0: off (default))") parser.add_option("--cert", default='./cert.pem', type='string', action="store", dest="cert", help="cert (./cert.pem)") parser.add_option("--ver", default=ssl.PROTOCOL_TLSv1, type=int, action="store", dest="ver", help="ssl version") (options, args) = parser.parse_args() cls = VideoServer ## If we wish to encode the websocket data stream if options.ssl == 1: server = SimpleSSLWebSocketServer(options.host, options.port, cls, options.cert, options.cert, version=options.ver) else: server = SimpleWebSocketServer(options.host, options.port, cls) ## Handle when shooting this server down def close_sig_handler(signal, frame): server.close() sys.exit() ## START the server signal.signal(signal.SIGINT, close_sig_handler) server.serveforever()
46.246305
132
0.586387
3,630
0.386664
0
0
0
0
0
0
5,991
0.638155
aceafae6150c02010849d9e6e9cb7de3fd751523
733
py
Python
Dataset/Leetcode/valid/78/302.py
kkcookies99/UAST
fff81885aa07901786141a71e5600a08d7cb4868
[ "MIT" ]
null
null
null
Dataset/Leetcode/valid/78/302.py
kkcookies99/UAST
fff81885aa07901786141a71e5600a08d7cb4868
[ "MIT" ]
null
null
null
Dataset/Leetcode/valid/78/302.py
kkcookies99/UAST
fff81885aa07901786141a71e5600a08d7cb4868
[ "MIT" ]
null
null
null
class Solution: def XXX(self, nums: List[int]) -> List[List[int]]: final = list() # ---------------------------------------------------- if len(nums)==1: return [[],nums] if len(nums)==0: return [] # ------------------------------------------------------ def pop(cut): if not cut: return else: for i in range(len(cut)): tmp = copy.deepcopy(cut) tmp.pop(i) if tmp not in final: final.append(tmp) pop(tmp) pop(nums) if nums: final.append(nums) return final
29.32
64
0.317872
731
0.997271
0
0
0
0
0
0
110
0.150068
aceb29154c2185e704bf568b3d023bfe66de4e73
1,203
py
Python
ems/utils.py
EMSTrack/EMS-Simulator
50b0dd60bfa7c5c115fc011e830d275b4eb07ab5
[ "MIT" ]
1
2020-07-15T00:16:48.000Z
2020-07-15T00:16:48.000Z
ems/utils.py
EMSTrack/Algorithms
139160619a935001582a60d3f43c0e33082bce99
[ "BSD-3-Clause" ]
40
2018-12-06T23:13:52.000Z
2019-07-11T01:24:13.000Z
ems/utils.py
EMSTrack/Algorithms
139160619a935001582a60d3f43c0e33082bce99
[ "BSD-3-Clause" ]
1
2020-04-23T11:17:43.000Z
2020-04-23T11:17:43.000Z
import pandas as pd def parse_headered_csv (file: str, desired_keys: list): """ Takes a headered CSV file and extracts the columns with the desired keys :param file: CSV filename :param desired_keys: Names of columns to extract :return: pandas dataframe """ if file is None: return None raw = pd.read_csv (file) keys_read = raw.keys() for key in desired_keys: if key not in keys_read: raise Exception("{} was not found in keys of file {}".format(key, file)) return raw[desired_keys] def parse_unheadered_csv (file: str, positions: list, header_names: list): """ Takes an unheadered CSV file, extracts the columns based on given positions, and provides them headers for the pandas dataframe :param file: CSV filename :param positions: Indices of the columns to extract :param header_names: Header names for the extracted columns :return: pandas dataframe """ if file is None: return None raw = pd.read_csv (file) headered_df = pd.DataFrame() for pos, header in zip(positions, header_names): headered_df[header] = raw.iloc[:, pos] return headered_df
25.0625
84
0.670823
0
0
0
0
0
0
0
0
565
0.469659
aced99ea255c2f32d779a17cbc4d6da0683b14ec
660
py
Python
plenum/test/bls/test_send_txns_no_bls.py
steptan/indy-plenum
488bf63c82753a74a92ac6952da784825ffd4a3d
[ "Apache-2.0" ]
null
null
null
plenum/test/bls/test_send_txns_no_bls.py
steptan/indy-plenum
488bf63c82753a74a92ac6952da784825ffd4a3d
[ "Apache-2.0" ]
null
null
null
plenum/test/bls/test_send_txns_no_bls.py
steptan/indy-plenum
488bf63c82753a74a92ac6952da784825ffd4a3d
[ "Apache-2.0" ]
null
null
null
from plenum.test.bls.helper import check_bls_multi_sig_after_send from plenum.test.pool_transactions.conftest import looper, clientAndWallet1, \ client1, wallet1, client1Connected nodeCount = 4 nodes_wth_bls = 0 def test_each_node_has_bls(txnPoolNodeSet): for node in txnPoolNodeSet: assert node.bls_bft assert node.replicas[0]._bls_bft_replica def test_send_txns_no_bls(looper, txnPoolNodeSet, client1, client1Connected, wallet1): check_bls_multi_sig_after_send(looper, txnPoolNodeSet, client1, wallet1, saved_multi_sigs_count=0)
33
78
0.689394
0
0
0
0
0
0
0
0
0
0
acee87269de38c5afcc9577b696b2d9e96852134
149
py
Python
Questoes/b1_q09_piso.py
viniciusm0raes/python
c4d4f1a08d1e4de105109e1f67fae9fcc20d7fce
[ "MIT" ]
null
null
null
Questoes/b1_q09_piso.py
viniciusm0raes/python
c4d4f1a08d1e4de105109e1f67fae9fcc20d7fce
[ "MIT" ]
null
null
null
Questoes/b1_q09_piso.py
viniciusm0raes/python
c4d4f1a08d1e4de105109e1f67fae9fcc20d7fce
[ "MIT" ]
null
null
null
metros = float(input('Quantos metros de piso vc deseja? ')) preco = 70 total = metros*preco print('O preço total do pedido é: R$ %.2f' % (total))
18.625
59
0.66443
0
0
0
0
0
0
0
0
74
0.490066
acf0e5f93f43919ca8a537e46d570aa00d8144da
1,639
py
Python
backend/serv/online_data.py
Alliance-Of-Independent-Programmers/acc-book
3a0f9fa1092d7eee54102e787e2233607c6922cf
[ "MIT" ]
null
null
null
backend/serv/online_data.py
Alliance-Of-Independent-Programmers/acc-book
3a0f9fa1092d7eee54102e787e2233607c6922cf
[ "MIT" ]
1
2021-11-02T22:22:57.000Z
2021-11-02T22:22:57.000Z
backend/serv/online_data.py
Alliance-Of-Independent-Programmers/acc-book
3a0f9fa1092d7eee54102e787e2233607c6922cf
[ "MIT" ]
null
null
null
import base64 import os.path path=os.path.dirname(__file__) misha = base64.b64encode(open(os.path.join(path, "../Pics/Miahs.jpg"), "rb").read()).decode("UTF-8") yaroslav = base64.b64encode(open(os.path.join(path, "../Pics/Yaroslav.jpg"), "rb").read()).decode("UTF-8") goblin = base64.b64encode(open(os.path.join(path, "../Pics/Goblin.jpg"), "rb").read()).decode("UTF-8") sanya = base64.b64encode(open(os.path.join(path, "../Pics/Sanya.jpg"), "rb").read()).decode("UTF-8") artem = base64.b64encode(open(os.path.join(path, "../Pics/Artem.jpg"), "rb").read()).decode("UTF-8") slava = base64.b64encode(open(os.path.join(path, "../Pics/Slava.jpg"), "rb").read()).decode("UTF-8") andrew = base64.b64encode(open(os.path.join(path, "../Pics/Andrew.jpg"), "rb").read()).decode("UTF-8") killreal = base64.b64encode(open(os.path.join(path, "../Pics/KillReal.jpg"), "rb").read()).decode("UTF-8") mauri = base64.b64encode(open(os.path.join(path, "../Pics/Maury.jpg"), "rb").read()).decode("UTF-8") online1 = { "login": "Artem", "img": artem, } online2 = { "login": "Slava", "img": slava, } online3 = { "login": "Misha", "img": misha, } online4 = { "login": "Andrew", "img": andrew, } online5 = { "login": "Goblin", "img": goblin, } online6 = { "login": "KillReal", "img": killreal, } online7 = { "login": "Mauri", "img": mauri, } online8 = { "login": "Sany0K", "img": sanya, } online9 = { "login": "Yaroslave", "img": yaroslav, } all_online = [ online1, online2, online3, online4, online5, online6, online7, online8, online9, ]
21.565789
106
0.594875
0
0
0
0
0
0
0
0
459
0.280049
acf0ea081196fdcaa8448d959385eacc3ae88049
202
py
Python
profiles_api/serializers.py
parth-singh71/profiles-rest-api
c415d2fd6c1c6c51674bca601644bcedb67cf72c
[ "MIT" ]
null
null
null
profiles_api/serializers.py
parth-singh71/profiles-rest-api
c415d2fd6c1c6c51674bca601644bcedb67cf72c
[ "MIT" ]
4
2020-04-15T07:14:27.000Z
2021-06-04T22:31:09.000Z
profiles_api/serializers.py
parth-singh71/profiles-rest-api
c415d2fd6c1c6c51674bca601644bcedb67cf72c
[ "MIT" ]
null
null
null
from rest_framework import serializers class HelloSerializer(serializers.Serializer): """Serializers a name field for testing our APIView""" name = serializers.CharField(max_length= 10)
25.25
58
0.757426
154
0.762376
0
0
0
0
0
0
54
0.267327
acf0f05dd07e3d68609ccda5295083be48e3b3c9
7,116
py
Python
xpsi/PostProcessing/_cache.py
DevarshiChoudhury/xpsi
200b82b4ef4a4e7342fc30dd03c5821cff0031c2
[ "MIT" ]
14
2019-09-26T12:08:06.000Z
2021-05-11T15:26:10.000Z
xpsi/PostProcessing/_cache.py
DevarshiChoudhury/xpsi
200b82b4ef4a4e7342fc30dd03c5821cff0031c2
[ "MIT" ]
13
2020-01-10T11:03:28.000Z
2021-10-04T14:44:01.000Z
xpsi/PostProcessing/_cache.py
DevarshiChoudhury/xpsi
200b82b4ef4a4e7342fc30dd03c5821cff0031c2
[ "MIT" ]
9
2020-03-04T13:28:05.000Z
2021-09-28T09:00:50.000Z
from __future__ import division, print_function from .. import __version__ from ._global_imports import * try: import h5py except ImportError: print('Install h5py to enable signal caching.') raise class _Cache(object): """ Cache numerical model objects computed during likelihood evaluation. :param str filename: Filename of cache. :param str cache_dir: Directory to write cache to. :param bool read_only: Do not write to cache file? :param bool archive: If not read-only, then archive an existing cache file found at the same path? """ def __init__(self, filename, cache_dir='./', read_only=False, archive=True): if isinstance(filename, _six.string_types): if filename[-3:] != '.h5': self._filename = filename + '.h5' else: self._filename = filename self._cache_dir = cache_dir self._path = _os.path.join(self._cache_dir, self._filename) self._read_only = read_only self._archive_if_incompatible = archive def __enter__(self): return self def __exit__(self, exc, exc_value, traceback): if exc: print('Encountered problem whilst caching:') def _open(self, mode='r'): """ Get the :mod:`h5py` context manager. """ if self._read_only and mode != 'r': raise RuntimeError('The cache is in read-only mode.') return h5py.File(self._path, mode) def cache(self, data): """ Cache the computational data. """ with self._open('r+') as f: g = f['data'] for key, value in data.iteritems(): if isinstance(value, tuple) or isinstance(value, list): if key not in g.keys(): shape = [f.attrs['n'], len(value)] shape += [s for s in value[0].shape] g.create_dataset(key, shape=shape, dtype='float64') for j, v in enumerate(value): g[key][self.i,j,...] = v else: if key not in g.keys(): shape = [f.attrs['n']] + [s for s in value.shape] g.create_dataset(key, shape=shape, dtype='float64') g[key][self.i,...] = value self.i += 1 def reset_iterator(self): """ Reset the counter for the cache iterator. """ self.i = 0 def __iter__(self): self.reset_iterator() return self def __next__(self): """ Read from the cache. 
""" cached = {} with self._open('r') as f: g = f['data'] for key in g.keys(): cached[key] = g[key][self.i,...] self.i += 1 return cached def next(self): """ Python 2.x compatibility. """ return self.__next__() @make_verbose('Checking whether an existing cache can be read:', 'Cache state determined') def do_caching(self, samples, force=False): """ Check whether a new cache is required or whether an exising cache can be read without additional computation. :return: Boolean indicating whether to read (``False``) or write. """ if force: self._new(samples) return True try: # try reading file and checking keys with self._open('r') as f: if 'thetas' not in f.keys(): self._new(samples) return True except IOError: # create new cache file self._new(samples) return True else: # can be read, so check if samples array are matching if self._changed(samples): self._new(samples) return True else: return False @make_verbose('Creating new cache file', 'Cache file created') def _new(self, samples): """ Prepare a new cache file. """ if not _os.path.isdir(self._cache_dir): _os.mkdir(self._cache_dir) if self._archive_if_incompatible: try: with self._open('r'): pass except IOError: self._initialise(samples) else: self._archive() self._initialise(samples) else: self._initialise(samples) @make_verbose('Initialising cache file', 'Cache file initialised') def _initialise(self, samples): """ Initialise the cache. """ with self._open('w') as f: f.attrs['version'] = __version__ f.attrs['n'] = samples.shape[0] f.create_dataset('thetas', data=samples) f.create_group('/data') self.reset_iterator() def _changed(self, samples): """ Check whether software version or sample set has changed. """ with self._open('r') as f: if f.attrs['version'] != __version__: return True if not _np.array_equal(f['thetas'], samples): return True return False @make_verbose('Attempting to archive existing cache file in ' 'a subdirectory') def _archive(self): """ Archive an existing cache file. 
""" # to archive the existing cache file archive_dir = _os.path.join(self._cache_dir, 'archive') try: if not _os.path.isdir(archive_dir): _os.mkdir(archive_dir) except OSError: yield ('Archiving failed... cache file %s will be ' 'overwritten.' % self._filename) yield else: yield 'Targeting subdirectory: %s.' % archive_dir try: from datetime import datetime except ImportError: yield ('Archiving failed... cache file %s will be ' 'overwritten.' % self._filename) yield else: name_archived = self._filename[:-3] + '__archive__' name_archived += 'xpsi_version_%s__' % __version__ obj = datetime.now() name_archived += 'datetime__%i.%i.%i__%i.%i.%i' % (obj.day, obj.month, obj.year, obj.hour, obj.minute, obj.second) try: _os.rename(self._filename, _os.path.join(archive_dir, name_archived + '.h5')) except OSError: yield ('Archiving failed... cache file %s will be ' 'overwritten.' % self._filename) else: yield ('Exisiting cache file archived in ' 'subdirectory %s.' % archive_dir) yield None
32.199095
77
0.51068
6,902
0.969927
1,791
0.251686
3,813
0.535835
0
0
1,899
0.266863
acf119e7c277821bbc64ba71171fddd1c61cd7ed
1,234
py
Python
multiprocessingTest.py
lakshay1296/python-multiprocessing-sample
c42788686168b95b3d98edb417d9071ef3e7eccd
[ "Unlicense" ]
null
null
null
multiprocessingTest.py
lakshay1296/python-multiprocessing-sample
c42788686168b95b3d98edb417d9071ef3e7eccd
[ "Unlicense" ]
null
null
null
multiprocessingTest.py
lakshay1296/python-multiprocessing-sample
c42788686168b95b3d98edb417d9071ef3e7eccd
[ "Unlicense" ]
null
null
null
from multiprocessing import Process, Manager ''' Custom Module Imports ''' from calculator.add import addition from calculator.subtract import subtraction from calculator.multiply import multiplication from calculator.divide import division class Main: def __init__(self) -> None: pass def calculatorFunction(self): ls = [[1,2],[3,4],[5,6]] with Manager() as manager: result = manager.dict() for i in ls: obj1 = addition(i[0],i[1], result) obj2 = subtraction(i[0],i[1], result) obj3 = multiplication(i[0],i[1], result) obj4 = division(i[0],i[1], result) p1 = Process(target=obj1.add) p2 = Process(target=obj2.subtract) p3 = Process(target=obj3.multiply) p4 = Process(target=obj4.divide) p = [p1,p2,p3,p4] p1.start() p2.start() p3.start() p4.start() for procs in p: procs.join() print(result) if __name__ == '__main__': main = Main() main.calculatorFunction()
28.697674
57
0.508914
901
0.730146
0
0
0
0
0
0
39
0.031605
acf4266847f871c1b8280d08536c0e7db9ac800b
1,797
py
Python
migrations/d7cd5138bb9b_minor_fixes.py
szkkteam/agrosys
a390332202f7200632d2ff3816e1b0f3cc76f586
[ "MIT" ]
null
null
null
migrations/d7cd5138bb9b_minor_fixes.py
szkkteam/agrosys
a390332202f7200632d2ff3816e1b0f3cc76f586
[ "MIT" ]
null
null
null
migrations/d7cd5138bb9b_minor_fixes.py
szkkteam/agrosys
a390332202f7200632d2ff3816e1b0f3cc76f586
[ "MIT" ]
null
null
null
"""minor fixes Revision ID: d7cd5138bb9b Revises: 0fed690a57ce Create Date: 2020-09-18 07:56:14.159782 """ from alembic import op import geoalchemy2 import sqlalchemy as sa import backend # revision identifiers, used by Alembic. revision = 'd7cd5138bb9b' down_revision = '0fed690a57ce' branch_labels = () depends_on = None def upgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('task_harvesting', sa.Column('id', sa.BigInteger(), nullable=False), sa.Column('specific_product_id', sa.BigInteger(), nullable=False), sa.ForeignKeyConstraint(['id'], ['task.task_id'], name=op.f('fk_task_harvesting_id_task'), onupdate='CASCADE', ondelete='CASCADE'), sa.ForeignKeyConstraint(['specific_product_id'], ['specific_product.id'], name=op.f('fk_task_harvesting_specific_product_id_specific_product')), sa.PrimaryKeyConstraint('id', name=op.f('pk_task_harvesting')), mysql_charset='utf8', mysql_engine='InnoDB' ) # ### end Alembic commands ### def downgrade(): # ### commands auto generated by Alembic - please adjust! ### op.create_table('spatial_ref_sys', sa.Column('srid', sa.INTEGER(), autoincrement=False, nullable=False), sa.Column('auth_name', sa.VARCHAR(length=256), autoincrement=False, nullable=True), sa.Column('auth_srid', sa.INTEGER(), autoincrement=False, nullable=True), sa.Column('srtext', sa.VARCHAR(length=2048), autoincrement=False, nullable=True), sa.Column('proj4text', sa.VARCHAR(length=2048), autoincrement=False, nullable=True), sa.CheckConstraint('(srid > 0) AND (srid <= 998999)', name='spatial_ref_sys_srid_check'), sa.PrimaryKeyConstraint('srid', name='spatial_ref_sys_pkey') ) op.drop_table('task_harvesting') # ### end Alembic commands ###
36.673469
148
0.71675
0
0
0
0
0
0
0
0
771
0.429048
acf4e2b582ea3797ec2204138be8231ebbf6a6c6
8,883
py
Python
creation/bundles.py
jim-bo/silp2
1186a84b2570af0e4ed305ddfff8f931e012eadf
[ "MIT" ]
1
2018-01-29T05:00:43.000Z
2018-01-29T05:00:43.000Z
creation/bundles.py
jim-bo/silp2
1186a84b2570af0e4ed305ddfff8f931e012eadf
[ "MIT" ]
1
2016-01-31T13:13:10.000Z
2016-02-02T14:16:05.000Z
creation/bundles.py
jim-bo/silp2
1186a84b2570af0e4ed305ddfff8f931e012eadf
[ "MIT" ]
null
null
null
#!/usr/bin/python ''' creates bundle graph from filtered multigraph ''' ### imports ### import sys import os import logging import networkx as nx import numpy as np import scipy.stats as stats import cPickle import helpers.io as io import helpers.misc as misc ### definitions ### ### functions ### def compress_edges(MG, p, q): ''' compresses the edges ''' # check for types. bcnts = [0, 0, 0, 0] for z in MG[p][q]: bcnts[MG[p][q][z]['state']] += 1 # build numpy arrays for each distance type. bdists = list() for i in range(4): bdists.append(np.zeros(bcnts[i], dtype=np.float)) # populate array with distances. bidxs = [0, 0, 0, 0] for z in MG[p][q]: state = MG[p][q][z]['state'] dist = MG[p][q][z]['dist'] bdists[state][bidxs[state]] = dist bidxs[state] += 1 # compute bundle info. devs = list() means = list() mins = list() maxs = list() for i in range(4): if bdists[i].shape[0] <= 0: devs.append(-1) means.append(-1) mins.append(-1) maxs.append(-1) else: devs.append(np.std(bdists[i])) means.append(np.mean(bdists[i])) mins.append(bdists[i].min()) maxs.append(bdists[i].max()) # return summaries. return bcnts, bdists, devs, means, mins, maxs def _load_reps(file_path): ''' loads repeat info from cpickle''' # no weights. if file_path == None: return dict() # try dictionary emthod. if os.path.isdir(file_path) == True: reps = dict() for f in os.listdir(file_path): n = f.replace(".npy","") try: reps[n] = np.load("%s/%s" % (file_path, f)) except: continue return reps # get weights. try: with open(file_path) as fin: return cPickle.load(fin) except: logging.warning("unable to load repeat pickle, ignoring weights") return dict() def create_bundles(paths, args): """ creates bundles Parameters ---------- paths.edge_file : string args.bundle_size : int args.pthresh : int args.bup : int """ # load repeat annotations. repcnts = _load_reps(args.rep_file) # load the multi graph. MG = nx.read_gpickle(paths.edge_file) # create bundle graph. BG = nx.Graph() # add nodes. 
for n in MG.nodes(): BG.add_node(n, MG.node[n]) # build set of adjacencies. adjset = set() for p, nbrs in MG.adjacency_iter(): for q in nbrs: adjset.add(tuple(sorted([p,q]))) # compute bundles from adjacencies. zerod = 0 zcnt = 0 ztot = len(adjset) for p, q in adjset: #logging.info("progress: %d of %d" % (zcnt, ztot)) zcnt += 1 # sanity check. if MG.node[p]['cov'] == 0.0 or MG.node[q]['cov'] == 0.0: logging.error("how can this happen?") sys.exit() # bundle size check. bsize = len(MG[p][q]) if bsize < args.bundle_size: continue # group by insert size. groups = dict() std_devs = dict() for z in MG[p][q]: ins_size = MG[p][q][z]['ins_size'] if ins_size not in groups: groups[ins_size] = list() std_devs[ins_size] = MG[p][q][z]['std_dev'] groups[ins_size].append(z) # loop over groups. for ins_size in groups: # compress info. bcnts, bdists, devs, means, mins, maxs = compress_edges(MG, p, q) # compute weights. cov = 1 - abs(MG.node[p]['cov'] - MG.node[q]['cov']) / (MG.node[p]['cov'] + MG.node[q]['cov']) # swap bdists for python lists. for i in range(len(bdists)): bdists[i] = list(bdists[i]) # add start stop info. poses1 = list() poses2 = list() for z in MG[p][q]: tmp = MG[p][q][z] poses1.append((tmp['left1'], tmp['right1'])) poses2.append((tmp['left2'], tmp['right2'])) # create bundle. if BG.has_edge(p, q): logging.error("can't have multiple insert sizes between same node") sys.exit(1) # zero out negative distances. avgs = [np.average(bdists[i]) for i in range(4)] for i in range(4): if avgs[i] == np.nan: bcnts[i] = 0.0 if avgs[i] < -2 * args.bundle_size: bcnts[i] = 0.0 zerod += 1 # don't add it if no support. if np.sum(bcnts) == 0: continue #BG.add_edge(p, q, bcnts=bcnts, bdists=bdists, devs=devs, means=means, mins=mins, maxs=maxs, ins_size=ins_size, std_dev=std_devs[ins_size], poses1=poses1, poses2=poses2) BG.add_edge(p, q, bcnts=bcnts, bdists=bdists, ins_size=ins_size, std_dev=std_devs[ins_size], cov=cov) # start the slimming. 
logging.info("starting repeat based slimming") # do repeat mods. track_upped = 0 track_remed = 0 track_ogedg = len(BG.edges()) idxs = np.zeros(1) if repcnts != dict(): # create repeat distrib. repavgs = np.zeros(len(repcnts), dtype=np.dtype([('name','S256'),('avg',np.float)])) i = 0 for name in repcnts: # save the name. repavgs[i]['name'] = name # skip no repeat info. if name not in repcnts or repcnts[name] == None: repavgs[i]['avg'] = 0 i += 1 continue # take the average over ins_size + 6 (std_dev) d = args.ins_size + (6 * args.std_dev) if repcnts[name].shape[0] < d: repavgs[i]['avg'] = np.average(repcnts[name]) else: r = range(0,d)+range(len(repcnts[name])-d,len(repcnts[name])) repavgs[i]['avg'] = np.average(repcnts[name][r]) i += 1 # compute the cutoff threshold. score = stats.scoreatpercentile(repavgs[:]['avg'], args.pthresh) idxs = repavgs[:]['avg'] > score # look at each bundle and see if the repeats necessitates attention. for p, q in BG.edges(): # get index of pairs. idp = np.where(repavgs[:]['name'] == p)[0] idq = np.where(repavgs[:]['name'] == q)[0] # skip if both not high. if idxs[idp] == False and idxs[idq] == False: continue # get score. scp = repavgs[idp]['avg'] scq = repavgs[idq]['avg'] # check if this bundle needs attention. if max(scp, scq) > score: track_upped += 1 # it gets its minumm bundle size upped. for i in range(len(BG[p][q]['bcnts'])): # clear if it doesn't meet criteria. if BG[p][q]['bcnts'][i] < args.bundle_size + args.bup: BG[p][q]['bcnts'][i] = 0 # remove bundle if no support. if np.sum(BG[p][q]['bcnts']) == 0: track_remed += 1 BG.remove_edge(p,q) else: logging.info('no repeat information supplied') # add repeat weights. for p, q in BG.edges(): # create weight. BG[p][q]['u'] = [0.0] * 4 # sum weights. 
for z in MG[p][q]: left1 = MG[p][q][z]['left1'] left2 = MG[p][q][z]['left2'] right1 = MG[p][q][z]['right1'] right2 = MG[p][q][z]['right2'] cntl = np.sum(repcnts[p][left1:left2]) cntr = np.sum(repcnts[p][right1:right2]) try: propl = 1.0 - (float(cntl) / float(left2-left1)) propr = 1.0 - (float(cntr) / float(right2-right1)) except: continue # add average. p_k = (propl + propr) / 2.0 # add it. BG[p][q]['u'][MG[p][q][z]['state']] += p_k # note the modifications due to filtering. logging.info("contigs with repeat regions in %.2f threshold: %i of %i" % (args.pthresh, np.sum(idxs), len(idxs))) logging.info("bundles effected by repeats: %i of %i" % (track_upped, track_ogedg)) logging.info("bundles removed by repeats: %i of %i" % (track_remed, track_ogedg)) logging.info("bundles removed by neg dist: %i" % (zerod)) logging.info("total bundles: %i" % (len(BG.edges()))) # write to disk. nx.write_gpickle(BG, paths.bundle_file)
29.808725
181
0.501745
0
0
0
0
0
0
0
0
2,376
0.267477
acf541af1bb3ebd0a182c3839d64f9ce9a19e679
903
py
Python
ParadoxTrading/EngineExt/Futures/__init__.py
yutiansut/ParadoxTrading
b915d1491663443bedbb048017abeed3f7dcd4e2
[ "MIT" ]
2
2018-01-25T08:33:59.000Z
2018-05-14T13:59:54.000Z
ParadoxTrading/EngineExt/Futures/__init__.py
yutiansut/ParadoxTrading
b915d1491663443bedbb048017abeed3f7dcd4e2
[ "MIT" ]
null
null
null
ParadoxTrading/EngineExt/Futures/__init__.py
yutiansut/ParadoxTrading
b915d1491663443bedbb048017abeed3f7dcd4e2
[ "MIT" ]
null
null
null
from .Arbitrage import ArbitrageEqualFundSimplePortfolio, \ ArbitrageEqualFundVolatilityPortfolio, ArbitrageStrategy from .BacktestEngine import BacktestEngine from .BacktestMarketSupply import BacktestMarketSupply from .BarBacktestExecution import BarBacktestExecution from .BarPortfolio import BarPortfolio from .InterDayBacktestExecution import InterDayBacktestExecution from .InterDayOnlineEngine import InterDayOnlineEngine from .InterDayOnlineExecution import InterDayOnlineExecution from .InterDayOnlineMarketSupply import InterDayOnlineMarketSupply from .InterDayPortfolio import InterDayPortfolio from .TickBacktestExecution import TickBacktestExecution from .TickPortfolio import TickPortfolio from .Trend import CTAEqualFundPortfolio, CTAEqualRiskATRPortfolio, \ CTAEqualRiskRatePortfolio, CTAEqualRiskVolatilityPortfolio, \ CTAStatusType, CTAStrategy, CTAEqualRiskGARCHPortfolio
53.117647
69
0.890365
0
0
0
0
0
0
0
0
0
0
acf75d195a7f9454dff3256ac3c4f362cd91d9cd
529
py
Python
core/network/Swin_T/__init__.py
ViTAE-Transformer/ViTAE-Transformer-Matting
5cd1574cd46009a4e9660cabdc008718e20bc381
[ "MIT" ]
8
2022-03-31T05:58:45.000Z
2022-03-31T13:24:18.000Z
core/network/Swin_T/__init__.py
ViTAE-Transformer/ViTAE-Transformer-Matting
5cd1574cd46009a4e9660cabdc008718e20bc381
[ "MIT" ]
null
null
null
core/network/Swin_T/__init__.py
ViTAE-Transformer/ViTAE-Transformer-Matting
5cd1574cd46009a4e9660cabdc008718e20bc381
[ "MIT" ]
null
null
null
from .swin_stem_pooling5_transformer import swin_stem_pooling5_encoder from .swin_stem_pooling5_transformer import SwinStemPooling5TransformerMatting from .decoder import SwinStemPooling5TransformerDecoderV1 __all__ = ['p3mnet_swin_t'] def p3mnet_swin_t(pretrained=True, img_size=512, **kwargs): encoder = swin_stem_pooling5_encoder(pretrained=pretrained, img_size=img_size, **kwargs) decoder = SwinStemPooling5TransformerDecoderV1() model = SwinStemPooling5TransformerMatting(encoder, decoder) return model
35.266667
92
0.835539
0
0
0
0
0
0
0
0
15
0.028355
acf995ba4adee5652bf497dcac8aaaa0df89b254
702
py
Python
tests/test_day22.py
arcadecoffee/advent-2021
57d24cd6ba6e2b4d7e68ea492b955b73eaad7b6a
[ "MIT" ]
null
null
null
tests/test_day22.py
arcadecoffee/advent-2021
57d24cd6ba6e2b4d7e68ea492b955b73eaad7b6a
[ "MIT" ]
null
null
null
tests/test_day22.py
arcadecoffee/advent-2021
57d24cd6ba6e2b4d7e68ea492b955b73eaad7b6a
[ "MIT" ]
null
null
null
""" Tests for Day 22 """ from day22.module import part_1, part_2, \ FULL_INPUT_FILE, TEST_INPUT_FILE_1, TEST_INPUT_FILE_2, TEST_INPUT_FILE_3 def test_part_1_1(): result = part_1(TEST_INPUT_FILE_1) assert result == 39 def test_part_1_2(): result = part_1(TEST_INPUT_FILE_2) assert result == 590784 def test_part_1_3(): result = part_1(TEST_INPUT_FILE_3) assert result == 474140 def test_part_1_full(): result = part_1(FULL_INPUT_FILE) assert result == 546724 def test_part_2(): result = part_2(TEST_INPUT_FILE_3) assert result == 2758514936282235 def test_part_2_full(): result = part_2(FULL_INPUT_FILE) assert result == 1346544039176841
18.972973
76
0.720798
0
0
0
0
0
0
0
0
24
0.034188
acf99b16735919f2fa01bf50bc4e4be9aea749c8
1,441
py
Python
src/kde_crime/kde_test.py
ras9841/UP-STAT-2018
cad06bfac3c12b4cb14c3b703e23c52cc391383a
[ "MIT" ]
null
null
null
src/kde_crime/kde_test.py
ras9841/UP-STAT-2018
cad06bfac3c12b4cb14c3b703e23c52cc391383a
[ "MIT" ]
1
2018-05-08T12:16:50.000Z
2018-05-08T21:28:40.000Z
src/kde_crime/kde_test.py
ras9841/UP-STAT-2018
cad06bfac3c12b4cb14c3b703e23c52cc391383a
[ "MIT" ]
null
null
null
from spatial_kde import * from sklearn.model_selection import train_test_split import pandas as pd import matplotlib.pyplot as plt data_loc = "../../data/RPD_crime2011toNow.csv" data = process_RPD_data(data_loc) print("Loaded data") Y = data[["class"]] X = data[["X", "Y"]] print("Starting Predictions") n_trials = 25 results = np.zeros([2,n_trials]) for test in range(2): print("Running test #%d"%(test+1)) for i in range(n_trials): print("\nRunning trial %d/%d"%(i+1, n_trials)) # Setup Data if test == 0: X_tr, X_te, Y_tr, Y_te = train_test_split(X, Y, test_size=0.30) else: X_tr, X_te, Y_tr, Y_te = train_test_split(X, Y, test_size=0.30,\ stratify=Y) train_df = pd.concat([X_tr, Y_tr], axis=1) y = Y_te.values.reshape(Y_te.shape[0],) print("Starting KDE") kde = KDE() kde.train(train_df) print("Making predictions") yhat = kde.predict(X_te) results[test, i] = compute_accuracy(y, yhat)*100 print("Accuracy: %d%%"%(results[test,i])) results = results.T print("NS Accuracy: (%.3f +/- %.3f)%%"%(results[:,0].mean(),\ results[:,0].std())) print("STRAT Accuracy: (%.3f +/- %.3f)%%"%(results[:,1].mean(),\ results[:,1].std())) results_df = pd.DataFrame(results, columns=["Random", "Stratified"]) results_df.boxplot() plt.grid(False) plt.ylabel("Accuracy (%)") plt.show()
29.408163
76
0.605135
0
0
0
0
0
0
0
0
288
0.199861
acfb03705c27649ad1f5865c957917038f62a92e
2,872
py
Python
setup.py
C0DK/lightbus
be5cc2771b1058f7c927cca870ed75d4cbbe61a3
[ "Apache-2.0" ]
null
null
null
setup.py
C0DK/lightbus
be5cc2771b1058f7c927cca870ed75d4cbbe61a3
[ "Apache-2.0" ]
null
null
null
setup.py
C0DK/lightbus
be5cc2771b1058f7c927cca870ed75d4cbbe61a3
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # DO NOT EDIT THIS FILE! # This file has been autogenerated by dephell <3 # https://github.com/dephell/dephell try: from setuptools import setup except ImportError: from distutils.core import setup import os.path readme = "" here = os.path.abspath(os.path.dirname(__file__)) readme_path = os.path.join(here, "README.rst") if os.path.exists(readme_path): with open(readme_path, "rb") as stream: readme = stream.read().decode("utf8") setup( long_description=readme, name="lightbus", version="1.1.0", description="RPC & event framework for Python 3", python_requires=">=3.7", project_urls={ "documentation": "https://lightbus.org", "homepage": "https://lightbus.org", "repository": "https://github.com/adamcharnock/lightbus/", }, author="Adam Charnock", author_email="adam@adamcharnock.com", keywords="python messaging redis bus queue", classifiers=[ "Development Status :: 5 - Production/Stable", "Framework :: AsyncIO", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Operating System :: MacOS :: MacOS X", "Operating System :: POSIX", "Programming Language :: Python :: 3", "Topic :: System :: Networking", "Topic :: Communications", ], entry_points={ "console_scripts": ["lightbus = lightbus.commands:lightbus_entry_point"], "lightbus_event_transports": [ "debug = lightbus:DebugEventTransport", "redis = lightbus:RedisEventTransport", ], "lightbus_plugins": [ "internal_metrics = lightbus.plugins.metrics:MetricsPlugin", "internal_state = lightbus.plugins.state:StatePlugin", ], "lightbus_result_transports": [ "debug = lightbus:DebugResultTransport", "redis = lightbus:RedisResultTransport", ], "lightbus_rpc_transports": [ "debug = lightbus:DebugRpcTransport", "redis = lightbus:RedisRpcTransport", ], "lightbus_schema_transports": [ "debug = lightbus:DebugSchemaTransport", "redis = lightbus:RedisSchemaTransport", ], }, packages=[ "lightbus", "lightbus.client", "lightbus.client.docks", 
"lightbus.client.internal_messaging", "lightbus.client.subclients", "lightbus.commands", "lightbus.config", "lightbus.plugins", "lightbus.schema", "lightbus.serializers", "lightbus.transports", "lightbus.transports.redis", "lightbus.utilities", ], package_dir={"": "."}, package_data={}, install_requires=["aioredis>=1.2.0", "jsonschema>=3.2", "pyyaml>=3.12"], )
31.56044
81
0.607591
0
0
0
0
0
0
0
0
1,688
0.587744
acfb6bea34e4f21d414dc262f6f49c3c957210d9
5,840
py
Python
src/skill_algorithms/trueskill_data_processing.py
EllAchE/nba_tipoff
f3820e391d4a6ddb611efeb6c709f16876771684
[ "MIT" ]
null
null
null
src/skill_algorithms/trueskill_data_processing.py
EllAchE/nba_tipoff
f3820e391d4a6ddb611efeb6c709f16876771684
[ "MIT" ]
null
null
null
src/skill_algorithms/trueskill_data_processing.py
EllAchE/nba_tipoff
f3820e391d4a6ddb611efeb6c709f16876771684
[ "MIT" ]
null
null
null
import ENVIRONMENT from src.database.database_creation import createPlayerTrueSkillDictionary from src.skill_algorithms.algorithms import trueSkillMatchWithRawNums, trueSkillTipWinProb from src.skill_algorithms.common_data_processing import beforeMatchPredictions, runAlgoForSeason, runAlgoForAllSeasons # backlogtodo optimize trueskill, glicko etc. for rapid iteration # backlogtodo refactor equations here to be generic def runTrueSkillForSeason(seasonCsv: str, winningBetThreshold: float= ENVIRONMENT.GLICKO_TIPOFF_ODDS_THRESHOLD, startFromBeginning=False): runAlgoForSeason(seasonCsv, ENVIRONMENT.PLAYER_TRUESKILL_DICT_PATH, ENVIRONMENT.TS_PREDICTION_SUMMARIES_PATH, trueSkillBeforeMatchPredictions, trueSkillUpdateDataSingleTipoff, winningBetThreshold, columnAdds=['Home TS Mu', 'Away TS Mu', 'Home TS Sigma', 'Away TS Sigma', 'Home Lifetime Appearances', 'Away Lifetime Appearances', 'Home Tipper Wins', 'Away Tipper Wins', 'Home Tipper Losses', 'Away Tipper Losses'], startFromBeginning=startFromBeginning) # backlogtodo setup odds prediction to use Ev or win prob rather than bet threshold def trueSkillBeforeMatchPredictions(psd, homePlayerCode, awayPlayerCode, homeTeam, awayTeam, tipWinnerCode, scoringTeam, predictionArray=None, actualArray=None, histogramPredictionsDict=None, winningBetThreshold=ENVIRONMENT.TS_TIPOFF_ODDS_THRESHOLD): return beforeMatchPredictions(psd, homePlayerCode, awayPlayerCode, homeTeam, awayTeam, tipWinnerCode, scoringTeam, predictionArray=predictionArray, actualArray=actualArray, histogramPredictionsDict=histogramPredictionsDict, predictionSummaryPath=ENVIRONMENT.TS_PREDICTION_SUMMARIES_PATH, minimumTipWinPercentage=winningBetThreshold, predictionFunction=trueSkillTipWinProb, minimumAppearances=ENVIRONMENT.MIN_TS_APPEARANCES) def runTSForAllSeasons(seasons, winningBetThreshold=ENVIRONMENT.TS_TIPOFF_ODDS_THRESHOLD): runAlgoForAllSeasons(seasons, ENVIRONMENT.PLAYER_TRUESKILL_DICT_PATH, ENVIRONMENT.TS_PREDICTION_SUMMARIES_PATH, 
trueSkillBeforeMatchPredictions, trueSkillUpdateDataSingleTipoff, winningBetThreshold, columnAdds=['Home TS Mu', 'Away TS Mu', 'Home TS Sigma', 'Away TS Sigma', 'Home Lifetime Appearances', 'Away Lifetime Appearances', 'Home Tipper Wins', 'Away Tipper Wins', 'Home Tipper Losses', 'Away Tipper Losses']) def trueSkillUpdateDataSingleTipoff(psd, winnerCode, loserCode, homePlayerCode, game_code=None): if game_code: print(game_code) winnerCode = winnerCode[11:] loserCode = loserCode[11:] winnerOgMu = psd[winnerCode]["mu"] winnerOgSigma = psd[winnerCode]["sigma"] loserOgMu = psd[loserCode]["mu"] loserOgSigma = psd[loserCode]["sigma"] winnerMu, winnerSigma, loserMu, loserSigma = trueSkillMatchWithRawNums(psd[winnerCode]["mu"], psd[winnerCode]["sigma"], psd[loserCode]['mu'], psd[loserCode]["sigma"]) winnerWinCount = psd[winnerCode]["wins"] + 1 winnerAppearances = psd[winnerCode]["appearances"] + 1 loserLosses = psd[loserCode]["losses"] + 1 loserAppearances = psd[loserCode]["appearances"] + 1 psd[winnerCode]["wins"] = winnerWinCount psd[winnerCode]["appearances"] = winnerAppearances psd[loserCode]["losses"] = loserLosses psd[loserCode]["appearances"] = loserAppearances psd[winnerCode]["mu"] = winnerMu psd[winnerCode]["sigma"] = winnerSigma psd[loserCode]["mu"] = loserMu psd[loserCode]["sigma"] = loserSigma print('Winner:', winnerCode, 'trueskill increased', winnerMu - winnerOgMu, 'to', winnerMu, '. Sigma is now', winnerSigma, '. W:', winnerWinCount, 'L', winnerAppearances - winnerWinCount) print('Loser:', loserCode, 'trueskill decreased', loserMu - loserOgMu, 'to', loserMu, '. Sigma is now', loserSigma, '. 
W:', loserAppearances - loserLosses, 'L', loserLosses) # backlogtodo refactor repeated code out of algo methods if homePlayerCode == winnerCode: homeMu = winnerOgMu homeSigma = winnerOgSigma awayMu = loserOgMu awaySigma = loserOgSigma homeAppearances = winnerAppearances - 1 awayAppearances = loserAppearances - 1 homeWins = winnerWinCount - 1 homeLosses = psd[winnerCode]["losses"] awayWins = psd[loserCode]["wins"] awayLosses = loserLosses elif homePlayerCode == loserCode: homeMu = loserOgMu homeSigma = loserOgSigma awayMu = winnerOgMu awaySigma = winnerOgSigma awayAppearances = winnerAppearances homeAppearances = loserAppearances awayWins = winnerWinCount - 1 awayLosses = psd[winnerCode]["losses"] homeWins = psd[loserCode]["wins"] homeLosses = loserLosses else: raise ValueError('neither code matches') return {"Home TS Mu": homeMu, "Home TS Sigma": homeSigma, "Away TS Mu": awayMu, "Away TS Sigma": awaySigma, "Home Lifetime Appearances": homeAppearances, "Away Lifetime Appearances": awayAppearances, "Home Tipper Wins": homeWins, "Home Tipper Losses": homeLosses, "Away Tipper Wins": awayWins, "Away Tipper Losses": awayLosses} def calculateTrueSkillDictionaryFromZero(): createPlayerTrueSkillDictionary() # clears the stored values, runTSForAllSeasons(ENVIRONMENT.ALL_SEASONS_LIST, winningBetThreshold=ENVIRONMENT.TS_TIPOFF_ODDS_THRESHOLD) print("\n", "trueskill dictionary updated for seasons", ENVIRONMENT.ALL_SEASONS_LIST, "\n") def updateTrueSkillDictionaryFromLastGame(): runTrueSkillForSeason(ENVIRONMENT.CURRENT_SEASON_CSV, winningBetThreshold=ENVIRONMENT.TS_TIPOFF_ODDS_THRESHOLD, startFromBeginning=False) print("\n", "trueskill dictionary updated from last game", "\n")
62.12766
291
0.738356
0
0
0
0
0
0
0
0
1,250
0.214041
acfbf91315ae8fa759e47178ec90f3b7a692cd5c
7,881
py
Python
warp/utils/config_parsing.py
j-helland/warp
2a71346f0ec4d4e6fd45ed3b5e972b683724287c
[ "Unlicense" ]
null
null
null
warp/utils/config_parsing.py
j-helland/warp
2a71346f0ec4d4e6fd45ed3b5e972b683724287c
[ "Unlicense" ]
null
null
null
warp/utils/config_parsing.py
j-helland/warp
2a71346f0ec4d4e6fd45ed3b5e972b683724287c
[ "Unlicense" ]
null
null
null
# std import datetime from copy import deepcopy from collections import deque import yaml # from .lazy_loader import LazyLoader as LL # yaml = LL('yaml', globals(), 'yaml') # json = LL('json', globals(), 'json') # types from typing import Dict, Any, Union, Tuple __all__ = [ 'load_config_file', 'save_config'] BASIC_TYPES: Tuple[type, ...] = ( type(None), bool, int, float, str, datetime.datetime, bytes, complex) ITERABLE_TYPES: Tuple[type, ...] = ( list, tuple, set, dict) class HyperParameter: verbose = False @classmethod def set_verbosity(cls, value): cls.verbose = value def __init__(self, values=None, spec_type=None, spec=None): # Default version is to provide a list of actual values if values and type(values) is not list: raise TypeError(f'hyperparameter values must be a list not {type(values)}') if values: if not isinstance(values[0],dict) and not isinstance(values[0],list): values = sorted(set(values)) if self.verbose: print('Found literal (unique) hparam values: ',values) elif len(values)==1 and isinstance(values[0],dict): raise TypeError(f'known bug/unsupported, hparam len(values)==1 but elm is a dict') else: # values = sorted(values) if self.verbose: print('Found literal hparam values: ',values) # Can support other value shorthands/generators if values is None: # A simple count or range(n) type if spec_type == 'int': values = [i for i in range(spec)] else: raise TypeError(f'no generator for hyperparameter spec.type: {spec_type}') # Could add another range type with low, high, stepsize... 
etc if self.verbose: print('Found constructable hparam values: ',values) self.values = values def set_value(dictionary, keychain, value): if len(keychain) == 1: dictionary[keychain[0]] = value return set_value(dictionary[keychain[0]],keychain[1:],value) return dictionary class BFTreeExpander: roots = {} # hparam_keys = set() # hparam_keychains = set() hparam_keychains = {} @classmethod def reset_roots(cls): cls.roots = {} @classmethod def get_roots(cls): return [v.root for k,v in cls.roots.items()] @classmethod def reset_keys(cls): # cls.hparam_keys = set() # cls.hparam_keychains = set() cls.hparam_keychains = {} # @classmethod # def get_hparam_key_list(cls): # return list(cls.hparam_keys) @classmethod def get_hparam_keychains(cls): return list(cls.hparam_keychains.keys()) # return cls.hparam_keychains def __init__(self, root): self.root = root self.queue = deque() self.id = id(self) self.roots[self.id] = self # recursive traverser def expand(self, node = None, keychain = []): if node is None: node = self.root if isinstance(node, HyperParameter): # self.hparam_keys.add(keychain[-1]) # self.hparam_keychains.add(".".join(keychain[1:])) # drop root key self.hparam_keychains[".".join(keychain[1:])] = None if len(node.values) == 1: set_value(self.root,keychain,node.values[0]) return False else: for val in node.values: new_root = set_value(deepcopy(self.root),keychain,val) new_tree = BFTreeExpander(new_root) return True # "expansion was performed" if isinstance(node, dict): for key,val in node.items(): if val is not None: new_keychain = keychain.copy() new_keychain.append(key) self.queue.append((val, new_keychain)) while len(self.queue) > 0: next_node, next_keychain = self.queue.popleft() expanded = self.expand(next_node, next_keychain) if expanded: # since we had to expand this tree further, # we can now remove it from the working set # pop w/ default None, instead of del, as this can get called repeatedly on way up self.roots.pop(self.id, None) return True # bubble up 
return False # no expansion performed def expand_config(orig_config): old_roots = [{'root': orig_config}] while True: old_ct = len(old_roots) new_roots = [] for input_root in old_roots: BFTreeExpander.reset_roots() bfte = BFTreeExpander(input_root) bfte.expand() new_roots.extend(bfte.get_roots()) if old_ct == len(new_roots): break old_roots = new_roots.copy() roots, keychains = [tree['root'] for tree in new_roots], BFTreeExpander.get_hparam_keychains() BFTreeExpander.reset_roots() BFTreeExpander.reset_keys() return roots, keychains ############ PyYAML Custom obj constructors/representers ############### def hparam_constructor(loader, node): fields = loader.construct_mapping(node, deep=True) hparam = HyperParameter(**fields) yield hparam def tuple_to_list_constructor(loader, node): return list(loader.construct_sequence(node, deep=True)) def hparam_representer(dumper, node): return dumper.represent_mapping(u'!HYPERPARAMETER', [("values",node.values)], flow_style=False ) # def load_config_file(path: str) -> Dict[str, Any]: def load_config_file(path: str) -> Tuple[list, list]: """Load a YAML file into a dict. Extensions accepted are `{.yml, .yaml}`. Arguments: path: The relative path to the YAML file to load. Returns: A dict version of the YAML file. 
""" yaml.add_constructor('!HYPERPARAMETER', hparam_constructor, yaml.FullLoader) yaml.add_representer(HyperParameter, hparam_representer) # HyperParameter.set_verbosity(args.verbose) file_ext = path.split('.')[-1] if file_ext in {'yml', 'yaml'}: with open(path, 'rb') as file: config = yaml.load(file, Loader=yaml.FullLoader) else: raise NotImplementedError('unrecognized file extension .{:s} for file {:s}'.format(file_ext, path)) # expanded_set, keychains = expand_config(config) return expand_config(config) # return config def typecheck_config(config: Dict[str, Any]) -> None: invalid_types = set() def recursive_typecheck(struct: Union[Dict[str, Any], Any]) -> bool: # Recurse through iterables if isinstance(struct, ITERABLE_TYPES): if isinstance(struct, dict): return all(map(recursive_typecheck, struct.values())) return all(map(recursive_typecheck, struct)) # Check against allowed types. Aggregate any found violations. else: if not isinstance(struct, BASIC_TYPES): invalid_types.add(type(struct)) return False return True if not recursive_typecheck(config): raise TypeError(f'config {config} contains invalid type(s) {invalid_types}') def save_config(path: str, config: Dict[str, Any]) -> None: try: typecheck_config(config) except TypeError as e: raise RuntimeError( [e, RuntimeError('Cannot cache runtime parameter values due to invalid type(s).')] ) # cache with open(path, 'w') as file: yaml.dump(config, file, default_flow_style=False)
31.398406
112
0.597005
3,904
0.495369
147
0.018652
503
0.063824
0
0
2,039
0.258724
acfc23f9ea827b83951d7b7cd523c92769d23ed2
5,223
py
Python
npword2vec/HuffmanTree.py
qiaoxiu/nlp
790234d559ed9d5cae5b10dd5013ebd8052b6db9
[ "Apache-2.0" ]
null
null
null
npword2vec/HuffmanTree.py
qiaoxiu/nlp
790234d559ed9d5cae5b10dd5013ebd8052b6db9
[ "Apache-2.0" ]
null
null
null
npword2vec/HuffmanTree.py
qiaoxiu/nlp
790234d559ed9d5cae5b10dd5013ebd8052b6db9
[ "Apache-2.0" ]
null
null
null
__author__ = 'multiangle' # 这是实现 霍夫曼树相关的文件, 主要用于 针对层次softmax进行 word2vec 优化方案的一种 ''' 至于 为什么要进行层次softmax 可以简单理解 因为词表很大 针对上完个类别单词进行softmax 计算量大 更新参数过多 无法训练,而采用softmax 层次化 只需要 计算几个有限单词的sigmod 就可以 更新参数也非常少 提高训练速度 什么是霍夫曼树 简单理解就是 将训练文本 进行词频统计 通过构建加权最短路径来构造二叉树 这样 词频高的 位置在前 词频低的位置在后 每一个 霍夫曼编码代表一个词 路径 并且是唯一 不是其他词的前缀 ''' import numpy as np class HuffmanTreeNode(): def __init__(self,value,possibility): # common part of leaf node and tree node # 词频概率,训练文本出现的次数 self.possibility = possibility # 左右子节点 self.left = None self.right = None # value of leaf node will be the word, and be # mid vector in tree node # 叶节点是学习的词向量 非叶子节点是中间变量 即 wx 与 xite self.value = value # the value of word # 存储霍夫曼码 self.Huffman = "" # store the huffman code def __str__(self): return 'HuffmanTreeNode object, value: {v}, possibility: {p}, Huffman: {h}'\ .format(v=self.value,p=self.possibility,h=self.Huffman) class HuffmanTree(): def __init__(self, word_dict, vec_len=15000): self.vec_len = vec_len # the length of word vector self.root = None # 所有词汇 word_dict_list = list(word_dict.values()) # 根据所有词汇信息 创建节点 node_list = [HuffmanTreeNode(x['word'],x['possibility']) for x in word_dict_list] # 构建霍夫曼树 self.build_tree(node_list) # self.build_CBT(node_list) # 生成霍夫曼树的霍夫曼编码 self.generate_huffman_code(self.root, word_dict) def build_tree(self,node_list): # node_list.sort(key=lambda x:x.possibility,reverse=True) # for i in range(node_list.__len__()-1)[::-1]: # top_node = self.merge(node_list[i],node_list[i+1]) # node_list.insert(i,top_node) # self.root = node_list[0] while node_list.__len__()>1: i1 = 0 # i1表示概率最小的节点 i2 = 1 # i2 概率第二小的节点 if node_list[i2].possibility < node_list[i1].possibility : [i1,i2] = [i2,i1] for i in range(2,node_list.__len__()): # 找到最小的两个节点 if node_list[i].possibility<node_list[i2].possibility : i2 = i if node_list[i2].possibility < node_list[i1].possibility : [i1,i2] = [i2,i1] #根据 叶节点1 和叶节点2 生成叶节点 也就是中间变量 其中 用来 存放xite top_node = self.merge(node_list[i1],node_list[i2]) # 删除节点1 和节点2 将 新生成的非叶节点进行 加入 
以进行后续 循环构建霍夫曼树 if i1<i2: node_list.pop(i2) node_list.pop(i1) elif i1>i2: node_list.pop(i1) node_list.pop(i2) else: raise RuntimeError('i1 should not be equal to i2') node_list.insert(0,top_node) self.root = node_list[0] def build_CBT(self,node_list): # build a complete binary tree node_list.sort(key=lambda x:x.possibility,reverse=True) node_num = node_list.__len__() before_start = 0 while node_num>1 : for i in range(node_num>>1): top_node = self.merge(node_list[before_start+i*2],node_list[before_start+i*2+1]) node_list.append(top_node) if node_num%2==1: top_node = self.merge(node_list[before_start+i*2+2],node_list[-1]) node_list[-1] = top_node before_start = before_start + node_num node_num = node_num>>1 self.root = node_list[-1] def generate_huffman_code(self, node, word_dict): # # use recursion in this edition # if node.left==None and node.right==None : # word = node.value # code = node.Huffman # print(word,code) # word_dict[word]['Huffman'] = code # return -1 # # code = node.Huffman # if code==None: # code = "" # node.left.Huffman = code + "1" # node.right.Huffman = code + "0" # self.generate_huffman_code(node.left, word_dict) # self.generate_huffman_code(node.right, word_dict) # use stack butnot recursion in this edition # 左子树 编码是1 右子树 编码是0 先左子树 在右字数 设置编码链 stack = [self.root] while (stack.__len__()>0): node = stack.pop() # go along left tree while node.left or node.right : code = node.Huffman node.left.Huffman = code + "1" node.right.Huffman = code + "0" stack.append(node.right) node = node.left word = node.value code = node.Huffman # print(word,'\t',code.__len__(),'\t',node.possibility) word_dict[word]['Huffman'] = code def merge(self,node1,node2): # 新生成的非叶节点的词频是 俩个叶节点的加和 top_pos = node1.possibility + node2.possibility # 将非叶节点向量进行初始化 top_node = HuffmanTreeNode(np.zeros([1,self.vec_len]), top_pos) if node1.possibility >= node2.possibility : top_node.left = node1 top_node.right = node2 else: top_node.left = node2 top_node.right = node1 return top_node
35.290541
118
0.573234
5,321
0.876462
0
0
0
0
0
0
2,583
0.425465
acfe9f4e0aefbb7c974bcb3beaf946d90910c093
14,313
py
Python
grama/fit/fit_scikitlearn.py
Riya-1/py_grama
caafeac418ce0014b477e6feded06ccc1592b94d
[ "MIT" ]
13
2020-02-24T16:51:51.000Z
2022-03-30T18:56:55.000Z
grama/fit/fit_scikitlearn.py
zdelrosario/py_grama
43f1a76dc93dd33f02e8a7f8de3323894beefed0
[ "MIT" ]
78
2019-12-30T19:13:21.000Z
2022-02-23T18:17:54.000Z
grama/fit/fit_scikitlearn.py
Riya-1/py_grama
caafeac418ce0014b477e6feded06ccc1592b94d
[ "MIT" ]
7
2020-10-19T17:49:25.000Z
2021-08-15T20:46:52.000Z
__all__ = [ "fit_gp", "ft_gp", "fit_lm", "ft_lm", "fit_rf", "ft_rf", "fit_kmeans", "ft_kmeans", ] ## Fitting via sklearn package try: from sklearn.base import clone from sklearn.linear_model import LinearRegression from sklearn.gaussian_process import GaussianProcessRegressor from sklearn.gaussian_process.kernels import Kernel, RBF, ConstantKernel as Con from sklearn.cluster import KMeans from sklearn.ensemble import RandomForestRegressor except ModuleNotFoundError: raise ModuleNotFoundError("module sklearn not found") import grama as gr from copy import deepcopy from grama import add_pipe, pipe from pandas import concat, DataFrame, Series from toolz import curry from warnings import filterwarnings ## Helper functions and classes # -------------------------------------------------- def standardize_cols(df, ser_min, ser_max, var): """ @pre set(ser_min.index) == set(ser_max.index) """ df_std = df.copy() for v in var: den = ser_max[v] - ser_min[v] if den < 1e-16: den = 1 df_std[v] = (df_std[v] - ser_min[v]) / den return df_std def restore_cols(df, ser_min, ser_max, var): """ @pre set(ser_min.index) == set(ser_max.index) """ df_res = df.copy() for v in var: den = ser_max[v] - ser_min[v] if den < 1e-16: den = 1 df_res[v] = den * df[v] + ser_min[v] return df_res class FunctionGPR(gr.Function): def __init__(self, gpr, var, out, name, runtime, var_min, var_max): self.gpr = gpr # self.df_train = df_train self.var = var ## "Natural" outputs; what we're modeling self.out_nat = out ## Predicted outputs; mean and std self.out_mean = list(map(lambda s: s + "_mean", out)) self.out_sd = list(map(lambda s: s + "_sd", out)) self.out = self.out_mean + self.out_sd self.name = name self.runtime = runtime self.var_min = var_min self.var_max = var_max def eval(self, df): ## Check invariant; model inputs must be subset of df columns if not set(self.var).issubset(set(df.columns)): raise ValueError( "Model function `{}` var not a subset of given columns".format( self.name ) ) df_sd = 
standardize_cols(df, self.var_min, self.var_max, self.var) y, y_sd = self.gpr.predict(df_sd[self.var], return_std=True) return concat( ( DataFrame(data=y, columns=self.out_mean), DataFrame(data=y_sd, columns=self.out_sd), ), axis=1, ) def copy(self): func_new = FunctionGPR( self.gpr, self.df_train.copy(), self.var, self.out_nat, self.name, self.runtime, ) return func_new class FunctionRegressor(gr.Function): def __init__(self, regressor, var, out, name, runtime): """ Args: regressor (scikit Regressor): """ self.regressor = regressor self.var = var self.out = list(map(lambda s: s + "_mean", out)) self.name = name self.runtime = runtime def eval(self, df): ## Check invariant; model inputs must be subset of df columns if not set(self.var).issubset(set(df.columns)): raise ValueError( "Model function `{}` var not a subset of given columns".format( self.name ) ) ## Predict y = self.regressor.predict(df[self.var]) return DataFrame(data=y, columns=self.out) ## Fit GP model with sklearn # -------------------------------------------------- @curry def fit_gp( df, md=None, var=None, out=None, domain=None, density=None, kernels=None, seed=None, suppress_warnings=True, n_restart=5, alpha=1e-10, ): r"""Fit a gaussian process Fit a gaussian process to given data. Specify var and out, or inherit from an existing model. Note that the new model will have two outputs `y_mean, y_sd` for each original output `y`. The quantity `y_mean` is the best-fit value, while `y_sd` is a measure of predictive uncertainty. 
Args: df (DataFrame): Data for function fitting md (gr.Model): Model from which to inherit metadata var (list(str) or None): List of features or None for all except outputs out (list(str)): List of outputs to fit domain (gr.Domain): Domain for new model density (gr.Density): Density for new model seed (int or None): Random seed for fitting process kernels (sklearn.gaussian_process.kernels.Kernel or dict or None): Kernel for GP n_restart (int): Restarts for optimization alpha (float or iterable): Value added to diagonal of kernel matrix suppress_warnings (bool): Suppress warnings when fitting? Returns: gr.Model: A grama model with fitted function(s) Notes: - Wrapper for sklearn.gaussian_process.GaussianProcessRegressor """ if suppress_warnings: filterwarnings("ignore") n_obs, n_in = df.shape ## Infer fitting metadata, if available if not (md is None): domain = md.domain density = md.density out = md.out ## Check invariants if not set(out).issubset(set(df.columns)): raise ValueError("out must be subset of df.columns") ## Default input value if var is None: var = list(set(df.columns).difference(set(out))) ## Check more invariants set_inter = set(out).intersection(set(var)) if len(set_inter) > 0: raise ValueError( "out and var must be disjoint; intersect = {}".format(set_inter) ) if not set(var).issubset(set(df.columns)): raise ValueError("var must be subset of df.columns") ## Pre-process kernel selection if kernels is None: # Vectorize kernels = {o: None for o in out} elif isinstance(kernels, Kernel): kernels = {o: kernels for o in out} ## Pre-process data var_min = df[var].min() var_max = df[var].max() df_sd = standardize_cols(df, var_min, var_max, var) ## Construct gaussian process for each output functions = [] for output in out: # Define and fit model gpr = GaussianProcessRegressor( kernel=deepcopy(kernels[output]), random_state=seed, normalize_y=True, copy_X_train=True, n_restarts_optimizer=n_restart, alpha=alpha, ) gpr.fit(df_sd[var], df_sd[output]) name = "GP 
({})".format(str(gpr.kernel_)) fun = FunctionGPR(gpr, var, [output], name, 0, var_min, var_max) functions.append(fun) ## Construct model return gr.Model(functions=functions, domain=domain, density=density) ft_gp = add_pipe(fit_gp) ## Fit random forest model with sklearn # -------------------------------------------------- @curry def fit_rf( df, md=None, var=None, out=None, domain=None, density=None, seed=None, suppress_warnings=True, **kwargs ): r"""Fit a random forest Fit a random forest to given data. Specify inputs and outputs, or inherit from an existing model. Args: df (DataFrame): Data for function fitting md (gr.Model): Model from which to inherit metadata var (list(str) or None): List of features or None for all except outputs out (list(str)): List of outputs to fit domain (gr.Domain): Domain for new model density (gr.Density): Density for new model seed (int or None): Random seed for fitting process suppress_warnings (bool): Suppress warnings when fitting? Keyword Arguments: n_estimators (int): criterion (int): max_depth (int or None): min_samples_split (int, float): min_samples_leaf (int, float): min_weight_fraction_leaf (float): max_features (int, float, string): max_leaf_nodes (int or None): min_impurity_decrease (float): min_impurity_split (float): bootstrap (bool): oob_score (bool): n_jobs (int or None): random_state (int): Returns: gr.Model: A grama model with fitted function(s) Notes: - Wrapper for sklearn.ensemble.RandomForestRegressor """ if suppress_warnings: filterwarnings("ignore") n_obs, n_in = df.shape ## Infer fitting metadata, if available if not (md is None): domain = md.domain density = md.density out = md.out ## Check invariants if not set(out).issubset(set(df.columns)): raise ValueError("out must be subset of df.columns") ## Default input value if var is None: var = list(set(df.columns).difference(set(out))) ## Check more invariants set_inter = set(out).intersection(set(var)) if len(set_inter) > 0: raise ValueError( "outputs and inputs 
must be disjoint; intersect = {}".format(set_inter) ) if not set(var).issubset(set(df.columns)): raise ValueError("var must be subset of df.columns") ## Construct gaussian process for each output functions = [] for output in out: rf = RandomForestRegressor(random_state=seed, **kwargs) rf.fit(df[var], df[output]) name = "RF" fun = FunctionRegressor(rf, var, [output], name, 0) functions.append(fun) ## Construct model return gr.Model(functions=functions, domain=domain, density=density) ft_rf = add_pipe(fit_rf) ## Fit linear model with sklearn # -------------------------------------------------- @curry def fit_lm( df, md=None, var=None, out=None, domain=None, density=None, seed=None, suppress_warnings=True, **kwargs ): r"""Fit a linear model Fit a linear model to given data. Specify inputs and outputs, or inherit from an existing model. Args: df (DataFrame): Data for function fitting md (gr.Model): Model from which to inherit metadata var (list(str) or None): List of features or None for all except outputs out (list(str)): List of outputs to fit domain (gr.Domain): Domain for new model density (gr.Density): Density for new model seed (int or None): Random seed for fitting process suppress_warnings (bool): Suppress warnings when fitting? 
Returns: gr.Model: A grama model with fitted function(s) Notes: - Wrapper for sklearn.ensemble.RandomForestRegressor """ if suppress_warnings: filterwarnings("ignore") n_obs, n_in = df.shape ## Infer fitting metadata, if available if not (md is None): domain = md.domain density = md.density out = md.out ## Check invariants if not set(out).issubset(set(df.columns)): raise ValueError("out must be subset of df.columns") ## Default input value if var is None: var = list(set(df.columns).difference(set(out))) ## Check more invariants set_inter = set(out).intersection(set(var)) if len(set_inter) > 0: raise ValueError( "outputs and inputs must be disjoint; intersect = {}".format(set_inter) ) if not set(var).issubset(set(df.columns)): raise ValueError("var must be subset of df.columns") ## Construct gaussian process for each output functions = [] for output in out: lm = LinearRegression(**kwargs) lm.fit(df[var], df[output]) name = "LM" fun = FunctionRegressor(lm, var, [output], name, 0) functions.append(fun) ## Construct model return gr.Model(functions=functions, domain=domain, density=density) ft_lm = add_pipe(fit_lm) ## Fit kmeans clustering model # -------------------------------------------------- @curry def fit_kmeans(df, var=None, colname="cluster_id", seed=None, **kwargs): r"""K-means cluster a dataset Create a cluster-labeling model on a dataset using the K-means algorithm. Args: df (DataFrame): Hybrid point results from gr.eval_hybrid() var (list or None): Variables in df on which to cluster. Use None to cluster on all variables. colname (string): Name of cluster id; will be output in cluster model. seed (int): Random seed for kmeans clustering Kwargs: n_clusters (int): Number of clusters to fit random_state (int or None): Returns: gr.Model: Model that labels input data Notes: - A wrapper for sklearn.cluster.KMeans References: Scikit-learn: Machine Learning in Python, Pedregosa et al. JMLR 12, pp. 2825-2830, 2011. 
Examples: >>> import grama as gr >>> from grama.data import df_stang >>> from grama.fit import ft_kmeans >>> X = gr.Intention() >>> md_cluster = ( >>> df_stang >>> >> ft_kmeans(var=["E", "mu"], n_clusters=2) >>> ) >>> ( >>> md_cluster >>> >> gr.ev_df(df_stang) >>> >> gr.tf_group_by(X.cluster_id) >>> >> gr.tf_summarize( >>> thick_mean=gr.mean(X.thick), >>> thick_sd=gr.sd(X.thick), >>> n=gr.n(X.index), >>> ) >>> ) """ ## Check invariants if var is None: var = list(df.columns).copy() else: var = list(var).copy() diff = set(var).difference(set(df.columns)) if len(diff) > 0: raise ValueError( "`var` must be subset of `df.columns`\n" "diff = {}".format(diff) ) ## Generate clustering kmeans = KMeans(random_state=seed, **kwargs).fit(df[var].values) ## Build grama model def fun_cluster(df): res = kmeans.predict(df[var].values) return DataFrame(data={colname: res}) md = gr.Model() >> gr.cp_vec_function(fun=fun_cluster, var=var, out=[colname]) return md ft_kmeans = add_pipe(fit_kmeans)
28.915152
96
0.593866
2,291
0.160064
0
0
10,080
0.704255
0
0
6,800
0.475093
acfed52bb6497ce5fb06e49460f94e77f3a7ee78
7,306
py
Python
VideoTranscriptClassification/video_indexer.py
MACEL94/media-services-video-indexer
b4076daa7a7cdad456ce696b50f77ce2f21ead22
[ "MIT" ]
54
2020-01-16T22:18:07.000Z
2022-03-24T15:58:16.000Z
VideoTranscriptClassification/video_indexer.py
MACEL94/media-services-video-indexer
b4076daa7a7cdad456ce696b50f77ce2f21ead22
[ "MIT" ]
10
2020-07-19T19:01:31.000Z
2022-02-09T09:49:00.000Z
VideoTranscriptClassification/video_indexer.py
MACEL94/media-services-video-indexer
b4076daa7a7cdad456ce696b50f77ce2f21ead22
[ "MIT" ]
43
2020-02-13T05:36:42.000Z
2022-03-09T15:39:57.000Z
# Original source code: https://github.com/bklim5/python_video_indexer_lib import os import re import time import datetime import requests def get_retry_after_from_message(message): match = re.search(r'Try again in (\d+) second', message or '') if match: return int(match.group(1)) return 30 # default to retry in 30 seconds class VideoIndexer(): def __init__(self, vi_subscription_key, vi_location, vi_account_id): self.vi_subscription_key = vi_subscription_key self.vi_location = vi_location self.vi_account_id = vi_account_id self.access_token = None self.access_token_timestamp = None self.video_name_to_id_dict = None self.get_access_token() def del_video(self, video_id): self.check_access_token() params = { 'accessToken': self.access_token } delete_video = requests.delete( 'https://api.videoindexer.ai/{loc}/Accounts/{acc_id}/Videos/{videoId}?{access_token}'.format( # NOQA E501 loc=self.vi_location, acc_id=self.vi_account_id, videoId=video_id, access_token=self.access_token ), params=params ) try: print(delete_video.json()) except Exception as ex: print("Response:", delete_video) return delete_video def get_access_token(self): print('Getting video indexer access token...') headers = { 'Ocp-Apim-Subscription-Key': self.vi_subscription_key } params = { 'allowEdit': 'true' } access_token_req = requests.get( 'https://api.videoindexer.ai/auth/{loc}/Accounts/{acc_id}/AccessToken'.format( # NOQA E501 loc=self.vi_location, acc_id=self.vi_account_id ), params=params, headers=headers ) access_token = access_token_req.text[1:-1] print('Access Token: {}'.format(access_token)) self.access_token = access_token self.access_token_timestamp = datetime.datetime.now() return access_token def get_all_videos_list(self): all_videos_list = [] done = False skip = 0 page_size = 200 while(not done): response = self.get_videos_list(page_size=page_size, skip=skip) all_videos_list.extend(response['results']) next_page = response['nextPage'] skip = next_page['skip'] page_size = 
next_page['pageSize'] done = next_page['done'] return all_videos_list def get_videos_list(self, page_size=25, skip=0): self.check_access_token() params = { 'accessToken': self.access_token, 'pageSize': page_size, 'skip': skip } print('Getting videos list..') get_videos_list = requests.get( 'https://api.videoindexer.ai/{loc}/Accounts/{acc_id}/Videos'.format( # NOQA E501 loc=self.vi_location, acc_id=self.vi_account_id ), params=params ) response = get_videos_list.json() return response def check_access_token(self): delta = datetime.datetime.now() - self.access_token_timestamp if delta > datetime.timedelta(minutes=50): self.get_access_token() def upload_to_video_indexer( self, video_url, name, force_upload_if_exists=False, video_language='English', streaming_preset='Default', indexing_preset='Default', verbose=False ): self.check_access_token() # file_name = os.path.basename(os.path.splitext(video_url)[0]) if self.video_name_to_id_dict is None: self.get_video_name_to_id_dict() if name in self.video_name_to_id_dict.keys(): if verbose: print("Video with the same name already exists in current Video Indexer account.") # NOQA E501 if not force_upload_if_exists: return self.video_name_to_id_dict[name] if verbose: print("'force_upload_if_exists' set to 'True' so uploading the file anyway.") if verbose: print('Uploading video to video indexer...') params = { 'streamingPreset': streaming_preset, 'indexingPreset': indexing_preset, 'language': video_language, 'name': name, 'accessToken': self.access_token } files = {} if "http" in video_url.lower(): params['videoUrl'] = video_url else: files = { 'file': open(video_url, 'rb') } retry = True while retry: upload_video_req = requests.post( 'https://api.videoindexer.ai/{loc}/Accounts/{acc_id}/Videos'.format( # NOQA E501 loc=self.vi_location, acc_id=self.vi_account_id ), params=params, files=files ) if upload_video_req.status_code == 200: retry = False break # hit throttling limit, sleep and retry if upload_video_req.status_code == 
429: error_resp = upload_video_req.json() if verbose: print('Throttling limit hit. Error message: {}'.format( error_resp.get('message'))) retry_after = get_retry_after_from_message( error_resp.get('message')) time.sleep(retry_after + 1) continue if verbose: print('Error uploading video to video indexer: {}'.format( upload_video_req.json())) raise Exception('Error uploading video to video indexer') response = upload_video_req.json() return response['id'] def get_video_info(self, video_id, video_language='English', verbose=False): self.check_access_token() params = { 'accessToken': self.access_token, 'language': video_language } if verbose: print('Getting video info for: {}'.format(video_id)) get_video_info_req = requests.get( 'https://api.videoindexer.ai/{loc}/Accounts/{acc_id}/Videos/{video_id}/Index'.format( # NOQA E501 loc=self.vi_location, acc_id=self.vi_account_id, video_id=video_id ), params=params ) response = get_video_info_req.json() if response['state'] == 'Processing': if verbose: print('Video still processing, current status: {}'.format( response['videos'][0]['processingProgress'], )) return response def get_video_name_to_id_dict(self): all_videos = self.get_all_videos_list() names = [video['name'] for video in all_videos] ids = [video['id'] for video in all_videos] self.video_name_to_id_dict = dict(zip(names, ids)) return self.video_name_to_id_dict
33.668203
118
0.571722
6,952
0.951547
0
0
0
0
0
0
1,478
0.202299
acff916c9c13ec45d8705a7c78687da27d11f532
92
py
Python
parameters_8560.py
ksuhr1/CMPS183-hw3
d0450827912b7ec355a9e433c0c7e33d1b2610a0
[ "BSD-3-Clause" ]
null
null
null
parameters_8560.py
ksuhr1/CMPS183-hw3
d0450827912b7ec355a9e433c0c7e33d1b2610a0
[ "BSD-3-Clause" ]
null
null
null
parameters_8560.py
ksuhr1/CMPS183-hw3
d0450827912b7ec355a9e433c0c7e33d1b2610a0
[ "BSD-3-Clause" ]
null
null
null
password="pbkdf2(1000,20,sha512)$b24904a15adb4514$85f395bc9c1f6be8227d9f7540e54127cd4f0fdf"
46
91
0.891304
0
0
0
0
0
0
0
0
82
0.891304
acffa281e03e8947e11cd40b36de07ec4e8a852d
3,086
py
Python
app/__init__.py
calcutec/netbard
2c30979ad3ca1cee2f81c521942e8bffea6f81b2
[ "BSD-3-Clause" ]
null
null
null
app/__init__.py
calcutec/netbard
2c30979ad3ca1cee2f81c521942e8bffea6f81b2
[ "BSD-3-Clause" ]
6
2015-05-26T14:03:03.000Z
2015-05-26T20:22:18.000Z
app/__init__.py
calcutec/netbard
2c30979ad3ca1cee2f81c521942e8bffea6f81b2
[ "BSD-3-Clause" ]
null
null
null
import os from flask import Flask from flask.ext.sqlalchemy import SQLAlchemy from flask.ext.login import LoginManager from flask.ext.mail import Mail from config import ADMINS, MAIL_SERVER, MAIL_PORT, MAIL_USERNAME, \ MAIL_PASSWORD, SQLALCHEMY_DATABASE_URI from .momentjs import momentjs from flask.json import JSONEncoder from flask_wtf.csrf import CsrfProtect app = Flask(__name__) app.config.from_object('config') app.config['SQLALCHEMY_DATABASE_URI'] = SQLALCHEMY_DATABASE_URI db = SQLAlchemy(app) lm = LoginManager() lm.init_app(app) lm.login_view = 'login' lm.login_message = 'Please log in to access this page.' mail = Mail(app) CsrfProtect(app) app.config['OAUTH_CREDENTIALS'] = { 'facebook': { 'id': os.environ['FACEBOOK_AUTH'], 'secret': os.environ['FACEBOOK_AUTH_SECRET'] }, 'google': { 'id': os.environ['GOOGLE_AUTH'], 'secret': os.environ['GOOGLE_AUTH_SECRET'] } } class CustomJSONEncoder(JSONEncoder): """This class adds support for lazy translation texts to Flask's JSON encoder. This is necessary when flashing translated texts.""" def default(self, obj): from speaklater import is_lazy_string if is_lazy_string(obj): try: return unicode(obj) # python 2 except NameError: return str(obj) # python 3 return super(CustomJSONEncoder, self).default(obj) app.json_encoder = CustomJSONEncoder if not app.debug and MAIL_SERVER != '': import logging from logging.handlers import SMTPHandler credentials = None if MAIL_USERNAME or MAIL_PASSWORD: credentials = (MAIL_USERNAME, MAIL_PASSWORD) mail_handler = SMTPHandler((MAIL_SERVER, MAIL_PORT), 'no-reply@' + MAIL_SERVER, ADMINS, 'burtonblog failure', credentials) mail_handler.setLevel(logging.ERROR) app.logger.addHandler(mail_handler) if not app.debug and os.environ.get('HEROKU') is None: import logging from logging.handlers import RotatingFileHandler file_handler = RotatingFileHandler('tmp/burtonblog.log', 'a', 1 * 1024 * 1024, 10) file_handler.setLevel(logging.INFO) file_handler.setFormatter(logging.Formatter( '%(asctime)s 
%(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')) app.logger.addHandler(file_handler) app.logger.setLevel(logging.INFO) app.logger.info('burtonblog startup') if os.environ.get('HEROKU') is not None: import logging stream_handler = logging.StreamHandler() app.logger.addHandler(stream_handler) app.logger.setLevel(logging.INFO) app.logger.info('burtonblog startup') app.jinja_env.globals['momentjs'] = momentjs app.config["S3_LOCATION"] = 'https://s3.amazonaws.com/netbardus/' app.config["S3_UPLOAD_DIRECTORY"] = 'user_imgs' app.config["S3_BUCKET"] = 'netbardus' app.config["AWS_ACCESS_KEY_ID"] = os.environ['AWS_ACCESS_KEY_ID'] app.config["AWS_SECRET_ACCESS_KEY"] = os.environ['AWS_SECRET_ACCESS_KEY'] from app import views, models
34.674157
79
0.697343
481
0.155865
0
0
0
0
0
0
741
0.240117
4a03ee9eb5b9f1623408d903facb0c0ce55d9557
2,904
py
Python
src/knarrow/cli/__main__.py
InCogNiTo124/knarrow
b0a19273a27e68899d982bcc0bf0938c60d3ec26
[ "Apache-2.0" ]
2
2021-10-10T11:12:53.000Z
2021-12-14T13:55:30.000Z
src/knarrow/cli/__main__.py
InCogNiTo124/knarrow
b0a19273a27e68899d982bcc0bf0938c60d3ec26
[ "Apache-2.0" ]
17
2021-09-30T21:51:28.000Z
2022-03-27T23:33:17.000Z
src/knarrow/cli/__main__.py
InCogNiTo124/knarrow
b0a19273a27e68899d982bcc0bf0938c60d3ec26
[ "Apache-2.0" ]
null
null
null
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser from collections import Counter from functools import partial from pathlib import Path from knarrow import find_knee def gte_0(value): x = float(value) assert x >= 0.0 return x METHODS = [ "angle", "c_method", "distance", "distance_adjacent", "kneedle", "menger_anchored", "menger_successive", "ols_swiping", ] def get_parser(): parser = ArgumentParser(prog="knarrow", formatter_class=ArgumentDefaultsHelpFormatter) parser.add_argument( "-m", "--method", choices=(["all"] + METHODS), default="all", help="select the knee searching method" ) parser.add_argument( "--sort", action="store_true", help="sort the values before the knee search. By default is assumes the input is already sorted", ) parser.add_argument("--smoothing", default=0.0, type=gte_0, help="cublic spline smoothing parameter") parser.add_argument( "-d", "--delimiter", default=None, help="split the values with DELIMITER. If None, split by space" ) parser.add_argument( "-o", "--output", choices=["index", "value"], default="index", help=( "if output is `value`, this will return the row of the input file where the knee was detected. " "if output is `index`, the index of that row will be returned" ), ) parser.add_argument("files", nargs="*", default=["-"], help="a list of files. 
STDIN is denoted with `-`.") return parser def cli(method="all", files=None, sort=False, delimiter=None, output=None, smoothing=None): for filename in files: path = Path("/dev/stdin" if filename == "-" else filename) with path.open("r") as file: rows = list(map(str.strip, file)) split = partial(str.split, sep=delimiter) values = map(split, rows) numbers = list(tuple(float(value) for value in row) for row in values) indices = list(range(len(numbers))) if sort: indices.sort(key=lambda i: numbers[i]) key_function = (lambda x: x) if len(numbers[0]) == 1 else (lambda x: x[0]) numbers.sort(key=key_function) if method == "all": counter = Counter([find_knee(numbers, method=m, sort=False, smoothing=smoothing) for m in METHODS]) most_common = counter.most_common(1).pop(0) knee = most_common[0] else: knee = find_knee(numbers, method=method, sort=False, smoothing=smoothing) result = indices[knee] if output == "index" else rows[indices[knee]] print(path.name, result) return def main(): parser = get_parser() args = vars(parser.parse_args()) exit(cli(**args)) if __name__ == "__main__": main()
32.629213
115
0.608471
0
0
0
0
0
0
0
0
701
0.241391
4a074a679c554390585d0307ad19621a1d2bbeb2
2,129
py
Python
bitmovin_api_sdk/notifications/webhooks/encoding/encodings/encodings_api.py
jaythecaesarean/bitmovin-api-sdk-python
48166511fcb9082041c552ace55a9b66cc59b794
[ "MIT" ]
11
2019-07-03T10:41:16.000Z
2022-02-25T21:48:06.000Z
bitmovin_api_sdk/notifications/webhooks/encoding/encodings/encodings_api.py
jaythecaesarean/bitmovin-api-sdk-python
48166511fcb9082041c552ace55a9b66cc59b794
[ "MIT" ]
8
2019-11-23T00:01:25.000Z
2021-04-29T12:30:31.000Z
bitmovin_api_sdk/notifications/webhooks/encoding/encodings/encodings_api.py
jaythecaesarean/bitmovin-api-sdk-python
48166511fcb9082041c552ace55a9b66cc59b794
[ "MIT" ]
13
2020-01-02T14:58:18.000Z
2022-03-26T12:10:30.000Z
# coding: utf-8 from __future__ import absolute_import from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase from bitmovin_api_sdk.common.poscheck import poscheck_except from bitmovin_api_sdk.notifications.webhooks.encoding.encodings.finished.finished_api import FinishedApi from bitmovin_api_sdk.notifications.webhooks.encoding.encodings.error.error_api import ErrorApi from bitmovin_api_sdk.notifications.webhooks.encoding.encodings.transfer_error.transfer_error_api import TransferErrorApi from bitmovin_api_sdk.notifications.webhooks.encoding.encodings.live_input_stream_changed.live_input_stream_changed_api import LiveInputStreamChangedApi from bitmovin_api_sdk.notifications.webhooks.encoding.encodings.encoding_status_changed.encoding_status_changed_api import EncodingStatusChangedApi class EncodingsApi(BaseApi): @poscheck_except(2) def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None): # type: (str, str, str, BitmovinApiLoggerBase) -> None super(EncodingsApi, self).__init__( api_key=api_key, tenant_org_id=tenant_org_id, base_url=base_url, logger=logger ) self.finished = FinishedApi( api_key=api_key, tenant_org_id=tenant_org_id, base_url=base_url, logger=logger ) self.error = ErrorApi( api_key=api_key, tenant_org_id=tenant_org_id, base_url=base_url, logger=logger ) self.transfer_error = TransferErrorApi( api_key=api_key, tenant_org_id=tenant_org_id, base_url=base_url, logger=logger ) self.live_input_stream_changed = LiveInputStreamChangedApi( api_key=api_key, tenant_org_id=tenant_org_id, base_url=base_url, logger=logger ) self.encoding_status_changed = EncodingStatusChangedApi( api_key=api_key, tenant_org_id=tenant_org_id, base_url=base_url, logger=logger )
35.483333
152
0.699389
1,317
0.6186
0
0
1,284
0.6031
0
0
69
0.03241
4a07acc95882ce20e1dbf86d22afb6b05e9a8741
12,581
py
Python
notus/gtk_dbus/gtk_toaster.py
cnheider/notus
f284132e87d7b274c3ea239f216959987e670910
[ "Apache-2.0" ]
null
null
null
notus/gtk_dbus/gtk_toaster.py
cnheider/notus
f284132e87d7b274c3ea239f216959987e670910
[ "Apache-2.0" ]
null
null
null
notus/gtk_dbus/gtk_toaster.py
cnheider/notus
f284132e87d7b274c3ea239f216959987e670910
[ "Apache-2.0" ]
2
2021-02-04T15:04:16.000Z
2021-02-04T15:05:30.000Z
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import gi gi.require_version("Gtk", "3.0") from gi.repository import GdkPixbuf import time import dbus __author__ = "Christian Heider Nielsen" __doc__ = ( "Based on the notifications spec at: http://developer.gnome.org/notification-spec/" ) __version__ = "0.0.1" EXPIRES_DEFAULT = -1 EXPIRES_NEVER = 0 URGENCY_LOW = 0 URGENCY_NORMAL = 1 URGENCY_CRITICAL = 2 urgency_levels = [URGENCY_LOW, URGENCY_NORMAL, URGENCY_CRITICAL] IS_SETUP = False APP_NAME = f"unnamed_app_{time.time()}" HAVE_MAINLOOP = False NOTIFICATIONS_REGISTRY = {} __all__ = ["GtkToast"] def action_callback(nid, action, notifications_registry) -> None: """ :param nid: :type nid: :param action: :type action: :param notifications_registry: :type notifications_registry: :return: :rtype: """ nid, action = int(nid), str(action) try: n = notifications_registry[nid] except KeyError: # this message was created through some other program. return n.action_callback(action, notifications_registry) def closed_callback(nid, reason, notifications_registry) -> None: """ :param nid: :type nid: :param reason: :type reason: :param notifications_registry: :type notifications_registry: :return: :rtype: """ nid, reason = int(nid), int(reason) try: n = notifications_registry[nid] except KeyError: # this message was created through some other program. return n.closed_callback(n) del notifications_registry[nid] def no_op(*args): """No-op function for callbacks.""" pass # TODO: Object orient globals! class NotSetupError(RuntimeError): """Error raised if you try to communicate with the server before calling :func:`init`.""" pass class UnconstructedDbusObject(object): def __getattr__(self, name): raise NotSetupError("You must call toaster.init() first") dbus_interface = UnconstructedDbusObject() def init(app_name, mainloop=None): """Initialise the D-Bus connection. Must be called before you send any notifications, or retrieve server info or capabilities. 
To get callbacks from notifications, DBus must be integrated with a mainloop. There are three ways to achieve this: - Set a default mainloop (dbus.set_default_main_loop) before calling init() - Pass the mainloop parameter as a string 'glib' or 'qt' to integrate with those mainloops. (N.B. passing 'qt' currently makes that the default dbus mainloop, because that's the only way it seems to work.) - Pass the mainloop parameter a DBus compatible mainloop instance, such as dbus.mainloop.glib.DBusGMainLoop(). If you only want to display notifications, without receiving information back from them, you can safely omit mainloop.""" global APP_NAME, IS_SETUP, dbus_interface, HAVE_MAINLOOP if mainloop == "glib": from dbus.mainloop.glib import DBusGMainLoop mainloop = DBusGMainLoop() elif mainloop == "qt": from dbus.mainloop.qt import DBusQtMainLoop # For some reason, this only works if we make it the default mainloop # for dbus. That might make life tricky for anyone trying to juggle two # event loops, but I can't see any way round it. mainloop = DBusQtMainLoop(set_as_default=True) bus = dbus.SessionBus(mainloop=mainloop) dbus_obj = bus.get_object( "org.freedesktop.Notifications", "/org/freedesktop/Notifications" ) dbus_interface = dbus.Interface( dbus_obj, dbus_interface="org.freedesktop.Notifications" ) APP_NAME = app_name IS_SETUP = True if mainloop or dbus.get_default_main_loop(): HAVE_MAINLOOP = True dbus_interface.connect_to_signal("ActionInvoked", action_callback) dbus_interface.connect_to_signal("NotificationClosed", closed_callback) return True def is_initted(): """Has init() been called? Only exists for compatibility with pynotify.""" return IS_SETUP def get_app_name(): """Return appname. 
Only exists for compatibility with pynotify.""" return APP_NAME def de_init(): """Undo what init() does.""" global IS_SETUP, dbus_interface, HAVE_MAINLOOP IS_SETUP = False HAVE_MAINLOOP = False dbus_interface = UnconstructedDbusObject() # Retrieve basic server information -------------------------------------------- def get_server_caps(): """Get a list of server capabilities. These are short strings, listed `in the spec <http://people.gnome.org/~mccann/docs/notification-spec/notification-spec-latest.html#commands>`_. Vendors may also list extra capabilities with an 'x-' prefix, e.g. 'x-canonical-append'.""" return [str(x) for x in dbus_interface.GetCapabilities()] def get_server_info(): """Get basic information about the server.""" res = dbus_interface.GetServerInformation() return { "name": str(res[0]), "vendor": str(res[1]), "version": str(res[2]), "spec-version": str(res[3]), } class GtkToast(object): """A notification object. summary : str The title text message : str The body text, if the server has the 'body' capability. icon : str Path to an icon image, or the name of a stock icon. Stock icons available in Ubuntu are `listed here <https://wiki.ubuntu.com/NotificationDevelopmentGuidelines #How_do_I_get_these_slick_icons>`_. You can also set an icon from data in your application - see :meth:`set_icon_from_pixbuf`.""" _id = 0 _timeout = -1 # -1 = server default settings _closed_callback = no_op def __init__(self, title, body="", *, icon=""): self.title = title self.body = body self._hints = {} if isinstance(icon, GdkPixbuf.Pixbuf): self._icon = "" self.set_hint("icon_data", icon) else: self._icon = icon self._actions = {} self._data = {} # Any data the user wants to attach def show(self): """Ask the server to show the notification. 
Call this after you have finished setting any parameters of the notification that you want.""" nid = dbus_interface.Notify( APP_NAME, # app_name (spec names) self._id, # replaces_id self._icon, # app_icon self.title, # summary self.body, # body self._make_actions_array(), # actions self._hints, # hints self._timeout, # expire_timeout ) self._id = int(nid) if HAVE_MAINLOOP: NOTIFICATIONS_REGISTRY[self._id] = self return True def update(self, title, body="", *, icon=None): """Replace the summary and body of the notification, and optionally its icon. You should call :meth:`show` again after this to display the updated notification.""" self.title = title self.body = body if icon is not None: self._icon = icon def close(self): """Ask the server to close this notification.""" if self._id != 0: dbus_interface.CloseNotification(self._id) def set_hint(self, key, value): """n.set_hint(key, value) <--> n.hints[key] = value See `hints in the spec <http://people.gnome.org/~mccann/docs/notification-spec/notification-spec-latest .html#hints>`_. Only exists for compatibility with pynotify.""" self._hints[key] = value set_hint_string = set_hint_int32 = set_hint_double = set_hint def set_hint_byte(self, key, value): """Set a hint with a dbus byte value. The input value can be an integer or a bytes string of length 1.""" self._hints[key] = dbus.Byte(value) def set_urgency(self, level): """Set the urgency level to one of URGENCY_LOW, URGENCY_NORMAL or URGENCY_CRITICAL.""" if level not in urgency_levels: raise ValueError("Unknown urgency level specified", level) self.set_hint_byte("urgency", level) def set_category(self, category): """Set the 'category' hint for this notification. See `categories in the spec <http://people.gnome.org/~mccann/docs/notification-spec/notification-spec -latest.html#categories>`_.""" self._hints["category"] = category def set_timeout(self, timeout): """Set the display duration in milliseconds, or one of the special values EXPIRES_DEFAULT or EXPIRES_NEVER. 
This is a request, which the server might ignore. Only exists for compatibility with pynotify; you can simply set:: n.timeout = 5000""" if not isinstance(timeout, int): raise TypeError("timeout value was not int", timeout) self._timeout = timeout def get_timeout(self): """Return the timeout value for this notification. Only exists for compatibility with pynotify; you can inspect the timeout attribute directly.""" return self._timeout def add_action(self, action, label, callback, user_data=None): """Add an action to the notification. Check for the 'actions' server capability before using this. action : str A brief key. label : str The text displayed on the action button callback : callable A function taking at 2-3 parameters: the Notification object, the action key and (if specified) the user_data. user_data : An extra argument to pass to the callback.""" self._actions[action] = (label, callback, user_data) def _make_actions_array(self): """Make the actions array to send over DBus.""" arr = [] for action, (label, callback, user_data) in self._actions.items(): arr.append(action) arr.append(label) return arr def _action_callback(self, action): """Called when the user selects an action on the notification, to dispatch it to the relevant user-specified callback.""" try: label, callback, user_data = self._actions[action] except KeyError: return if user_data is None: callback(self, action) else: callback(self, action, user_data) def connect(self, event, callback): """Set the callback for the notification closing; the only valid value for event is 'closed' (the parameter is kept for compatibility with pynotify). 
The callback will be called with the :class:`Notification` instance.""" if event != "closed": raise ValueError("'closed' is the only valid value for event", event) self._closed_callback = callback def set_data(self, key, value): """n.set_data(key, value) <--> n.data[key] = value Only exists for compatibility with pynotify.""" self._data[key] = value def get_data(self, key): """n.get_data(key) <--> n.data[key] Only exists for compatibility with pynotify.""" return self._data[key] def set_icon_from_pixbuf(self, icon): """Set a custom icon from a GdkPixbuf.""" self._hints["icon_data"] = self._get_icon_struct(icon) @staticmethod def _get_icon_struct(icon): return ( icon.get_width(), icon.get_height(), icon.get_rowstride(), icon.get_has_alpha(), icon.get_bits_per_sample(), icon.get_n_channels(), dbus.ByteArray(icon.get_pixels()), ) def set_location(self, x, y): """Set the notification location as (x, y), if the server supports it.""" if (not isinstance(x, int)) or (not isinstance(y, int)): raise TypeError("x and y must both be ints", (x, y)) self._hints["x"] = x self._hints["y"] = y if __name__ == "__main__": import gi gi.require_version("Gtk", "3.0") from gi.repository import Gtk init("Test") helper = Gtk.Button() a_icon = helper.render_icon(Gtk.STOCK_DIALOG_INFO, Gtk.IconSize.DIALOG) t = GtkToast("Title", "Body") t.set_icon_from_pixbuf(a_icon) for i in range(10): t.title = f"Title{i}" t.body = f"Body{i}" t.show() time.sleep(0.1) if i == 4: a_icon = helper.render_icon(Gtk.STOCK_DIALOG_QUESTION, Gtk.IconSize.DIALOG) t.set_icon_from_pixbuf(a_icon)
30.835784
111
0.642636
7,169
0.569828
0
0
323
0.025674
0
0
6,137
0.487799
4a0addc7ea0061a30f327c50cf69e8d1c80df2ec
2,863
py
Python
get_Exploitdb_CSV_SUPERSEDED.py
NadimKawwa/CybersecurityThreatIdentification
e088dbb861342676337b4c9d385e6abfb6463291
[ "MIT" ]
3
2021-01-15T10:28:54.000Z
2021-11-09T17:55:45.000Z
get_Exploitdb_CSV_SUPERSEDED.py
NadimKawwa/CybersecurityThreatIdentification
e088dbb861342676337b4c9d385e6abfb6463291
[ "MIT" ]
null
null
null
get_Exploitdb_CSV_SUPERSEDED.py
NadimKawwa/CybersecurityThreatIdentification
e088dbb861342676337b4c9d385e6abfb6463291
[ "MIT" ]
2
2021-02-05T17:35:48.000Z
2021-04-23T18:56:21.000Z
from time import sleep from pymongo import MongoClient from FakePersona import getPage base_url = "https://www.exploit-db.com" def getExploitCategories(): #access page as fake persona soup = getPage(base_url) #find all list items <li> categories = soup.find("ul", {"class":"w-nav-list"}).findAll("li", {"class":"level_1"})[1] #get anchor categories = [i.find("a")['href'] for i in categories.findAll("li")] return categories def getCategoryTable(pageSoup): table = pageSoup.find("table", {"class": "exploit_list"}).findAll("tr") return table def streamData(data,collection): client = MongoClient(host='localhost', port=27017) db = client.exploits db[collection].insert(data) def streamExploitTableSoup(tableSoup, category, database): for i in tableSoup: try: rows = i.findAll("td") rows = rows[0:1] + rows[3::] if len(rows) == 5: date = rows[0].getText() verification = rows[1].find("img")['title'].strip() exploitLink = rows[2].find("a") title,link= exploitLink.getText().replace("\n","").strip(), exploitLink['href'] if "-" in title: appattack = title.split("-") application = appattack[0] attack = appattack[1] else: application = title attack = title platform = rows[3].find("a").getText().replace("\n","").strip() author = rows[4].find("a").getText().replace("\n","").strip() #datetime = date.split("-") #data = {"date":{"fulldate":date,"year":datetime[0], "month":datetime[1], "day":datetime[2]}, # "attack":{"application":application,"vector":attack}, # "platform":platform, "author":author, "link":link, "verification":verification} #streamData(data=data, collection=database) print("{0},{1},{2},{3},{4},{5}".format(date, application, attack,platform, author, verification, link)) except: pass def crawlCategoryTables(categoryLink): category = categoryLink.split("/")[-2] categoryPage =getPage(categoryLink) lastPage = int(categoryPage.find("main").find("div", {"class":"pagination"}).findAll("a")[-1]['href'].split("=")[-1]) for i in range(1,(lastPage+1)): if i % 20 == 0: sleep(60) newUrl 
= categoryLink + "?order_by=date_published&order=desc&pg="+str(i) ts = getCategoryTable(getPage(newUrl)) streamExploitTableSoup(ts, category = category, database =category) if __name__ == "__main__": exploitCategories = getExploitCategories() crawlCategoryTables(exploitCategories[3])
34.083333
121
0.567586
0
0
0
0
0
0
0
0
677
0.236465
4a0c17dcadf4678d289fa2db3d23b33230f5a519
2,303
py
Python
example/django_example/polls/tests.py
dmsimard/dynaconf
ec394ab07e3b522879c8be678c65ebeb05fc2b59
[ "MIT" ]
null
null
null
example/django_example/polls/tests.py
dmsimard/dynaconf
ec394ab07e3b522879c8be678c65ebeb05fc2b59
[ "MIT" ]
null
null
null
example/django_example/polls/tests.py
dmsimard/dynaconf
ec394ab07e3b522879c8be678c65ebeb05fc2b59
[ "MIT" ]
null
null
null
from django.conf import settings from django.test import TestCase # Create your tests here. class SettingsTest(TestCase): def test_settings(self): self.assertEqual(settings.SERVER, 'prodserver.com') self.assertEqual( settings.STATIC_URL, '/changed/in/settings.toml/by/dynaconf/') self.assertEqual(settings.USERNAME, 'admin_user_from_env') self.assertEqual(settings.PASSWORD, 'My5up3r53c4et') self.assertEqual(settings.get('PASSWORD'), 'My5up3r53c4et') self.assertEqual(settings.FOO, 'It overrides every other env') with settings.using_env('development'): self.assertEqual(settings.SERVER, 'devserver.com') self.assertEqual(settings.PASSWORD, False) self.assertEqual(settings.USERNAME, 'admin_user_from_env') self.assertEqual(settings.FOO, 'It overrides every other env') self.assertEqual(settings.SERVER, 'prodserver.com') self.assertEqual(settings.PASSWORD, 'My5up3r53c4et') self.assertEqual(settings.USERNAME, 'admin_user_from_env') self.assertEqual(settings.FOO, 'It overrides every other env') with settings.using_env('staging'): self.assertEqual(settings.SERVER, 'stagingserver.com') self.assertEqual(settings.PASSWORD, False) self.assertEqual(settings.USERNAME, 'admin_user_from_env') self.assertEqual(settings.FOO, 'It overrides every other env') self.assertEqual(settings.SERVER, 'prodserver.com') self.assertEqual(settings.PASSWORD, 'My5up3r53c4et') self.assertEqual(settings.USERNAME, 'admin_user_from_env') self.assertEqual(settings.FOO, 'It overrides every other env') with settings.using_env('customenv'): self.assertEqual(settings.SERVER, 'customserver.com') self.assertEqual(settings.PASSWORD, False) self.assertEqual(settings.USERNAME, 'admin_user_from_env') self.assertEqual(settings.FOO, 'It overrides every other env') self.assertEqual(settings.SERVER, 'prodserver.com') self.assertEqual(settings.PASSWORD, 'My5up3r53c4et') self.assertEqual(settings.USERNAME, 'admin_user_from_env') self.assertEqual(settings.FOO, 'It overrides every other env')
46.06
74
0.696049
2,207
0.958315
0
0
0
0
0
0
656
0.284846
4a0d70ed28500ae0edbad8023f3ee20304c7c976
869
py
Python
docker/dempcap/pcapminey/core/ThreadPool/Pool.py
JakubOrzol/dockerfiles
d04ead31d053dbe62b1e98b33e3a2852e335b41c
[ "MIT" ]
203
2016-03-02T14:13:34.000Z
2022-03-30T06:08:56.000Z
docker/dempcap/pcapminey/core/ThreadPool/Pool.py
Axonius/dockerfiles
f2135e9abb468ee8db339ec27b2ba737acbbaef6
[ "MIT" ]
7,201
2018-12-24T17:14:17.000Z
2022-03-31T13:39:12.000Z
docker/dempcap/pcapminey/core/ThreadPool/Pool.py
Axonius/dockerfiles
f2135e9abb468ee8db339ec27b2ba737acbbaef6
[ "MIT" ]
94
2018-12-17T10:59:21.000Z
2022-03-29T12:59:30.000Z
# -*- coding: utf8 -*- __author__ = 'Viktor Winkelmann' from Queue import Queue from Worker import Worker class Pool: def __init__(self, size): self.size = size self.workers = [] self.tasks = Queue() def _removeDeadWorkers(self): self.workers = [w for w in self.workers if w.isAlive()] def map_async(self, func, objects, callback): self._removeDeadWorkers() if not len(self.workers) == 0: raise Exception('ThreadPool is still working! Adding new jobs is not allowed!') for object in objects: self.tasks.put((func, object, callback)) for id in range(self.size): self.workers.append(Worker(id, self.tasks)) for worker in self.workers: worker.start() def join(self): for worker in self.workers: worker.join()
27.15625
91
0.604143
761
0.875719
0
0
0
0
0
0
103
0.118527
4a0dfb98063d6ae4066c076c37f14fe15e355d66
1,792
py
Python
tabnet/download_prepare_covertype.py
kiss2u/google-research
2cd66234656f9e2f4218ed90a2d8aa9cf3139093
[ "Apache-2.0" ]
1
2020-02-10T12:50:17.000Z
2020-02-10T12:50:17.000Z
tabnet/download_prepare_covertype.py
kiss2u/google-research
2cd66234656f9e2f4218ed90a2d8aa9cf3139093
[ "Apache-2.0" ]
7
2021-08-25T16:15:53.000Z
2022-02-10T03:26:55.000Z
tabnet/download_prepare_covertype.py
kiss2u/google-research
2cd66234656f9e2f4218ed90a2d8aa9cf3139093
[ "Apache-2.0" ]
1
2021-02-11T06:11:31.000Z
2021-02-11T06:11:31.000Z
# coding=utf-8 # Copyright 2020 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Downloads and prepares the Forest Covertype dataset.""" import gzip import os import shutil import pandas as pd from sklearn.model_selection import train_test_split import wget url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/covtype/covtype.data.gz' os.mkdir('./data') filename = wget.download(url) with gzip.open(filename, 'rb') as f_in: with open('data/covtype.csv', 'wb') as f_out: shutil.copyfileobj(f_in, f_out) df = pd.read_csv('data/covtype.csv') n_total = len(df) # Train, val and test split follows # Rory Mitchell, Andrey Adinets, Thejaswi Rao, and Eibe Frank. # Xgboost: Scalable GPU accelerated learning. arXiv:1806.11248, 2018. train_val_indices, test_indices = train_test_split( range(n_total), test_size=0.2, random_state=0) train_indices, val_indices = train_test_split( train_val_indices, test_size=0.2 / 0.6, random_state=0) traindf = df.iloc[train_indices] valdf = df.iloc[val_indices] testdf = df.iloc[test_indices] traindf = traindf.sample(frac=1) traindf.to_csv('data/train.csv', index=False, header=False) valdf.to_csv('data/val.csv', index=False, header=False) testdf.to_csv('data/test.csv', index=False, header=False)
34.461538
89
0.760045
0
0
0
0
0
0
0
0
997
0.556362
4a0e887c11bff05cc63fa41b81253f35cd3d4db2
845
py
Python
mockapi/test/urls.py
AKSharma01/mock_form
e21ac891fd0f31be37329351ca1f500b512f6251
[ "Apache-2.0" ]
null
null
null
mockapi/test/urls.py
AKSharma01/mock_form
e21ac891fd0f31be37329351ca1f500b512f6251
[ "Apache-2.0" ]
null
null
null
mockapi/test/urls.py
AKSharma01/mock_form
e21ac891fd0f31be37329351ca1f500b512f6251
[ "Apache-2.0" ]
null
null
null
from flask import Flask, request, render_template, url_for from views import * app = Flask(__name__) app.secret_key = "mockapi.dev" urls = [ ('/', ['GET'], Start.as_view('view')), ('/login', ['GET','POST'], Log.as_view('log_alllist')), #- Login into mockapi ('/logout', ['GET'], Logout.as_view('logout')), #- Logout ('/register', ['POST'], Reg.as_view('reg_alllist')), #- Register user to create their mockform ('/mockapi', ['GET','POST'], Dashboard.as_view('dashboard')), # List all the mock API s ('/mockapi/new', ['POST'], CreateForm.as_view('createnewform')), #- New Mock API Form ('/mockapi/edit/<slug>/<version>', ['GET','POST','PUT'], EditJson.as_view('editslug')), #- Edit {{version}} ('/mockapi/<slug>/<version>', ['GET'], ViewJson.as_view('viewmock')) #- View mock apis version wise ]
52.8125
108
0.618935
0
0
0
0
0
0
0
0
444
0.525444