hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4043f84908b97607d02cc9c6faf2b455d08e20a4 | 1,055 | py | Python | scripts/commands/html/actions/search.py | stevekineeve88/orb | 284cc78659e88e85e8773599da3bda382a8bb833 | [
"MIT"
] | null | null | null | scripts/commands/html/actions/search.py | stevekineeve88/orb | 284cc78659e88e85e8773599da3bda382a8bb833 | [
"MIT"
] | null | null | null | scripts/commands/html/actions/search.py | stevekineeve88/orb | 284cc78659e88e85e8773599da3bda382a8bb833 | [
"MIT"
] | null | null | null | import click
import requests
from bs4 import BeautifulSoup
from modules.Word.managers.DictionaryManager import DictionaryManager
import re
| 30.142857 | 70 | 0.6 |
404408dcaaf3ec9278595ad0836bc4bc90af7ec0 | 81 | py | Python | asf_search/constants/DATASET/__init__.py | jhkennedy/Discovery-asf_search | 4ec45e8a85cd626ea92f83937df9f8f04e0f7f4f | [
"BSD-3-Clause"
] | null | null | null | asf_search/constants/DATASET/__init__.py | jhkennedy/Discovery-asf_search | 4ec45e8a85cd626ea92f83937df9f8f04e0f7f4f | [
"BSD-3-Clause"
] | 1 | 2021-04-01T16:30:56.000Z | 2021-04-01T16:30:56.000Z | asf_search/constants/DATASET/__init__.py | jhkennedy/Discovery-asf_search | 4ec45e8a85cd626ea92f83937df9f8f04e0f7f4f | [
"BSD-3-Clause"
] | null | null | null | """Datasets to be used in search and related functions"""
from .DATASET import * | 27 | 57 | 0.740741 |
404411fc8cdef43afe8b983d66104ed1efd7c616 | 16,089 | py | Python | cell2cell/plotting/cci_plot.py | ckmah/cell2cell | ce18bbb63e12f9b1da8699567dec9a2a8b78f824 | [
"BSD-3-Clause"
] | 16 | 2020-09-30T01:53:43.000Z | 2022-03-25T09:58:54.000Z | cell2cell/plotting/cci_plot.py | ckmah/cell2cell | ce18bbb63e12f9b1da8699567dec9a2a8b78f824 | [
"BSD-3-Clause"
] | 2 | 2021-08-09T21:26:54.000Z | 2021-11-08T14:47:39.000Z | cell2cell/plotting/cci_plot.py | ckmah/cell2cell | ce18bbb63e12f9b1da8699567dec9a2a8b78f824 | [
"BSD-3-Clause"
] | 3 | 2021-11-08T07:47:44.000Z | 2022-03-30T18:40:00.000Z | # -*- coding: utf-8 -*-
import matplotlib as mpl
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib import pyplot as plt
from cell2cell.clustering import compute_linkage
from cell2cell.preprocessing.manipulate_dataframes import check_symmetry
from cell2cell.plotting.aesthetics import map_colors_to_metadata
def clustermap_cci(interaction_space, method='ward', optimal_leaf=True, metadata=None, sample_col='#SampleID',
                   group_col='Groups', meta_cmap='gist_rainbow', colors=None, excluded_cells=None, title='',
                   cbar_title='CCI score', cbar_fontsize=18, filename=None, **kwargs):
    '''Generates a clustermap (heatmap + dendrograms from a hierarchical
    clustering) based on CCI scores of cell-cell pairs.

    Parameters
    ----------
    interaction_space : cell2cell.core.interaction_space.InteractionSpace
        Interaction space that contains a distance matrix after running the
        method compute_pairwise_cci_scores. Alternatively, this object
        can be a numpy-array or a pandas DataFrame. Also, a
        SingleCellInteractions or a BulkInteractions object after running
        the method compute_pairwise_cci_scores.

    method : str, default='ward'
        Clustering method for computing a linkage as in
        scipy.cluster.hierarchy.linkage

    optimal_leaf : boolean, default=True
        Whether sorting the leaf of the dendrograms to have a minimal distance
        between successive leaves. For more information, see
        scipy.cluster.hierarchy.optimal_leaf_ordering

    metadata : pandas.Dataframe, default=None
        Metadata associated with the cells, cell types or samples in the
        matrix containing CCI scores. If None, cells will not be colored
        by major groups.

    sample_col : str, default='#SampleID'
        Column in the metadata for the cells, cell types or samples
        in the matrix containing CCI scores.

    group_col : str, default='Groups'
        Column in the metadata containing the major groups of cells, cell types
        or samples in the matrix with CCI scores.

    meta_cmap : str, default='gist_rainbow'
        Name of the color palette for coloring the major groups of cells.

    colors : dict, default=None
        Dictionary containing tuples in the RGBA format for indicating colors
        of major groups of cells. If colors is specified, meta_cmap will be
        ignored.

    excluded_cells : list, default=None
        List containing cell names that are present in the interaction_space
        object but that will be excluded from this plot.

    title : str, default=''
        Title of the clustermap.

    cbar_title : str, default='CCI score'
        Title for the colorbar, depending on the score employed.

    cbar_fontsize : int, default=18
        Font size for the colorbar title as well as labels for axes X and Y.

    filename : str, default=None
        Path to save the figure. If None, the figure is not saved.

    **kwargs : dict
        Dictionary containing arguments for the seaborn.clustermap function.

    Returns
    -------
    hier : seaborn.matrix.ClusterGrid
        A seaborn ClusterGrid instance.
    '''
    # Resolve the distance matrix from the accepted input flavors.
    if hasattr(interaction_space, 'distance_matrix'):
        print('Interaction space detected as an InteractionSpace class')
        distance_matrix = interaction_space.distance_matrix
        space_type = 'class'
    elif isinstance(interaction_space, (np.ndarray, pd.DataFrame)):
        print('Interaction space detected as a distance matrix')
        distance_matrix = interaction_space
        space_type = 'matrix'
    elif hasattr(interaction_space, 'interaction_space'):
        print('Interaction space detected as a Interactions class')
        if not hasattr(interaction_space.interaction_space, 'distance_matrix'):
            raise ValueError('First run the method compute_pairwise_interactions() in your interaction' + \
                             ' object to generate a distance matrix.')
        else:
            interaction_space = interaction_space.interaction_space
            distance_matrix = interaction_space.distance_matrix
            space_type = 'class'
    else:
        raise ValueError('First run the method compute_pairwise_interactions() in your interaction' + \
                         ' object to generate a distance matrix.')

    # Drop excluded cells
    if excluded_cells is not None:
        df = distance_matrix.loc[~distance_matrix.index.isin(excluded_cells),
                                 ~distance_matrix.columns.isin(excluded_cells)]
    else:
        df = distance_matrix

    # Check symmetry to get linkage
    symmetric = check_symmetry(df)
    # `and` instead of bitwise `&` — same result on bools, but short-circuits
    # and reads as the logical operation it is.
    if (not symmetric) and isinstance(interaction_space, pd.DataFrame):
        assert set(df.index) == set(df.columns), 'The distance matrix does not have the same elements in rows and columns'

    # Obtain info for generating plot. NOTE: this call may add a
    # 'row_cluster' entry into `kwargs`, which must then be forwarded to
    # seaborn — hence the copy is taken only afterwards.
    linkage = _get_distance_matrix_linkages(df=df,
                                            kwargs=kwargs,
                                            method=method,
                                            optimal_ordering=optimal_leaf,
                                            symmetric=symmetric
                                            )

    kwargs_ = kwargs.copy()

    # PLOT CCI MATRIX
    if space_type == 'class':
        df = interaction_space.interaction_elements['cci_matrix']
    else:
        df = distance_matrix

    if excluded_cells is not None:
        df = df.loc[~df.index.isin(excluded_cells),
                    ~df.columns.isin(excluded_cells)]

    # Colors for the major cell groups, if metadata was provided.
    if metadata is not None:
        col_colors = map_colors_to_metadata(metadata=metadata,
                                            ref_df=df,
                                            colors=colors,
                                            sample_col=sample_col,
                                            group_col=group_col,
                                            cmap=meta_cmap)

        if not symmetric:
            row_colors = col_colors
        else:
            row_colors = None
    else:
        col_colors = None
        row_colors = None

    # Plot hierarchical clustering (triangular)
    hier = _plot_triangular_clustermap(df=df,
                                       symmetric=symmetric,
                                       linkage=linkage,
                                       col_colors=col_colors,
                                       row_colors=row_colors,
                                       title=title,
                                       cbar_title=cbar_title,
                                       cbar_fontsize=cbar_fontsize,
                                       **kwargs_)

    # Bugfix: the original used `if ~symmetric:`. Bitwise NOT on a Python
    # bool is always truthy (~True == -2, ~False == -1), so sender/receiver
    # axis labels were drawn even for symmetric matrices. `not` is intended.
    if not symmetric:
        hier.ax_heatmap.set_xlabel('Receiver cells', fontsize=cbar_fontsize)
        hier.ax_heatmap.set_ylabel('Sender cells', fontsize=cbar_fontsize)

    if filename is not None:
        plt.savefig(filename, dpi=300,
                    bbox_inches='tight')
    return hier
def _get_distance_matrix_linkages(df, kwargs, method='ward', optimal_ordering=True, symmetric=None):
'''Computes linkages for the CCI matrix.
Parameters
----------
df : pandas.DataFrame
Contains the CCI scores in a form of distances (that is, smaller
values represent stronger interactions). Diagonal must be filled
by zeros.
kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
method : str, default='ward'
Clustering method for computing a linkage as in
scipy.cluster.hierarchy.linkage
optimal_ordering : boolean, default=True
Whether sorting the leaf of the dendrograms to have a minimal distance
between successive leaves. For more information, see
scipy.cluster.hierarchy.optimal_leaf_ordering
symmetric : boolean, default=None
Whether df is symmetric.
Returns
-------
linkage : ndarray
The hierarchical clustering of cells encoded as a linkage matrix.
'''
if symmetric is None:
symmetric = check_symmetry(df)
if symmetric:
if 'col_cluster' in kwargs.keys():
kwargs['row_cluster'] = kwargs['col_cluster']
if kwargs['col_cluster']:
linkage = compute_linkage(df, method=method, optimal_ordering=optimal_ordering)
else:
linkage = None
elif 'row_cluster' in kwargs.keys():
if kwargs['row_cluster']:
linkage = compute_linkage(df, method=method, optimal_ordering=optimal_ordering)
else:
linkage = None
else:
linkage = compute_linkage(df, method=method, optimal_ordering=optimal_ordering)
else:
linkage = None
return linkage
def _triangularize_distance_matrix(df, linkage=None, symmetric=None, **kwargs):
'''Generates a mask to plot the upper triangle of the CCI matrix.
Parameters
----------
df : pandas.DataFrame
Contains the CCI scores. Must be a symmetric matrix.
linkage : ndarray, default=None
The hierarchical clustering of cells encoded as a linkage matrix.
symmetric : boolean, default=None
Whether df is symmetric.
**kwargs : dict
Dictionary containing arguments for the seaborn.clustermap function.
Returns
-------
mask : ndarray
Mask that contains ones in the places to be hidden in the clustermap.
Only the diagonal and the upper triangle are not masked (contain
zeros).
'''
if symmetric is None:
symmetric = check_symmetry(df)
# Triangular matrix
if symmetric:
order_map = dict()
if linkage is None:
mask = np.ones((df.shape[0], df.shape[1]))
for i in range(mask.shape[0]):
for j in range(i, mask.shape[1]):
mask[i, j] = 0
else:
# Plot hierarchical clustering for getting indexes according to linkage
hier = sns.clustermap(df,
col_linkage=linkage,
row_linkage=linkage,
**kwargs
)
plt.close()
ind_order = hier.dendrogram_col.reordered_ind
mask = np.zeros((df.shape[0], df.shape[1]))
for i, ind in enumerate(ind_order):
order_map[i] = ind
filter_list = [order_map[j] for j in range(i)]
mask[ind, filter_list] = 1
else:
mask = None
return mask
def _plot_triangular_clustermap(df, symmetric=None, linkage=None, mask=None, col_colors=None, row_colors=None,
                                title='', cbar_title='CCI score', cbar_fontsize=12, **kwargs):
    '''Draws a clustermap of the CCI matrix, hiding the redundant lower
    triangle when the matrix is symmetric.

    Parameters
    ----------
    df : pandas.DataFrame
        Contains the CCI scores. Must be a symmetric matrix.

    symmetric : boolean, default=None
        Whether df is symmetric; computed from df when None.

    linkage : ndarray, default=None
        The hierarchical clustering of cells encoded as a linkage matrix.

    mask : ndarray, default=None
        Ones mark the cells to hide in the clustermap. When None, a mask is
        derived from the matrix symmetry.

    col_colors : dict, default=None
        RGBA colors for the major groups of cells along the columns.

    row_colors : dict, default=None
        RGBA colors for the major groups of cells along the rows.

    title : str, default=''
        Title of the clustermap.

    cbar_title : str, default='CCI score'
        Label shown next to the colorbar.

    cbar_fontsize : int, default=12
        Font size of the colorbar label.

    **kwargs : dict
        Extra arguments forwarded to seaborn.clustermap.

    Returns
    -------
    grid : seaborn.matrix.ClusterGrid
        The seaborn ClusterGrid that was drawn.
    '''
    if symmetric is None:
        symmetric = check_symmetry(df)

    if mask is None:
        mask = _triangularize_distance_matrix(df=df,
                                              linkage=linkage,
                                              symmetric=symmetric,
                                              **kwargs)

    grid = sns.clustermap(df,
                          col_linkage=linkage,
                          row_linkage=linkage,
                          mask=mask,
                          col_colors=col_colors,
                          row_colors=row_colors,
                          **kwargs)

    # For symmetric matrices this relocates the xtick labels onto the
    # diagonal; otherwise it just rotates them.
    grid = _move_xticks_triangular_clustermap(clustermap=grid,
                                              symmetric=symmetric)

    # Optional title above the column dendrogram.
    if len(title) > 0:
        grid.ax_col_dendrogram.set_title(title, fontsize=16)

    # Label the colorbar and place the label on its left side.
    colorbar = grid.ax_heatmap.collections[0].colorbar
    colorbar.ax.set_ylabel(cbar_title, fontsize=cbar_fontsize)
    colorbar.ax.yaxis.set_label_position("left")
    return grid
def _move_xticks_triangular_clustermap(clustermap, symmetric=True):
    '''Moves xticks to the diagonal when plotting a symmetric matrix
    in the form of a upper triangle.
    Parameters
    ---------
    clustermap : seaborn.matrix.ClusterGrid
        A seaborn ClusterGrid instance.
    symmetric : boolean, default=None
        Whether the CCI matrix plotted in the clustermap is symmetric.
    Returns
    -------
    clustermap : seaborn.matrix.ClusterGrid
        A seaborn ClusterGrid instance, with the xticks moved to the
        diagonal if the CCI matrix was symmetric. If not, the same
        input clustermap is returned, but with rotated xtick labels.
    '''
    if symmetric:
        # Apply offset transform to all xticklabels.
        # The row dendrogram mirrors the column one for a symmetric matrix,
        # so it is hidden to avoid duplication.
        clustermap.ax_row_dendrogram.set_visible(False)
        clustermap.ax_heatmap.tick_params(bottom=False) # Hide xtick line
        x_labels = clustermap.ax_heatmap.xaxis.get_majorticklabels()
        # Figure dpi scale factors (x and y) used to convert display-unit
        # distances into inches for ScaledTranslation below.
        dpi_x = clustermap.fig.dpi_scale_trans.to_values()[0]
        dpi_y = clustermap.fig.dpi_scale_trans.to_values()[3]
        # Positions of the first two tick labels in display units; their
        # spacing gives the width of one heatmap cell.
        # NOTE(review): indexing x_labels[1] assumes at least two tick
        # labels exist; a 1x1 matrix would raise IndexError here — confirm.
        x0 = clustermap.ax_heatmap.transData.transform(x_labels[0].get_position())
        x1 = clustermap.ax_heatmap.transData.transform(x_labels[1].get_position())
        ylims = clustermap.ax_heatmap.get_ylim()
        bottom_points = clustermap.ax_heatmap.transData.transform((1.0, ylims[0]))[1]
        for i, xl in enumerate(x_labels):
            # Move labels in dx and dy points.
            # Each label is lifted to the diagonal cell of its own column:
            # the label's x position (plus half a cell) is reused as the
            # y coordinate of its target point.
            swap_xy = (1.0, xl.get_position()[0] + 0.5)
            new_y_points = clustermap.ax_heatmap.transData.transform(swap_xy)[1]
            dx = -0.5 * abs(x1[0] - x0[0]) / dpi_x
            dy = (new_y_points - bottom_points) / dpi_y
            offset = mpl.transforms.ScaledTranslation(dx, dy, clustermap.fig.dpi_scale_trans)
            xl.set_transform(xl.get_transform() + offset)
    # Diagonal (45°) labels suit the triangular layout; vertical (90°)
    # labels are used for a regular rectangular heatmap.
    if symmetric:
        rot = 45
    else:
        rot = 90
    va = 'center'
    clustermap.ax_heatmap.set_xticklabels(clustermap.ax_heatmap.xaxis.get_majorticklabels(),
                                          rotation=rot,
                                          rotation_mode='anchor',
                                          va='bottom',
                                          ha='right') # , fontsize=9.5)
    clustermap.ax_heatmap.set_yticklabels(clustermap.ax_heatmap.yaxis.get_majorticklabels(),
                                          rotation=0,
                                          va=va,
                                          ha='left') # , fontsize=9.5)
    return clustermap | 38.307143 | 122 | 0.605134 |
40465e4dbbb9334d5135c8ffe536947ae617c71d | 1,051 | py | Python | var/spack/repos/builtin.mock/packages/gnuconfig/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin.mock/packages/gnuconfig/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 8 | 2021-11-09T20:28:40.000Z | 2022-03-15T03:26:33.000Z | var/spack/repos/builtin.mock/packages/gnuconfig/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack.package import *
| 29.194444 | 78 | 0.669838 |
40472eab6c9976684dd368889d9c68536758019e | 378 | py | Python | mp4box/parsing/ctts.py | abhijeetbhagat/mp4box | 841ff0ef70c7f5a96548f47414bba69c00aa2f5e | [
"BSD-3-Clause"
] | 7 | 2019-08-14T03:03:51.000Z | 2021-11-14T19:10:00.000Z | mp4box/parsing/ctts.py | wanyhamo/mp4box | c5c73cd37c01bd9d637f1f3ed82221065dc86d6f | [
"BSD-3-Clause"
] | 10 | 2019-08-03T16:27:08.000Z | 2019-09-10T10:05:23.000Z | mp4box/parsing/ctts.py | abhijeetbhagat/mp4box | 841ff0ef70c7f5a96548f47414bba69c00aa2f5e | [
"BSD-3-Clause"
] | 7 | 2019-08-19T17:58:03.000Z | 2021-03-03T07:25:54.000Z | from mp4box.box import CompositionTimeToSampleBox
| 29.076923 | 58 | 0.693122 |
4047883a8f6ee83210f65d3f654ff142172cb4a8 | 24,485 | py | Python | MsLightweaverManager.py | Goobley/MsLightweaver | 6383867ba2a7ab00df947c8470b438d9eadcc321 | [
"MIT"
] | null | null | null | MsLightweaverManager.py | Goobley/MsLightweaver | 6383867ba2a7ab00df947c8470b438d9eadcc321 | [
"MIT"
] | 1 | 2020-05-05T13:49:54.000Z | 2021-04-29T12:41:40.000Z | MsLightweaverManager.py | Goobley/MsLightweaver | 6383867ba2a7ab00df947c8470b438d9eadcc321 | [
"MIT"
] | null | null | null | import pickle
import numpy as np
import matplotlib.pyplot as plt
from lightweaver.rh_atoms import H_6_atom, C_atom, O_atom, OI_ord_atom, Si_atom, Al_atom, Fe_atom, FeI_atom, MgII_atom, N_atom, Na_atom, S_atom, CaII_atom
from lightweaver.atmosphere import Atmosphere, ScaleType
from lightweaver.atomic_table import DefaultAtomicAbundance
from lightweaver.atomic_set import RadiativeSet, SpeciesStateTable
from lightweaver.molecule import MolecularTable
from lightweaver.LwCompiled import LwContext
from lightweaver.utils import InitialSolution, planck, NgOptions, ConvergenceError, compute_radiative_losses, integrate_line_losses
import lightweaver.constants as Const
import lightweaver as lw
from typing import List
from copy import deepcopy
from MsLightweaverAtoms import H_6, CaII, H_6_nasa, CaII_nasa
import os
import os.path as path
import time
from radynpy.matsplotlib import OpcFile
from radynpy.utils import hydrogen_absorption
from numba import njit
from pathlib import Path
from scipy.linalg import solve
from scipy.interpolate import interp1d, PchipInterpolator
# from HydroWeno.Simulation import Grid
# from HydroWeno.Advector import Advector
# from HydroWeno.BCs import zero_grad_bc
# from HydroWeno.Weno import reconstruct_weno_nm_z
import warnings
from traceback import print_stack
from weno4 import weno4
from RadynAdvection import an_sol, an_rad_sol, an_gml_sol
import pdb
# https://stackoverflow.com/a/21901260
import subprocess
def convert_atomic_pops(atom):
    '''Flatten an atomic-population object into a plain dict with keys
    'n', 'nStar' and 'radiativeRates' (e.g. for pickling a snapshot).'''
    d = {}
    if atom.pops is not None:
        d['n'] = atom.pops
    else:
        # NOTE(review): both branches assign atom.pops, making this if/else
        # a no-op. The else was presumably meant to fall back to another
        # attribute (perhaps atom.nStar) — confirm the intended behavior.
        d['n'] = atom.pops
    d['nStar'] = atom.nStar
    d['radiativeRates'] = atom.radiativeRates
    return d
def distill_pops(eqPops):
    '''Collect per-element population dicts keyed by element name.'''
    return {atom.element.name: convert_atomic_pops(atom)
            for atom in eqPops.atomicPops}
| 37.611367 | 260 | 0.575128 |
404999f8afb17c0ba6be91ab0f875db288f28bae | 1,895 | py | Python | common/writeExcel.py | lixiaofeng1993/DjangoBlog | 94d062324367b8a30edf8d29e2e661c822bcb7c1 | [
"MIT"
] | null | null | null | common/writeExcel.py | lixiaofeng1993/DjangoBlog | 94d062324367b8a30edf8d29e2e661c822bcb7c1 | [
"MIT"
] | 6 | 2020-06-06T00:44:08.000Z | 2022-01-13T01:52:46.000Z | common/writeExcel.py | lixiaofeng1993/DjangoBlog | 94d062324367b8a30edf8d29e2e661c822bcb7c1 | [
"MIT"
] | null | null | null | # coding:utf-8
from openpyxl import load_workbook
import openpyxl
from openpyxl.styles import Font, colors
def copy_excel(cese_path, report_path):
    """
    Copy the first worksheet of the source (test-case) workbook into a
    freshly created report workbook, cell by cell.

    :param cese_path: path of the source workbook to copy from
    :param report_path: path of the report workbook to create/overwrite
    :return: None
    """
    wb2 = openpyxl.Workbook()
    wb2.save(report_path)  # create an empty report workbook on disk
    # Reopen both files so the copy works on saved workbooks.
    wb1 = openpyxl.load_workbook(cese_path)
    wb2 = openpyxl.load_workbook(report_path)
    sheet1 = wb1[wb1.sheetnames[0]]  # first sheet of the source
    sheet2 = wb2[wb2.sheetnames[0]]  # first sheet of the report
    max_row = sheet1.max_row
    max_column = sheet1.max_column
    # Copy values via numeric (row, column) coordinates. Unlike the previous
    # chr(97 + n) letter arithmetic, this also works for sheets wider than
    # 26 columns (column 'AA' and beyond).
    for m in range(1, max_row + 1):
        for n in range(1, max_column + 1):
            sheet2.cell(row=m, column=n).value = sheet1.cell(row=m, column=n).value
    wb2.save(report_path)  # persist the copied data
    wb1.close()
    wb2.close()
if __name__ == "__main__":
    # Ad-hoc manual test for this module.
    # copy_excel("demo_api_3.xlsx", "test111.xlsx")
    # NOTE(review): Write_excel is not defined in the visible content of
    # this file — presumably a helper class that writes (row, col, value)
    # into an existing workbook. Confirm it is defined/imported before use.
    wt = Write_excel("test111.xlsx")
    wt.write(4, 5, "HELLEOP")
    wt.write(4, 6, "HELLEOP")
| 28.712121 | 65 | 0.601055 |
404a28cfcd9e972210d8ead53be99918c37812fc | 1,904 | py | Python | test/cts/tool/CTSConverter/src/nn/specs/V1_1/depthwise_conv2d_float_weights_as_inputs_relaxed.mod.py | zhaoming0/webml-polyfill | 56cf96eff96665da0f5fd7ef86fd5748f4bd22b9 | [
"Apache-2.0"
] | 255 | 2020-05-22T07:45:29.000Z | 2022-03-29T23:58:22.000Z | test/cts/tool/CTSConverter/src/nn/specs/V1_1/depthwise_conv2d_float_weights_as_inputs_relaxed.mod.py | zhaoming0/webml-polyfill | 56cf96eff96665da0f5fd7ef86fd5748f4bd22b9 | [
"Apache-2.0"
] | 5,102 | 2020-05-22T07:48:33.000Z | 2022-03-31T23:43:39.000Z | test/cts/tool/CTSConverter/src/nn/specs/V1_1/depthwise_conv2d_float_weights_as_inputs_relaxed.mod.py | ibelem/webml-polyfill | aaf1ba4f5357eaf6e89bf9990f5bdfb543cd2bc2 | [
"Apache-2.0"
] | 120 | 2020-05-22T07:51:08.000Z | 2022-02-16T19:08:05.000Z | #
# Copyright (C) 2018 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
model = Model()
i1 = Input("op1", "TENSOR_FLOAT32", "{1, 3, 3, 2}")
f1 = Input("op2", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
b1 = Input("op3", "TENSOR_FLOAT32", "{4}")
pad0 = Int32Scalar("pad0", 0)
act = Int32Scalar("act", 0)
stride = Int32Scalar("stride", 1)
cm = Int32Scalar("channelMultiplier", 2)
output = Output("op4", "TENSOR_FLOAT32", "{1, 2, 2, 4}")
model = model.Operation("DEPTHWISE_CONV_2D",
i1, f1, b1,
pad0, pad0, pad0, pad0,
stride, stride,
cm, act).To(output)
model = model.RelaxedExecution(True)
# Example 1. Input in operand 0,
input0 = {i1: # input 0
[10, 21, 10, 22, 10, 23,
10, 24, 10, 25, 10, 26,
10, 27, 10, 28, 10, 29],
f1:
[.25, 0, .2, 0,
.25, 0, 0, .3,
.25, 0, 0, 0,
.25, .1, 0, 0],
b1:
[1, 2, 3, 4]}
# (i1 (conv) f1) + b1
# filter usage:
# in_ch1 * f_1 --> output_d1
# in_ch1 * f_2 --> output_d2
# in_ch2 * f_3 --> output_d3
# in_ch3 * f_4 --> output_d4
output0 = {output: # output 0
[11, 3, 7.2, 10.6,
11, 3, 7.4, 10.9,
11, 3, 7.8, 11.5,
11, 3, 8.0, 11.8]}
# Instantiate an example
Example((input0, output0))
| 31.733333 | 74 | 0.56355 |
404a73f48e1b3ca8bb85958c0c604a1931f4d34f | 1,450 | py | Python | jina/executors/evaluators/rank/recall.py | sdsd0101/jina | 1a835d9015c627a2cbcdc58ee3d127962ada1bc9 | [
"Apache-2.0"
] | 2 | 2020-10-19T17:06:19.000Z | 2020-10-22T14:10:55.000Z | jina/executors/evaluators/rank/recall.py | ayansiddiqui007/jina | 2a764410de47cc11e53c8f652ea1095d5dab5435 | [
"Apache-2.0"
] | null | null | null | jina/executors/evaluators/rank/recall.py | ayansiddiqui007/jina | 2a764410de47cc11e53c8f652ea1095d5dab5435 | [
"Apache-2.0"
] | null | null | null | from typing import Sequence, Any
from jina.executors.evaluators.rank import BaseRankingEvaluator
from jina.executors.evaluators.decorators import as_aggregator
| 35.365854 | 117 | 0.648966 |
404be03a1fd1048c68239ebc361551f5a1526980 | 270 | py | Python | tests/schema_mapping/structures/example5.py | danny-vayu/typedpy | e97735a742acbd5f1133e23f08cf43836476686a | [
"MIT"
] | null | null | null | tests/schema_mapping/structures/example5.py | danny-vayu/typedpy | e97735a742acbd5f1133e23f08cf43836476686a | [
"MIT"
] | null | null | null | tests/schema_mapping/structures/example5.py | danny-vayu/typedpy | e97735a742acbd5f1133e23f08cf43836476686a | [
"MIT"
] | null | null | null | from typedpy import Array, DoNotSerialize, Structure, mappers
| 20.769231 | 73 | 0.674074 |
404bea024a89b873fc6d227cd6a12a54af3b3b8c | 3,447 | py | Python | src/semantic_parsing_with_constrained_lm/eval.py | microsoft/semantic_parsing_with_constrained_lm | 7e3c099500c3102e46d7a47469fe6840580c2b11 | [
"MIT"
] | 17 | 2021-09-22T13:08:37.000Z | 2022-03-27T10:39:53.000Z | src/semantic_parsing_with_constrained_lm/eval.py | microsoft/semantic_parsing_with_constrained_lm | 7e3c099500c3102e46d7a47469fe6840580c2b11 | [
"MIT"
] | 1 | 2022-03-12T01:05:15.000Z | 2022-03-12T01:05:15.000Z | src/semantic_parsing_with_constrained_lm/eval.py | microsoft/semantic_parsing_with_constrained_lm | 7e3c099500c3102e46d7a47469fe6840580c2b11 | [
"MIT"
] | 1 | 2021-12-16T22:26:54.000Z | 2021-12-16T22:26:54.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import dataclasses
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Dict, Generic, List, Optional, Sequence, TypeVar
from semantic_parsing_with_constrained_lm.datum import FullDatum, FullDatumSub
from semantic_parsing_with_constrained_lm.model import ModelResult
# Generic type parameters for evaluation helpers: predictions and gold
# targets may have different types, so they are parameterized separately.
Pred = TypeVar("Pred")
Target = TypeVar("Target")
# TODO: Replace this with a more flexible function suited to each domain
| 32.214953 | 81 | 0.630403 |
404c32173164735222505b93f1ef2b7219cec987 | 8,913 | py | Python | lib/surface/spanner/operations/list.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 2 | 2019-11-10T09:17:07.000Z | 2019-12-18T13:44:08.000Z | lib/surface/spanner/operations/list.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | null | null | null | lib/surface/spanner/operations/list.py | google-cloud-sdk-unofficial/google-cloud-sdk | 2a48a04df14be46c8745050f98768e30474a1aac | [
"Apache-2.0"
] | 1 | 2020-07-25T01:40:19.000Z | 2020-07-25T01:40:19.000Z | # -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for spanner operations list."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import textwrap
from googlecloudsdk.api_lib.spanner import backup_operations
from googlecloudsdk.api_lib.spanner import database_operations
from googlecloudsdk.api_lib.spanner import instance_config_operations
from googlecloudsdk.api_lib.spanner import instance_operations
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as c_exceptions
from googlecloudsdk.command_lib.spanner import flags
def _CommonRun(args):
  """Performs run actions common to all List stages.

  Configures the output table format for the requested operation type,
  validates mutually-exclusive flag combinations, then dispatches to the
  matching operations-list API call. Returns the list of operations.
  """
  # True when --type refers to any kind of database operation.
  is_database_type = (
      args.type == 'DATABASE_RESTORE' or args.type == 'DATABASE' or
      args.type == 'DATABASE_CREATE' or args.type == 'DATABASE_UPDATE_DDL')
  if args.backup or args.type == 'BACKUP':
    # Update output table for backup operations.
    # pylint:disable=protected-access
    args._GetParser().ai.display_info.AddFormat("""
          table(
            name.basename():label=OPERATION_ID,
            done():label=DONE,
            metadata.'@type'.split('.').slice(-1:).join(),
            metadata.name.split('/').slice(-1:).join():label=BACKUP,
            metadata.database.split('/').slice(-1).join():label=SOURCE_DATABASE,
            metadata.progress.startTime:label=START_TIME,
            metadata.progress.endTime:label=END_TIME
          )
        """)
  if args.type == 'DATABASE_RESTORE':
    # Update output table for restore operations.
    # pylint:disable=protected-access
    args._GetParser().ai.display_info.AddFormat("""
          table(
            name.basename():label=OPERATION_ID,
            done():label=DONE,
            metadata.'@type'.split('.').slice(-1:).join(),
            metadata.name.split('/').slice(-1:).join():label=RESTORED_DATABASE,
            metadata.backupInfo.backup.split('/').slice(-1).join():label=SOURCE_BACKUP,
            metadata.progress.startTime:label=START_TIME,
            metadata.progress.endTime:label=END_TIME
          )
        """)
  elif is_database_type:
    # Update output table for database operations.
    # pylint:disable=protected-access
    args._GetParser().ai.display_info.AddFormat("""
          table(
            name.basename():label=OPERATION_ID,
            metadata.statements.join(sep="\n"),
            done():label=DONE,
            metadata.'@type'.split('.').slice(-1:).join(),
            database().split('/').slice(-1:).join():label=DATABASE_ID
          )
        """)
  # Checks that user only specified either database or backup flag.
  if (args.IsSpecified('database') and args.IsSpecified('backup')):
    raise c_exceptions.InvalidArgumentException(
        '--database or --backup',
        'Must specify either --database or --backup. To search backups for a '
        'specific database, use the --database flag with --type=BACKUP')
  # Checks that the user did not specify the backup flag with the type filter
  # set to a database operation type.
  if (args.IsSpecified('backup') and is_database_type):
    raise c_exceptions.InvalidArgumentException(
        '--backup or --type',
        'The backup flag cannot be used with the type flag set to a '
        'database operation type.')
  # Instance operations accept neither --database nor --backup.
  if args.type == 'INSTANCE':
    if args.IsSpecified('database'):
      raise c_exceptions.InvalidArgumentException(
          '--database or --type',
          'The `--database` flag cannot be used with `--type=INSTANCE`.')
    if args.IsSpecified('backup'):
      raise c_exceptions.InvalidArgumentException(
          '--backup or --type',
          'The `--backup` flag cannot be used with `--type=INSTANCE`.')
  # Dispatch: backup listing, optionally scoped to a database or a backup.
  if args.type == 'BACKUP':
    if args.database:
      db_filter = backup_operations.BuildDatabaseFilter(args.instance,
                                                        args.database)
      return backup_operations.List(args.instance, db_filter)
    if args.backup:
      return backup_operations.ListGeneric(args.instance, args.backup)
    return backup_operations.List(args.instance)
  # Dispatch: database operations filtered by the requested operation type.
  if is_database_type:
    type_filter = database_operations.BuildDatabaseOperationTypeFilter(
        args.type)
    return database_operations.ListDatabaseOperations(args.instance,
                                                      args.database,
                                                      type_filter)
  # Fallbacks: scope by backup, by database, or list instance operations.
  if args.backup:
    return backup_operations.ListGeneric(args.instance, args.backup)
  if args.database:
    return database_operations.List(args.instance, args.database)
  return instance_operations.List(args.instance)
| 37.766949 | 95 | 0.674072 |
404cc5bda9dc3a1cc2cecd17725e8022aeed3ad0 | 49,844 | py | Python | pgmpy/tests/test_models/test_SEM.py | predictive-analytics-lab/pgmpy | 6c2a31641adc72793acd130d007190fdb1632271 | [
"MIT"
] | null | null | null | pgmpy/tests/test_models/test_SEM.py | predictive-analytics-lab/pgmpy | 6c2a31641adc72793acd130d007190fdb1632271 | [
"MIT"
] | null | null | null | pgmpy/tests/test_models/test_SEM.py | predictive-analytics-lab/pgmpy | 6c2a31641adc72793acd130d007190fdb1632271 | [
"MIT"
] | null | null | null | import os
import unittest
import numpy as np
import networkx as nx
import numpy.testing as npt
from pgmpy.models import SEM, SEMGraph, SEMAlg
| 41.536667 | 100 | 0.455862 |
404d173b85da7aa2302b72d549875f4086a67bcc | 1,790 | py | Python | data_scripts/translation.py | wangcongcong123/transection | 3b931ce09c9b5e03ec6afdea6f58a317ad07361b | [
"MIT"
] | 4 | 2021-01-11T06:21:27.000Z | 2021-12-19T17:49:07.000Z | data_scripts/translation.py | wangcongcong123/transection | 3b931ce09c9b5e03ec6afdea6f58a317ad07361b | [
"MIT"
] | null | null | null | data_scripts/translation.py | wangcongcong123/transection | 3b931ce09c9b5e03ec6afdea6f58a317ad07361b | [
"MIT"
] | 2 | 2021-01-21T02:48:49.000Z | 2021-03-19T09:45:52.000Z | # coding=utf-8
# This script is finished following HF's datasets' template:
# https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py
# More examples as references to write a customized dataset can be found here:
# https://github.com/huggingface/datasets/tree/master/datasets
from __future__ import absolute_import, division, print_function
import json
import datasets
_CITATION = """\
"""
_DESCRIPTION = """\
"""
_TRAIN_DOWNLOAD_URL = "data/train.json"
_VAL_DOWNLOAD_URL = "data/val.json"
| 32.545455 | 103 | 0.622346 |
404f20db207c728bba35266d11df1248aa4d138a | 7,941 | py | Python | utils/chat_formatting.py | lyricalpaws/snekbot | 704197777dbaa284d163a95642e224d6efe2c4b2 | [
"MIT"
] | 13 | 2018-11-26T15:55:28.000Z | 2022-02-05T16:07:02.000Z | utils/chat_formatting.py | lyricalpaws/snekbot | 704197777dbaa284d163a95642e224d6efe2c4b2 | [
"MIT"
] | 8 | 2018-11-12T19:04:01.000Z | 2018-11-23T15:11:55.000Z | utils/chat_formatting.py | lyricalpaws/snekbot | 704197777dbaa284d163a95642e224d6efe2c4b2 | [
"MIT"
] | 23 | 2019-01-01T23:53:37.000Z | 2022-03-12T14:52:45.000Z | import itertools
from typing import Sequence, Iterator
# Source: https://github.com/Cog-Creators/Red-DiscordBot/blob/V3/develop/redbot/core/utils/chat_formatting.py
def error(text: str) -> str:
"""Get text prefixed with an error emoji.
Returns
-------
str
The new message.
"""
return "\N{NO ENTRY SIGN} {}".format(text)
def warning(text: str) -> str:
"""Get text prefixed with a warning emoji.
Returns
-------
str
The new message.
"""
return "\N{WARNING SIGN} {}".format(text)
def info(text: str) -> str:
"""Get text prefixed with an info emoji.
Returns
-------
str
The new message.
"""
return "\N{INFORMATION SOURCE} {}".format(text)
def question(text: str) -> str:
"""Get text prefixed with a question emoji.
Returns
-------
str
The new message.
"""
return "\N{BLACK QUESTION MARK ORNAMENT} {}".format(text)
def bold(text: str) -> str:
"""Get the given text in bold.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "**{}**".format(text)
def box(text: str, lang: str = "") -> str:
"""Get the given text in a code block.
Parameters
----------
text : str
The text to be marked up.
lang : `str`, optional
The syntax highlighting language for the codeblock.
Returns
-------
str
The marked up text.
"""
ret = "```{}\n{}\n```".format(lang, text)
return ret
def inline(text: str) -> str:
"""Get the given text as inline code.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "`{}`".format(text)
def italics(text: str) -> str:
"""Get the given text in italics.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "*{}*".format(text)
def bordered(*columns: Sequence[str], ascii_border: bool = False) -> str:
"""Get two blocks of text in a borders.
Note
----
This will only work with a monospaced font.
Parameters
----------
*columns : `sequence` of `str`
The columns of text, each being a list of lines in that column.
ascii_border : bool
Whether or not the border should be pure ASCII.
Returns
-------
str
The bordered text.
"""
borders = {
"TL": "-" if ascii_border else "", # Top-left
"TR": "-" if ascii_border else "", # Top-right
"BL": "-" if ascii_border else "", # Bottom-left
"BR": "-" if ascii_border else "", # Bottom-right
"HZ": "-" if ascii_border else "", # Horizontal
"VT": "|" if ascii_border else "", # Vertical
}
sep = " " * 4 # Separator between boxes
widths = tuple(
max(len(row) for row in column) + 9 for column in columns
) # width of each col
colsdone = [False] * len(columns) # whether or not each column is done
lines = [sep.join("{TL}" + "{HZ}" * width + "{TR}" for width in widths)]
for line in itertools.zip_longest(*columns):
row = []
for colidx, column in enumerate(line):
width = widths[colidx]
done = colsdone[colidx]
if column is None:
if not done:
# bottom border of column
column = "{HZ}" * width
row.append("{BL}" + column + "{BR}")
colsdone[colidx] = True # mark column as done
else:
# leave empty
row.append(" " * (width + 2))
else:
column += " " * (width - len(column)) # append padded spaces
row.append("{VT}" + column + "{VT}")
lines.append(sep.join(row))
final_row = []
for width, done in zip(widths, colsdone):
if not done:
final_row.append("{BL}" + "{HZ}" * width + "{BR}")
else:
final_row.append(" " * (width + 2))
lines.append(sep.join(final_row))
return "\n".join(lines).format(**borders)
def strikethrough(text: str) -> str:
"""Get the given text with a strikethrough.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "~~{}~~".format(text)
def underline(text: str) -> str:
"""Get the given text with an underline.
Parameters
----------
text : str
The text to be marked up.
Returns
-------
str
The marked up text.
"""
return "__{}__".format(text)
def escape(text: str, *, mass_mentions: bool = False, formatting: bool = False) -> str:
"""Get text with all mass mentions or markdown escaped.
Parameters
----------
text : str
The text to be escaped.
mass_mentions : `bool`, optional
Set to :code:`True` to escape mass mentions in the text.
formatting : `bool`, optional
Set to :code:`True` to escpae any markdown formatting in the text.
Returns
-------
str
The escaped text.
"""
if mass_mentions:
text = text.replace("@everyone", "@\u200beveryone")
text = text.replace("@here", "@\u200bhere")
if formatting:
text = (
text.replace("`", "\\`")
.replace("*", "\\*")
.replace("_", "\\_")
.replace("~", "\\~")
)
return text
| 27.28866 | 109 | 0.546783 |
404ff68f947024e93fe50b765fa029be24f36c84 | 35,410 | py | Python | strategy/trade/strategymargintrade.py | firebird631/siis | 8d64e8fb67619aaa5c0a62fda9de51dedcd47796 | [
"PostgreSQL"
] | null | null | null | strategy/trade/strategymargintrade.py | firebird631/siis | 8d64e8fb67619aaa5c0a62fda9de51dedcd47796 | [
"PostgreSQL"
] | null | null | null | strategy/trade/strategymargintrade.py | firebird631/siis | 8d64e8fb67619aaa5c0a62fda9de51dedcd47796 | [
"PostgreSQL"
] | null | null | null | # @date 2018-12-28
# @author Frederic Scherma, All rights reserved without prejudices.
# @license Copyright (c) 2018 Dream Overflow
# Strategy trade for margin with multiples positions.
from __future__ import annotations
from typing import TYPE_CHECKING, Optional, Tuple
if TYPE_CHECKING:
from trader.trader import Trader
from instrument.instrument import Instrument
from strategy.strategytrader import StrategyTrader
from strategy.strategytradercontext import StrategyTraderContextBuilder
from common.signal import Signal
from trader.order import Order
from .strategytrade import StrategyTrade
import logging
logger = logging.getLogger('siis.strategy.margintrade')
#
# persistence
#
#
# stats
#
| 37.352321 | 119 | 0.553347 |
4050379eb6d6d6226db82e9fbcbba3933c358e43 | 718 | py | Python | src/pyramid_debugtoolbar_api_sqlalchemy/__init__.py | jvanasco/pyramid_debugtoolbar_api_sqla | 286a7e4ee32e7dd64f31813c46d59e0651534cbd | [
"MIT"
] | null | null | null | src/pyramid_debugtoolbar_api_sqlalchemy/__init__.py | jvanasco/pyramid_debugtoolbar_api_sqla | 286a7e4ee32e7dd64f31813c46d59e0651534cbd | [
"MIT"
] | null | null | null | src/pyramid_debugtoolbar_api_sqlalchemy/__init__.py | jvanasco/pyramid_debugtoolbar_api_sqla | 286a7e4ee32e7dd64f31813c46d59e0651534cbd | [
"MIT"
] | null | null | null | # local
from .panels import SqlalchemyCsvDebugPanel
__VERSION__ = "0.3.1"
# ==============================================================================
def includeme(config):
"""
Pyramid hook to install this debugtoolbar plugin.
Update your ENVIRONMENT.ini file
debugtoolbar.includes = pyramid_debugtoolbar_api_sqlalchemy
"""
config.add_debugtoolbar_panel(SqlalchemyCsvDebugPanel)
config.add_route(
"debugtoolbar.api_sqlalchemy.queries.csv",
"/api-sqlalchemy/sqlalchemy-{request_id}.csv",
)
config.scan("pyramid_debugtoolbar_api_sqlalchemy.views")
config.commit()
# ==============================================================================
| 24.758621 | 80 | 0.558496 |
4050460227ae968820c1eb94e5dff24549e4e557 | 1,165 | py | Python | ultron/utilities/zlib_engine.py | wangjiehui11235/ultron | ade46fdcff7eaf01187cdf9b9fb1d6a04ae972b7 | [
"Apache-2.0"
] | 4 | 2019-06-06T09:38:49.000Z | 2022-01-29T00:02:11.000Z | ultron/utilities/zlib_engine.py | wangjiehui11235/ultron | ade46fdcff7eaf01187cdf9b9fb1d6a04ae972b7 | [
"Apache-2.0"
] | 1 | 2022-02-11T03:43:10.000Z | 2022-02-11T03:43:10.000Z | ultron/utilities/zlib_engine.py | wangjiehui11235/ultron | ade46fdcff7eaf01187cdf9b9fb1d6a04ae972b7 | [
"Apache-2.0"
] | 8 | 2019-06-02T13:11:00.000Z | 2021-11-11T01:06:22.000Z | # -*- coding: utf-8 -*-
import os,os.path
import zipfile | 32.361111 | 66 | 0.581974 |
4050b29c16a41f96705714cdbf17492431b85f0e | 1,985 | py | Python | scripts/instances2inventory.py | TipaZloy/coda-automation | 20c00a92f2e3088e205677c0db96b3ed5c82b238 | [
"Apache-2.0"
] | null | null | null | scripts/instances2inventory.py | TipaZloy/coda-automation | 20c00a92f2e3088e205677c0db96b3ed5c82b238 | [
"Apache-2.0"
] | null | null | null | scripts/instances2inventory.py | TipaZloy/coda-automation | 20c00a92f2e3088e205677c0db96b3ed5c82b238 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import boto
import boto.ec2
import sys
from pprint import pprint
from collections import defaultdict
output = defaultdict(lambda: [])
comments = defaultdict(lambda: {})
skip_region_strings = ['us-gov', 'cn-', 'ca-']
#skip_region_strings = ['us-gov', 'cn-', 'ca-', 'eu-', 'ap-']
if len(sys.argv) > 1:
filter = sys.argv[1]
else:
filter = False
regions = boto.ec2.regions()
for region in regions:
if any (skip_string in region.name for skip_string in skip_region_strings):
continue
print('# Querying region:', region)
ec2conn = boto.connect_ec2(region=region)
reservations = ec2conn.get_all_instances()
instances = [i for r in reservations for i in r.instances]
for i in instances:
if filter:
if 'Name' in i.tags:
if filter not in i.tags['Name']:
continue
if 'running' not in i.state:
continue
if 'Name' in i.tags:
if 'Packer' in i.tags['Name']: continue
if i.tags['Name'].count('_') == 2:
try:
(net, group, num) = i.tags['Name'].split('_')
myregion = region.name
except:
print('Error parsing ', i.tags['Name'])
continue
elif i.tags['Name'].count('_') == 3:
try:
(net, myregion, group, num) = i.tags['Name'].split('_')
except:
print('Error parsing ', i.tags['Name'])
continue
groupname = "%ss" % group
else:
print('NONAME', end='')
groupname = 'unknown'
i.tags['Name'] = 'NONE'
output[groupname].append(i.public_dns_name)
try:
comments[groupname][i.public_dns_name] = "# %s\t%s\t%s\t%s\t%s" % (i.tags['Name'], myregion, i.instance_type, i.ip_address, i.launch_time)
except:
comments[groupname][i.public_dns_name] = "# MISSING DATA"
for group in output:
print("[%s]" % group)
hostlist = output[group]
hostlist.sort()
for host in hostlist:
print("%s \t%s" % (host, comments[group][host]))
print("\n")
| 25.126582 | 144 | 0.602519 |
4050f12cd3fda3e62426b196e960faffe455d7f7 | 938 | py | Python | selfdrive/crash.py | darknight111/openpilot3 | a0c755fbe1889f26404a8225816f57e89fde7bc2 | [
"MIT"
] | 19 | 2020-08-05T12:11:58.000Z | 2022-03-07T01:18:56.000Z | selfdrive/crash.py | darknight111/openpilot3 | a0c755fbe1889f26404a8225816f57e89fde7bc2 | [
"MIT"
] | 18 | 2020-08-20T05:17:38.000Z | 2021-12-06T09:02:00.000Z | selfdrive/crash.py | darknight111/openpilot3 | a0c755fbe1889f26404a8225816f57e89fde7bc2 | [
"MIT"
] | 25 | 2020-08-30T09:10:14.000Z | 2022-02-20T02:31:13.000Z | """Install exception handler for process crash."""
from selfdrive.swaglog import cloudlog
from selfdrive.version import version
import sentry_sdk
from sentry_sdk.integrations.threading import ThreadingIntegration
| 33.5 | 102 | 0.735608 |
4051ffa508f128d4ca3a6951f908adec0dd2fce3 | 1,235 | py | Python | 0000_examples/grasping_antipodal_planning.py | huzhengtao14z/wrs | d567787ca41818f1756c325b304215faf7f10f29 | [
"MIT"
] | null | null | null | 0000_examples/grasping_antipodal_planning.py | huzhengtao14z/wrs | d567787ca41818f1756c325b304215faf7f10f29 | [
"MIT"
] | null | null | null | 0000_examples/grasping_antipodal_planning.py | huzhengtao14z/wrs | d567787ca41818f1756c325b304215faf7f10f29 | [
"MIT"
] | null | null | null | import math
import visualization.panda.world as wd
import modeling.geometric_model as gm
import modeling.collision_model as cm
import grasping.planning.antipodal as gpa
import robot_sim.end_effectors.grippers.yumi_gripper.yumi_gripper as yg
base = wd.World(cam_pos=[1, 1, 1],w=960,
h=540, lookat_pos=[0, 0, 0])
gm.gen_frame().attach_to(base)
# object
object_tube = cm.CollisionModel("objects/tubebig.stl")
object_tube.set_rgba([.9, .75, .35, 1])
object_tube.attach_to(base)
# hnd_s
gripper_s = yg.YumiGripper()
grasp_info_list = gpa.plan_grasps(gripper_s, object_tube,
angle_between_contact_normals=math.radians(177),
openning_direction='loc_x',
max_samples=15, min_dist_between_sampled_contact_points=.005,
contact_offset=.005)
gpa.write_pickle_file('tubebig', grasp_info_list, './', 'yumi_tube_big.pickle')
for grasp_info in grasp_info_list:
jaw_width, jaw_center_pos, jaw_center_rotmat, hnd_pos, hnd_rotmat = grasp_info
gripper_s.grip_at_with_jcpose(jaw_center_pos, jaw_center_rotmat, jaw_width)
gripper_s.gen_meshmodel(rgba=(1,0,0,0.01)).attach_to(base)
base.run() | 44.107143 | 95 | 0.701215 |
40544e3050932f38de418744707458dee5d3337b | 60,103 | py | Python | keystone/assignment/core.py | pritha-srivastava/keystone | 69abe058328954becdea13cc245459f2ba2342fc | [
"Apache-2.0"
] | null | null | null | keystone/assignment/core.py | pritha-srivastava/keystone | 69abe058328954becdea13cc245459f2ba2342fc | [
"Apache-2.0"
] | null | null | null | keystone/assignment/core.py | pritha-srivastava/keystone | 69abe058328954becdea13cc245459f2ba2342fc | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Assignment service."""
import copy
import itertools
from oslo_log import log
from keystone.common import cache
from keystone.common import driver_hints
from keystone.common import manager
from keystone.common import provider_api
import keystone.conf
from keystone import exception
from keystone.i18n import _
from keystone import notifications
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
PROVIDERS = provider_api.ProviderAPIs
# This is a general cache region for assignment administration (CRUD
# operations).
MEMOIZE = cache.get_memoization_decorator(group='role')
# This builds a discrete cache region dedicated to role assignments computed
# for a given user + project/domain pair. Any write operation to add or remove
# any role assignment should invalidate this entire cache region.
COMPUTED_ASSIGNMENTS_REGION = cache.create_region(name='computed assignments')
MEMOIZE_COMPUTED_ASSIGNMENTS = cache.get_memoization_decorator(
group='role',
region=COMPUTED_ASSIGNMENTS_REGION)
| 44.520741 | 79 | 0.606842 |
4059ed80d6a8d54038d707dea3406a21f8501339 | 3,193 | py | Python | single-shot-pose/lib/linemod_dataset.py | take-cheeze/models | 3ded8fd062c57f20f6154cac2dd0d998181de755 | [
"MIT"
] | 112 | 2018-04-18T07:13:03.000Z | 2022-03-11T03:36:34.000Z | single-shot-pose/lib/linemod_dataset.py | take-cheeze/models | 3ded8fd062c57f20f6154cac2dd0d998181de755 | [
"MIT"
] | 16 | 2018-05-11T11:41:08.000Z | 2021-04-24T03:50:54.000Z | single-shot-pose/lib/linemod_dataset.py | take-cheeze/models | 3ded8fd062c57f20f6154cac2dd0d998181de755 | [
"MIT"
] | 45 | 2018-04-18T07:13:06.000Z | 2021-12-22T03:46:18.000Z | import numpy as np
import os
from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
from chainercv.utils import read_image
linemod_object_diameters = {
'ape': 0.103,
'benchvise': 0.286908,
'cam': 0.173,
'can': 0.202,
'cat': 0.155,
'driller': 0.262,
'duck': 0.109,
'eggbox': 0.176364,
'glue': 0.176,
'holepuncher': 0.162,
'iron': 0.303153,
'lamp': 0.285155,
'phone': 0.213}
| 30.409524 | 75 | 0.551832 |
405b957bd7045b5d856865ed3de04736c0fcea38 | 10,857 | py | Python | DQM/BeamMonitor/test/44X_beam_dqm_sourceclient-live_cfg.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | DQM/BeamMonitor/test/44X_beam_dqm_sourceclient-live_cfg.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | DQM/BeamMonitor/test/44X_beam_dqm_sourceclient-live_cfg.py | nistefan/cmssw | ea13af97f7f2117a4f590a5e654e06ecd9825a5b | [
"Apache-2.0"
] | null | null | null | import FWCore.ParameterSet.Config as cms
process = cms.Process("BeamMonitor")
#----------------------------
# Common part for PP and H.I Running
#-----------------------------
process.load("DQM.Integration.test.inputsource_cfi")
#--------------------------
# HLT Filter
process.load("HLTrigger.special.HLTTriggerTypeFilter_cfi")
# 0=random, 1=physics, 2=calibration, 3=technical
process.hltTriggerTypeFilter.SelectedTriggerType = 1
#----------------------------
# DQM Live Environment
#-----------------------------
process.load("DQM.Integration.test.environment_cfi")
process.dqmEnv.subSystemFolder = 'BeamMonitor'
import DQMServices.Components.DQMEnvironment_cfi
process.dqmEnvPixelLess = DQMServices.Components.DQMEnvironment_cfi.dqmEnv.clone()
process.dqmEnvPixelLess.subSystemFolder = 'BeamMonitor_PixelLess'
#----------------------------
# BeamMonitor
#-----------------------------
process.load("DQM.BeamMonitor.BeamMonitor_cff")
process.load("DQM.BeamMonitor.BeamMonitorBx_cff")
process.load("DQM.BeamMonitor.BeamMonitor_PixelLess_cff")
process.load("DQM.BeamMonitor.BeamConditionsMonitor_cff")
#### SETUP TRACKING RECONSTRUCTION ####
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load("DQM.Integration.test.FrontierCondition_GT_cfi")
process.load("Configuration.StandardSequences.RawToDigi_Data_cff")
# Change Beam Monitor variables
if process.dqmSaver.producer.value() is "Playback":
process.dqmBeamMonitor.BeamFitter.WriteAscii = False
process.dqmBeamMonitor.BeamFitter.AsciiFileName = '/nfshome0/yumiceva/BeamMonitorDQM/BeamFitResults.txt'
process.dqmBeamMonitor.BeamFitter.WriteDIPAscii = True
process.dqmBeamMonitor.BeamFitter.DIPFileName = '/nfshome0/dqmdev/BeamMonitorDQM/BeamFitResults.txt'
else:
process.dqmBeamMonitor.BeamFitter.WriteAscii = True
process.dqmBeamMonitor.BeamFitter.AsciiFileName = '/nfshome0/yumiceva/BeamMonitorDQM/BeamFitResults.txt'
process.dqmBeamMonitor.BeamFitter.WriteDIPAscii = True
process.dqmBeamMonitor.BeamFitter.DIPFileName = '/nfshome0/dqmpro/BeamMonitorDQM/BeamFitResults.txt'
#process.dqmBeamMonitor.BeamFitter.SaveFitResults = False
#process.dqmBeamMonitor.BeamFitter.OutputFileName = '/nfshome0/yumiceva/BeamMonitorDQM/BeamFitResults.root'
process.dqmBeamMonitorBx.BeamFitter.WriteAscii = True
process.dqmBeamMonitorBx.BeamFitter.AsciiFileName = '/nfshome0/yumiceva/BeamMonitorDQM/BeamFitResults_Bx.txt'
## TKStatus
process.dqmTKStatus = cms.EDAnalyzer("TKStatus",
BeamFitter = cms.PSet(
DIPFileName = process.dqmBeamMonitor.BeamFitter.DIPFileName
)
)
process.dqmcommon = cms.Sequence(process.dqmEnv
*process.dqmSaver)
process.monitor = cms.Sequence(process.dqmBeamMonitor)
#--------------------------
# Proton-Proton Stuff
#--------------------------
if (process.runType.getRunType() == process.runType.pp_run or process.runType.getRunType() == process.runType.cosmic_run):
print "Running pp"
process.EventStreamHttpReader.SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('HLT_L1*',
'HLT_Jet*',
'HLT_*Cosmic*',
'HLT_HT*',
'HLT_MinBias_*',
'HLT_Physics*',
'HLT_ZeroBias_v*')
)
process.load("Configuration.StandardSequences.Reconstruction_cff")
process.load("RecoTracker.IterativeTracking.iterativeTk_cff")
## Pixelless Tracking
process.load('RecoTracker/Configuration/RecoTrackerNotStandard_cff')
process.MeasurementTracker.pixelClusterProducer = cms.string("")
# Offline Beam Spot
process.load("RecoVertex.BeamSpotProducer.BeamSpot_cff")
## Offline PrimaryVertices
import RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi
process.offlinePrimaryVertices = RecoVertex.PrimaryVertexProducer.OfflinePrimaryVertices_cfi.offlinePrimaryVertices.clone()
process.dqmBeamMonitor.OnlineMode = True
process.dqmBeamMonitor.resetEveryNLumi = 5
process.dqmBeamMonitor.resetPVEveryNLumi = 5
process.dqmBeamMonitor.PVFitter.minNrVerticesForFit = 25
process.dqmBeamMonitor.BeamFitter.TrackCollection = cms.untracked.InputTag('generalTracks')
process.offlinePrimaryVertices.TrackLabel = cms.InputTag("generalTracks")
process.offlinePrimaryVertices.label=cms.string("")
process.offlinePrimaryVertices.minNdof=cms.double(0.0)
process.offlinePrimaryVertices.useBeamConstraint=cms.bool(False)
#TriggerName for selecting pv for DIP publication, NO wildcard needed here
#it will pick all triggers which has these strings in theri name
process.dqmBeamMonitor.jetTrigger = cms.untracked.vstring("HLT_ZeroBias_v",
"HLT_Jet300_v",
"HLT_QuadJet70_v")
process.dqmBeamMonitor.hltResults = cms.InputTag("TriggerResults","","HLT")
#fast general track reco
process.iterTracking =cms.Sequence(process.InitialStep
*process.LowPtTripletStep
*process.PixelPairStep
*process.DetachedTripletStep
*process.MixedTripletStep
*process.PixelLessStep
*process.TobTecStep
*process.generalTracks)
process.tracking_FirstStep = cms.Sequence(process.siPixelDigis
*process.siStripDigis
*process.trackerlocalreco
*process.offlineBeamSpot
*process.recopixelvertexing
*process.iterTracking)
process.p = cms.Path(process.scalersRawToDigi
*process.dqmTKStatus
*process.hltTriggerTypeFilter
*process.dqmcommon
*process.tracking_FirstStep
*process.offlinePrimaryVertices
*process.monitor)
#--------------------------------------------------
# Heavy Ion Stuff
#--------------------------------------------------
if (process.runType.getRunType() == process.runType.hi_run):
print "Running HI"
process.castorDigis.InputLabel = cms.InputTag("rawDataRepacker")
process.csctfDigis.producer = cms.InputTag("rawDataRepacker")
process.dttfDigis.DTTF_FED_Source = cms.InputTag("rawDataRepacker")
process.ecalDigis.InputLabel = cms.InputTag("rawDataRepacker")
process.ecalPreshowerDigis.sourceTag = cms.InputTag("rawDataRepacker")
process.gctDigis.inputLabel = cms.InputTag("rawDataRepacker")
process.gtDigis.DaqGtInputTag = cms.InputTag("rawDataRepacker")
process.gtEvmDigis.EvmGtInputTag = cms.InputTag("rawDataRepacker")
process.hcalDigis.InputLabel = cms.InputTag("rawDataRepacker")
process.muonCSCDigis.InputObjects = cms.InputTag("rawDataRepacker")
process.muonDTDigis.inputLabel = cms.InputTag("rawDataRepacker")
process.muonRPCDigis.InputLabel = cms.InputTag("rawDataRepacker")
process.scalersRawToDigi.scalersInputTag = cms.InputTag("rawDataRepacker")
#----------------------------
# Event Source
#-----------------------------
process.EventStreamHttpReader.SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring(
'HLT_HI*'
)
)
process.dqmBeamMonitor.OnlineMode = True ## in MC the LS are not ordered??
process.dqmBeamMonitor.resetEveryNLumi = 10
process.dqmBeamMonitor.resetPVEveryNLumi = 10
process.dqmBeamMonitor.BeamFitter.MinimumTotalLayers = 3 ## using pixel triplets
process.dqmBeamMonitor.PVFitter.minNrVerticesForFit = 20
process.dqmBeamMonitor.jetTrigger = cms.untracked.vstring("HLT_HI")
process.dqmBeamMonitor.hltResults = cms.InputTag("TriggerResults","","HLT")
## Load Heavy Ion Sequence
process.load("Configuration.StandardSequences.ReconstructionHeavyIons_cff") ## HI sequences
# Select events based on the pixel cluster multiplicity
import HLTrigger.special.hltPixelActivityFilter_cfi
process.multFilter = HLTrigger.special.hltPixelActivityFilter_cfi.hltPixelActivityFilter.clone(
inputTag = cms.InputTag('siPixelClusters'),
minClusters = cms.uint32(150),
maxClusters = cms.uint32(50000)
)
process.filter_step = cms.Sequence( process.siPixelDigis
*process.siPixelClusters
#*process.multFilter
)
process.HIRecoForDQM = cms.Sequence( process.siPixelDigis
*process.siPixelClusters
*process.siPixelRecHits
*process.offlineBeamSpot
*process.hiPixelVertices
*process.hiPixel3PrimTracks
)
# use HI pixel tracking and vertexing
process.dqmBeamMonitor.BeamFitter.TrackCollection = cms.untracked.InputTag('hiPixel3PrimTracks')
process.dqmBeamMonitorBx.BeamFitter.TrackCollection = cms.untracked.InputTag('hiPixel3PrimTracks')
process.dqmBeamMonitor.primaryVertex = cms.untracked.InputTag('hiSelectedVertex')
process.dqmBeamMonitor.PVFitter.VertexCollection = cms.untracked.InputTag('hiSelectedVertex')
# make pixel vertexing less sensitive to incorrect beamspot
process.hiPixel3ProtoTracks.RegionFactoryPSet.RegionPSet.originRadius = 0.2
process.hiPixel3ProtoTracks.RegionFactoryPSet.RegionPSet.fixedError = 0.5
process.hiSelectedProtoTracks.maxD0Significance = 100
process.hiPixelAdaptiveVertex.TkFilterParameters.maxD0Significance = 100
process.hiPixelAdaptiveVertex.vertexCollections.useBeamConstraint = False
#not working due to wrong tag of reco
process.hiPixelAdaptiveVertex.vertexCollections.maxDistanceToBeam = 1.0
process.p = cms.Path(process.scalersRawToDigi
*process.dqmTKStatus
*process.hltTriggerTypeFilter
*process.filter_step
*process.HIRecoForDQM
*process.dqmcommon
*process.monitor)
| 42.410156 | 127 | 0.644377 |
405c6e44b37edbad093dd87de80a9e8b880c990d | 3,036 | py | Python | tests/routes/test_hackers.py | TorrentofShame/hackathon-2021-backend | a85989421df8ad900b01ad026dbe713312b0a54e | [
"MIT"
] | null | null | null | tests/routes/test_hackers.py | TorrentofShame/hackathon-2021-backend | a85989421df8ad900b01ad026dbe713312b0a54e | [
"MIT"
] | null | null | null | tests/routes/test_hackers.py | TorrentofShame/hackathon-2021-backend | a85989421df8ad900b01ad026dbe713312b0a54e | [
"MIT"
] | null | null | null | # flake8: noqa
import json
from src.models.hacker import Hacker
from tests.base import BaseTestCase
from datetime import datetime
| 28.641509 | 96 | 0.560606 |
405e5ce74a48720ac95f86fcad8f93d05cb3edfc | 13,330 | py | Python | open_cp/sources/chicago.py | sumau/PredictCode | e2a2d5a8fa5d83f011c33e18d4ce6ac7e1429aa8 | [
"Artistic-2.0"
] | 18 | 2017-04-19T09:17:19.000Z | 2021-05-24T08:53:28.000Z | open_cp/sources/chicago.py | sumau/PredictCode | e2a2d5a8fa5d83f011c33e18d4ce6ac7e1429aa8 | [
"Artistic-2.0"
] | 8 | 2017-06-11T17:46:35.000Z | 2021-06-07T10:49:10.000Z | open_cp/sources/chicago.py | sumau/PredictCode | e2a2d5a8fa5d83f011c33e18d4ce6ac7e1429aa8 | [
"Artistic-2.0"
] | 10 | 2017-07-19T18:29:37.000Z | 2020-11-12T22:06:45.000Z | """
sources.chicago
===============
Reads a CSV file in the format (as of April 2017) of data available from:
- https://catalog.data.gov/dataset/crimes-one-year-prior-to-present-e171f
- https://catalog.data.gov/dataset/crimes-2001-to-present-398a4
The default data is loaded from a file "chicago.csv" which should be downloaded
from one of the above links. The format of the data, frustratingly, differs
between the snapshot of last year, and the total.
The data is partly anonymous in that the address within a block is obscured,
while the geocoding seems complicated (work in progress to understand)...
The crime type "HOMICIDE" is reported multiple times in the dataset.
"""
import csv as _csv
import os.path as _path
import datetime
import numpy as _np
from ..data import TimedPoints
_datadir = None
_default_filename = "chicago.csv"
_FEET_IN_METERS = 3937 / 1200
def set_data_directory(datadir):
"""Set the default location for search for the default input file."""
global _datadir
_datadir = datadir
def get_default_filename():
"""Returns the default filename, if available. Otherwise raises
AttributeError.
"""
global _datadir
if _datadir is None:
raise AttributeError("datadir not set; call `set_data_directory()`.")
return _path.join(_datadir, _default_filename)
def date_from_iso(iso_string):
"""Convert a datetime string in ISO format into a :class:`datetime`
instance.
:param iso_string: Like "2017-10-23T05:12:39"
:return: A :class:`datetime` instance.
"""
return datetime.datetime.strptime(iso_string, "%Y-%m-%dT%H:%M:%S")
_FIELDS = {
"snapshot" : {
"_DESCRIPTION_FIELD" : ' PRIMARY DESCRIPTION',
"_X_FIELD" : 'X COORDINATE',
"_Y_FIELD" : 'Y COORDINATE',
"_TIME_FIELD" : 'DATE OF OCCURRENCE',
"_GEOJSON_LOOKUP" : {"case": 'CASE#',
"address": "BLOCK",
"location": ' LOCATION DESCRIPTION',
"crime": ' PRIMARY DESCRIPTION',
"type": ' SECONDARY DESCRIPTION',
"timestamp": 'DATE OF OCCURRENCE'},
"GEOJSON_COORDS" : ('LONGITUDE', 'LATITUDE'),
"DT_CONVERT" : _date_from_csv
},
"all" : {
"_DESCRIPTION_FIELD" : 'Primary Type',
"_X_FIELD" : 'X Coordinate',
"_Y_FIELD" : 'Y Coordinate',
"_TIME_FIELD" : 'Date',
"_GEOJSON_LOOKUP" : {"case": 'Case Number',
"address": "Block",
"location": 'Location Description',
"crime": 'Primary Type',
"type": 'Description',
"timestamp": 'Date'},
"GEOJSON_COORDS" : ('Longitude', 'Latitude'),
"DT_CONVERT" : _date_from_csv
},
"gen" : {
"_DESCRIPTION_FIELD" : 'CRIME',
"_X_FIELD" : 'X',
"_Y_FIELD" : 'Y',
"_TIME_FIELD" : 'TIMESTAMP',
"_GEOJSON_LOOKUP" : {"case": 'CASE',
"address": "BLOCK",
"location": 'LOCATION',
"crime": 'CRIME',
"type": 'SUB-TYPE',
"timestamp": 'TIMESTAMP'},
"GEOJSON_COORDS" : ('X', 'Y'),
"DT_CONVERT" : _date_from_csv
}
}
_FIELDS["all_other"] = dict(_FIELDS["all"])
_FIELDS["all_other"]["DT_CONVERT"] = _date_from_other
def default_burglary_data():
"""Load the default data, if available, giving just "THEFT" data.
:return: An instance of :class:`open_cp.data.TimedPoints` or `None`.
"""
try:
return load(get_default_filename(), {"THEFT"})
except Exception:
return None
def load(file, primary_description_names, to_meters=True, type="snapshot"):
"""Load data from a CSV file in the expected format.
:param file: Name of the CSV file load, or a file-like object.
:param primary_description_names: Set of names to search for in the
"primary description field". E.g. pass `{"THEFT"}` to return only the
"theft" crime type.
:param to_meters: Convert the coordinates to meters; True by default.
:param type: Either "snapshot" or "all" depending on whether the data
has headers conforming the the data "last year" or "2001 to present".
:return: An instance of :class:`open_cp.data.TimedPoints` or `None`.
"""
dic = _get_dic(type)
if isinstance(file, str):
with open(file) as file:
data = _load_to_list(file, dic, primary_description_names)
else:
data = _load_to_list(file, dic, primary_description_names)
data.sort(key = lambda triple : triple[0])
xcoords = _np.empty(len(data))
ycoords = _np.empty(len(data))
for i, (_, x, y) in enumerate(data):
xcoords[i], ycoords[i] = x, y
times = [t for t, _, _ in data]
if to_meters:
xcoords /= _FEET_IN_METERS
ycoords /= _FEET_IN_METERS
return TimedPoints.from_coords(times, xcoords, ycoords)
def load_to_GeoJSON(filename, type="snapshot"):
    """Load the specified CSV file to a list of GeoJSON (see
    http://geojson.org/) features.  Events with no location data have `None`
    as the geometry.  Timestamps are converted to standard ISO string format.

    The returned "properties" have these keys:
      - "case" for the "CASE#" field
      - "crime" for the "PRIMARY DESCRIPTION" field
      - "type" for the "SECONDARY DESCRIPTION" field
      - "location" for the "LOCATION DESCRIPTION" field
      - "timestamp" for the "DATE OF OCCURRENCE" field
      - "address" for the "BLOCK" field

    :param filename: Filename of the CSV file to process
    :param type: Either "snapshot" or "all" depending on whether the data
      has headers conforming to the data "last year" or "2001 to present".
    :return: List of Python dictionaries in GeoJSON format.
    """
    features = generate_GeoJSON_Features(filename, type)
    return [feature for feature in features]
try:
import geopandas as gpd
import shapely.geometry as _geometry
except:
gpd = None
_geometry = None
def convert_null_geometry_to_empty(frame):
    """Utility method.  Replace every "null" geometry (`None` or empty) in
    the geoDataFrame with an empty Point geometry.  The resulting
    geoDataFrame is suitable for projecting and other geometrical
    transformations.
    """
    replaced = frame.geometry.map(null_to_point)
    return frame.set_geometry(replaced)
def convert_null_geometry_to_none(frame):
    """Utility method.  Replace every "null" geometry (`None` or empty) in
    the geoDataFrame with `None`.  The resulting geoDataFrame is suitable
    for saving.
    """
    replaced = frame.geometry.map(null_to_none)
    return frame.set_geometry(replaced)
def load_to_geoDataFrame(filename, datetime_as_string=True,
        type="snapshot", empty_geometry="none"):
    """Return the same data as :func:`load_to_GeoJSON` but as a geoPandas
    data-frame.

    :param filename: Filename of the CSV file to process
    :param datetime_as_string: Write the timestamp as an ISO formatted string.
      Defaults to True which is best for saving the dataframe as e.g. a shape
      file.  Set to False to get timestamps as python objects, which is best
      for using (geo)pandas to analyse the data.
    :param type: Either "snapshot" or "all" depending on whether the data
      has headers conforming to the data "last year" or "2001 to present".
    :param empty_geometry: Either "none" to return `None` as the geometry of
      crimes which have no location data in the CSV file (this is correct if
      you wish to save the data-frame); or "empty" to return an empty `Point`
      type (which is correct, for example, if you wish to re-project the
      data-frame).
    """
    features = load_to_GeoJSON(filename, type=type)
    if not datetime_as_string:
        for feature in features:
            props = feature["properties"]
            props["timestamp"] = _date_from_iso(props["timestamp"])
    frame = gpd.GeoDataFrame.from_features(features)
    if empty_geometry == "empty":
        frame = convert_null_geometry_to_empty(frame)
    elif empty_geometry != "none":
        raise ValueError("Unknown `empty_geometry` parameter `{}`".format(empty_geometry))
    # Raw coordinates are longitude/latitude (WGS84).
    frame.crs = {"init":"epsg:4326"}
    return frame
_sides = None

def get_side(name):
    """Return a geometry (a polygon, typically) of the outline of the shape
    of the given "side" of Chicago, projected to {"init":"epsg:2790"}, which
    is Illinois in metres.

    Needs the file "Chicago_Areas.geojson" to be in the "datadir".  This can
    be downloaded from:
    https://data.cityofchicago.org/Facilities-Geographic-Boundaries/Boundaries-Community-Areas-current-/cauq-8yn6

    :param name: One of "Far North", "Northwest", "North", "West", "Central",
      "South", "Southwest", "Far Southwest", "Far Southeast"
    """
    _load_sides()
    matching = _sides[_sides.side == name]
    return matching.unary_union
| 37.130919 | 113 | 0.641485 |
405e96dac8375ff59b836544a212c81d70fbb3ff | 2,140 | py | Python | Codility/Lesson/0011.Sieve-of-Eratosthenes/CountSemiprimes/CountSemiprimes.py | kimi0230/LeetcodeGolang | 2b276e49b67d7f66731ce6c629cd1390642af230 | [
"MIT"
] | 4 | 2021-07-21T01:16:11.000Z | 2022-01-11T07:43:51.000Z | Codility/Lesson/0011.Sieve-of-Eratosthenes/CountSemiprimes/CountSemiprimes.py | kimi0230/LeetcodeGolang | 2b276e49b67d7f66731ce6c629cd1390642af230 | [
"MIT"
] | null | null | null | Codility/Lesson/0011.Sieve-of-Eratosthenes/CountSemiprimes/CountSemiprimes.py | kimi0230/LeetcodeGolang | 2b276e49b67d7f66731ce6c629cd1390642af230 | [
"MIT"
] | null | null | null | # https://github.com/Anfany/Codility-Lessons-By-Python3/blob/master/L11_Sieve%20of%20Eratosthenes/11.2%20CountSemiprimes.md
def solution(N, P, Q):
    """Count semiprimes in the inclusive range [P[k], Q[k]] for each query k.

    A semiprime is the product of exactly two (not necessarily distinct)
    primes, e.g. 4 = 2*2, 6 = 2*3, 9 = 3*3.

    Fixes over the previous version: removed leftover debug ``print`` calls,
    and replaced the per-number trial division with a smallest-prime-factor
    sieve, giving O(N log log N + M) instead of O(N * sqrt(N)).

    :param N: Upper bound of the numbers considered.
    :param P: List of query lower bounds (1 <= P[k] <= Q[k] <= N).
    :param Q: List of query upper bounds.
    :return: List with, for each query k, the number of semiprimes in
      [P[k], Q[k]].
    """
    # Sieve of smallest prime factors: spf[n] == 0 means n is prime (n >= 2).
    spf = [0] * (N + 1)
    i = 2
    while i * i <= N:
        if spf[i] == 0:
            for j in range(i * i, N + 1, i):
                if spf[j] == 0:
                    spf[j] = i
        i += 1

    # prefix[n] = number of semiprimes in [1, n].
    prefix = [0] * (N + 1)
    count = 0
    for n in range(2, N + 1):
        factor = spf[n]
        # n is a semiprime iff it is composite (factor != 0) and dividing out
        # its smallest prime factor leaves a prime.
        if factor != 0 and spf[n // factor] == 0:
            count += 1
        prefix[n] = count

    # Inclusive range count: semiprimes in [p, q] = prefix[q] - prefix[p-1].
    return [prefix[q] - prefix[p - 1] for p, q in zip(P, Q)]
if __name__ == '__main__':
    # Smoke test with the Codility example; the expected result is [10, 4, 0].
    solution(26,[1, 4, 16],[26, 10, 20])
4060cef76afd120f8b88cf8abb7104b1c967dfca | 2,614 | py | Python | src/zope/formlib/errors.py | zopefoundation/zope.formlib | af2d587a6eb24e59e95a8b1feb7aafc5d3b87ba4 | [
"ZPL-2.1"
] | 4 | 2018-05-09T04:16:25.000Z | 2021-03-05T17:27:21.000Z | src/zope/formlib/errors.py | zopefoundation/zope.formlib | af2d587a6eb24e59e95a8b1feb7aafc5d3b87ba4 | [
"ZPL-2.1"
] | 25 | 2016-03-24T15:23:08.000Z | 2021-03-05T16:53:53.000Z | src/zope/formlib/errors.py | zopefoundation/zope.formlib | af2d587a6eb24e59e95a8b1feb7aafc5d3b87ba4 | [
"ZPL-2.1"
] | 5 | 2015-02-11T13:32:06.000Z | 2018-05-09T04:16:26.000Z | ##############################################################################
#
# Copyright (c) 2006 Zope Foundation and Contributors.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Error related things.
"""
try:
from html import escape
except ImportError: # pragma: NO COVER
from cgi import escape
from zope.component import adapter
from zope.interface import implementer
from zope.interface import Invalid
from zope.i18n import Message
from zope.i18n import translate
from zope.publisher.interfaces.browser import IBrowserRequest
from zope.publisher.browser import BrowserPage
from zope.formlib.interfaces import IWidgetInputErrorView
from zope.formlib.interfaces import IInvalidCSRFTokenError
| 34.394737 | 78 | 0.653405 |
4061946ebfbadada4a68b023604bd5475c508749 | 6,090 | py | Python | src/packagedcode/about.py | sthagen/nexB-scancode-toolkit | 12cc1286df78af898fae76fa339da2bb50ad51b9 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | src/packagedcode/about.py | sthagen/nexB-scancode-toolkit | 12cc1286df78af898fae76fa339da2bb50ad51b9 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | src/packagedcode/about.py | sthagen/nexB-scancode-toolkit | 12cc1286df78af898fae76fa339da2bb50ad51b9 | [
"Apache-2.0",
"CC-BY-4.0"
] | null | null | null | #
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import io
import os
from pathlib import Path
import saneyaml
from packagedcode import models
from packageurl import PackageURL
# TODO: Override get_package_resource so it returns the Resource that the ABOUT file is describing
# Debug tracing is opt-in via the SCANCODE_DEBUG_PACKAGE environment variable;
# any non-empty value enables DEBUG-level logging to stdout for this module.
TRACE = os.environ.get('SCANCODE_DEBUG_PACKAGE', False)
if TRACE:
    import logging
    import sys
    logger = logging.getLogger(__name__)
    logging.basicConfig(stream=sys.stdout)
    logger.setLevel(logging.DEBUG)
| 36.25 | 98 | 0.621182 |
4061e49b5b1d7dddbcbb3f8df2b62b73c065877a | 2,359 | py | Python | gazepattern/eyedetector/admin.py | AriRodriguezCruz/mcfgpr | c6f83f8e68bbab0054a7ea337feab276fc0790fc | [
"MIT"
] | null | null | null | gazepattern/eyedetector/admin.py | AriRodriguezCruz/mcfgpr | c6f83f8e68bbab0054a7ea337feab276fc0790fc | [
"MIT"
] | 12 | 2020-06-05T22:56:39.000Z | 2022-02-10T10:35:13.000Z | gazepattern/eyedetector/admin.py | AriRodriguezCruz/mcfgpr | c6f83f8e68bbab0054a7ea337feab276fc0790fc | [
"MIT"
] | 1 | 2019-10-06T23:40:45.000Z | 2019-10-06T23:40:45.000Z | # -*- coding: utf-8 -*-
#django
from django.contrib import admin
from django.db import transaction
#python
import csv
from decimal import Decimal
#gazepattern
from .models import Experiment, ExperimentPoint, Image, ImageRectangle, ExperimentPointCSV, ExperimentFunction
# Label shown in the Django admin actions dropdown for the `procesar` action
# (Spanish: "Process CSV to generate experiment points").
# NOTE(review): `procesar` and the *Admin classes below are expected to be
# defined earlier in this module -- confirm they exist in the full file.
procesar.short_description = "Procesar CSV para generar experiments points"
# Register each model with its ModelAdmin so it is editable in the admin site.
admin.site.register(ExperimentPointCSV, ExperimentPointCSVAdmin)
admin.site.register(ExperimentPoint, ExperimentPointAdmin)
admin.site.register(Image, ImageAdmin)
admin.site.register(Experiment, ExperimentAdmin)
admin.site.register(ImageRectangle, ImageRectangleAdmin)
admin.site.register(ExperimentFunction, ExperimentFunctionAdmin)
4061ef1026efc595fdfdf42014af88613e5012a6 | 2,634 | py | Python | orders/tests/test_views.py | ms0680146/Order_System | 934c3849ad0d72c0ce560706a6857870935e8599 | [
"MIT"
] | null | null | null | orders/tests/test_views.py | ms0680146/Order_System | 934c3849ad0d72c0ce560706a6857870935e8599 | [
"MIT"
] | null | null | null | orders/tests/test_views.py | ms0680146/Order_System | 934c3849ad0d72c0ce560706a6857870935e8599 | [
"MIT"
] | null | null | null | from django.test import TestCase, Client
from django.urls import reverse
from orders.models import Order, OrderItem
from datetime import datetime
from django.utils.timezone import get_current_timezone
import pytz | 34.207792 | 118 | 0.612756 |
406203c920d38242adfa5e5ed2a39070a52fd1c1 | 373 | py | Python | codigo/hexagonal/app/adapter/light_bulb_repository.py | VulturARG/charla_01 | 43a53fded4f3205a02b00993a523e2f94b79fc99 | [
"Apache-2.0"
] | null | null | null | codigo/hexagonal/app/adapter/light_bulb_repository.py | VulturARG/charla_01 | 43a53fded4f3205a02b00993a523e2f94b79fc99 | [
"Apache-2.0"
] | null | null | null | codigo/hexagonal/app/adapter/light_bulb_repository.py | VulturARG/charla_01 | 43a53fded4f3205a02b00993a523e2f94b79fc99 | [
"Apache-2.0"
] | null | null | null | from codigo.hexagonal.app.domain.switchable_repository import Switchable
| 26.642857 | 72 | 0.646113 |
4062ba894ee618c56f6c5822e3859495a6c3298f | 541 | py | Python | aula12/ex1.py | otaviobizulli/python-exercices | 2c61f014bf481fa463721b174ddd4238bf8d0cb3 | [
"MIT"
] | null | null | null | aula12/ex1.py | otaviobizulli/python-exercices | 2c61f014bf481fa463721b174ddd4238bf8d0cb3 | [
"MIT"
] | null | null | null | aula12/ex1.py | otaviobizulli/python-exercices | 2c61f014bf481fa463721b174ddd4238bf8d0cb3 | [
"MIT"
] | null | null | null | from random import randint
# Build a 10x10 matrix of random values in [1, 99].
m = [[randint(1, 99) for _ in range(10)] for _ in range(10)]

# Display the matrix, right-aligning each value in two columns.
for row in m:
    for value in row:
        print(f'{value:2}', end=' ')
    print()

# Locate the largest value and the (first) row that contains it.
maior = 0
linha = 0
for indice, row in enumerate(m):
    for value in row:
        if value > maior:
            maior = value
            linha = indice

# The "minimax" is the smallest value of the row holding the maximum.
menor = min(m[linha])

print(f'o minimax {menor}, com o maior sendo {maior} na linha {linha+1}.')
| 16.393939 | 76 | 0.51756 |
4063e065b5e1d8a9952507fe4d95419e55a2613a | 1,153 | py | Python | src/token_classification/format.py | adriens63/BERT_fine_tuning_for_MLM_and_token_classification | 89ff0d8ed12da370b1f8757ae9db8d725143a5bb | [
"Apache-2.0"
] | null | null | null | src/token_classification/format.py | adriens63/BERT_fine_tuning_for_MLM_and_token_classification | 89ff0d8ed12da370b1f8757ae9db8d725143a5bb | [
"Apache-2.0"
] | 1 | 2021-12-10T15:26:05.000Z | 2021-12-10T15:26:05.000Z | src/token_classification/format.py | adriens63/BERT_fine_tuning_for_MLM_and_token_classification | 89ff0d8ed12da370b1f8757ae9db8d725143a5bb | [
"Apache-2.0"
] | 3 | 2021-12-05T12:43:23.000Z | 2021-12-10T15:42:40.000Z | import os.path as osp
import argparse
import yaml
from src.token_classification.archs.data_formatter import *
# ********************* launch formating ***********************
# cmd to launch : python -m src.token_classification.format --config ./src/token_classification/config/config.yml
if __name__ == '__main__':
parser = argparse.ArgumentParser(description = 'formatting for labeling')
parser.add_argument('--config', type=str, required=True, help='path to yaml config')
args = parser.parse_args()
with open(args.config, 'r') as f:
config = yaml.safe_load(f)
asigning_variables(config)
print('.... Start formatting')
path = osp.join(config['path'], config['offres'])
yaml_path = osp.join(config['path'], config['yaml'])
formatter = Formatter(path, yaml_path)
formatter.generate_name()
formatter.load()
formatter.sort_desc()
formatter.format_to_jsonl_in_proportions(n_desc = config['n_sequences'])
print('done;')
print()
print('/!\ Be careful to change the owner of the file before pasting it in doccano with the following command : sudo chown <user> <file>')
| 32.942857 | 142 | 0.674761 |
4063f5350f19ec0fcf289e841719b7191b72872c | 6,393 | py | Python | add.py | cleolepart/timedomain | 340e3fa614bca2dc333c9723893951318356dccf | [
"MIT"
] | null | null | null | add.py | cleolepart/timedomain | 340e3fa614bca2dc333c9723893951318356dccf | [
"MIT"
] | null | null | null | add.py | cleolepart/timedomain | 340e3fa614bca2dc333c9723893951318356dccf | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
import os, sys, time
import numpy as np
import scipy.sparse
import scipy.linalg
import scipy.sparse.linalg
from astropy.table import Table, Column
import multiprocessing
from desiutil.log import get_logger
from desispec.interpolation import resample_flux
from desispec.spectra import Spectra
from desispec.resolution import Resolution
from desispec.fiberbitmasking import get_all_fiberbitmask_with_amp, get_all_nonamp_fiberbitmask_val, get_justamps_fiberbitmask
from desispec.specscore import compute_coadd_scores
from desispec.coaddition import coadd_fibermap
def add(spectra, cosmics_nsig=0.) :
    """
    Sum (stack) the spectra of each target for each camera, in place.

    Unlike a weighted coadd, fluxes are *summed* over a target's exposures,
    so variances add as well: the combined inverse variance is
    ``1 / sum(1/ivar)``.  Exposures with a bad FIBERSTATUS for the band are
    excluded, and an optional gradient-based sigma clipping masks
    cosmic-ray residuals before combining.

    Args:
       spectra: desispec.spectra.Spectra object (modified in place)
    Options:
       cosmics_nsig: float, nsigma clipping threshold for cosmics rays;
          only applied when > 0 and the target has at least 3 exposures
    """
    log = get_logger()
    targets = np.unique(spectra.fibermap["TARGETID"])
    ntarget=targets.size
    log.debug("number of targets= {}".format(ntarget))
    for b in spectra.bands :
        log.debug("coadding band '{}'".format(b))
        nwave=spectra.wave[b].size
        # Output arrays: one combined spectrum per target for this band.
        tflux=np.zeros((ntarget,nwave),dtype=spectra.flux[b].dtype)
        tivar=np.zeros((ntarget,nwave),dtype=spectra.ivar[b].dtype)
        if spectra.mask is not None :
            tmask=np.zeros((ntarget,nwave),dtype=spectra.mask[b].dtype)
        else :
            tmask=None
        trdata=np.zeros((ntarget,spectra.resolution_data[b].shape[1],nwave),dtype=spectra.resolution_data[b].dtype)
        # Exposures whose fiber is flagged for this band/amp are dropped.
        fiberstatus_bits = get_all_fiberbitmask_with_amp(b)
        good_fiberstatus = ( (spectra.fibermap["FIBERSTATUS"] & fiberstatus_bits) == 0 )
        for i,tid in enumerate(targets) :
            jj=np.where( (spectra.fibermap["TARGETID"]==tid) & good_fiberstatus )[0]

            #- if all spectra were flagged as bad (FIBERSTATUS != 0), contine
            #- to next target, leaving tflux and tivar=0 for this target
            if len(jj) == 0:
                continue

            if cosmics_nsig is not None and cosmics_nsig > 0 and len(jj)>2 :
                # interpolate over bad measurements
                # to be able to compute gradient next
                # to a bad pixel and identify outlier
                # many cosmics residuals are on edge
                # of cosmic ray trace, and so can be
                # next to a masked flux bin
                grad=[]
                gradvar=[]
                for j in jj :
                    if spectra.mask is not None :
                        ttivar = spectra.ivar[b][j]*(spectra.mask[b][j]==0)
                    else :
                        ttivar = spectra.ivar[b][j]
                    good = (ttivar>0)
                    bad = ~good
                    if np.sum(good)==0 :
                        continue
                    nbad = np.sum(bad)
                    ttflux = spectra.flux[b][j].copy()
                    if nbad>0 :
                        ttflux[bad] = np.interp(spectra.wave[b][bad],spectra.wave[b][good],ttflux[good])
                    ttivar = spectra.ivar[b][j].copy()
                    if nbad>0 :
                        ttivar[bad] = np.interp(spectra.wave[b][bad],spectra.wave[b][good],ttivar[good])
                    # First difference of the flux approximates the gradient;
                    # its variance is the sum of the two neighbouring bins'.
                    ttvar = 1./(ttivar+(ttivar==0))
                    ttflux[1:] = ttflux[1:]-ttflux[:-1]
                    ttvar[1:] = ttvar[1:]+ttvar[:-1]
                    ttflux[0] = 0
                    grad.append(ttflux)
                    gradvar.append(ttvar)

            # Since fluxes are summed below (not averaged), variances add:
            # combined ivar = 1 / sum(1/ivar).
            #tivar_unmasked= np.sum(spectra.ivar[b][jj],axis=0)
            tivar_unmasked = 1 / np.sum(1/spectra.ivar[b][jj],axis=0)
            if spectra.mask is not None :
                ivarjj=spectra.ivar[b][jj]*(spectra.mask[b][jj]==0)
            else :
                ivarjj=spectra.ivar[b][jj]
            if cosmics_nsig is not None and cosmics_nsig > 0 and len(jj)>2 :
                # Per-bin chi2 of the gradients across exposures; bins above
                # cosmics_nsig**2 get their most deviant exposure masked.
                grad=np.array(grad)
                gradvar=np.array(gradvar)
                gradivar=(gradvar>0)/np.array(gradvar+(gradvar==0))
                nspec=grad.shape[0]
                # NOTE(review): sgradivar is a *scalar* (summed over both
                # exposures and wavelength), so meangrad is normalized by the
                # global weight sum rather than a per-bin sum of weights --
                # confirm this is intentional.
                sgradivar=np.sum(gradivar)
                if sgradivar>0 :
                    meangrad=np.sum(gradivar*grad,axis=0)/sgradivar
                    deltagrad=grad-meangrad
                    chi2=np.sum(gradivar*deltagrad**2,axis=0)/(nspec-1)

                    bad = (chi2>cosmics_nsig**2)
                    nbad = np.sum(bad)
                    if nbad>0 :
                        log.info("masking {} values for targetid={}".format(nbad,tid))
                        badindex=np.where(bad)[0]
                        for bi in badindex :
                            # Mask only the single most deviant exposure in
                            # each flagged wavelength bin.
                            k=np.argmax(gradivar[:,bi]*deltagrad[:,bi]**2)
                            ivarjj[k,bi]=0.
                            log.debug("masking spec {} wave={}".format(k,spectra.wave[b][bi]))

            # Combine: ivar from the variance sum, flux from the plain sum.
            #tivar[i]=np.sum(ivarjj,axis=0)
            tivar[i]= 1 / np.sum(1/ivarjj,axis=0)
            tflux[i]=np.sum(spectra.flux[b][jj],axis=0)
            for r in range(spectra.resolution_data[b].shape[1]) :
                trdata[i,r]=np.sum((spectra.resolution_data[b][jj,r]),axis=0) # not sure applying mask is wise here
            # Bins where every exposure was masked: fall back to the
            # unmasked inputs so the output stays defined.
            bad=(tivar[i]==0)
            if np.sum(bad)>0 :
                tivar[i][bad] = 1 / np.sum(1/spectra.ivar[b][jj][:,bad],axis=0) # if all masked, keep original ivar
                tflux[i][bad] = np.sum(spectra.flux[b][jj][:,bad],axis=0)
            # NOTE(review): this `ok` is dead code (immediately recomputed
            # below); the commented-out normalization suggests this function
            # was derived from a weighted coadd.
            ok=(tivar[i]>0)
            #if np.sum(ok)>0 :
                #tflux[i][ok] /= tivar[i][ok]
            # NOTE(review): the summed resolution rows are divided by the
            # combined ivar (1/sum(1/ivar)) -- confirm this normalization of
            # the resolution matrix is the intended one.
            ok=(tivar_unmasked>0)
            if np.sum(ok)>0 :
                trdata[i][:,ok] /= tivar_unmasked[ok]
            if spectra.mask is not None :
                tmask[i] = np.bitwise_or.reduce(spectra.mask[b][jj],axis=0)
        spectra.flux[b] = tflux
        spectra.ivar[b] = tivar
        if spectra.mask is not None :
            spectra.mask[b] = tmask
        spectra.resolution_data[b] = trdata
    # Preserve the original scores (keyed by TARGETID) so the coadd scores
    # can be recomputed for the combined fibermap.
    if spectra.scores is not None:
        orig_scores = Table(spectra.scores.copy())
        orig_scores['TARGETID'] = spectra.fibermap['TARGETID']
    else:
        orig_scores = None
    spectra.fibermap=coadd_fibermap(spectra.fibermap)
    spectra.scores=None
    compute_coadd_scores(spectra, orig_scores, update_coadd=True)
| 40.980769 | 126 | 0.552323 |
40642da36f0613fe957f14edea19df84f13b530a | 2,525 | py | Python | pontoon/pretranslation/tests/test_pretranslate.py | timvisee/pontoon | aec1ef7b5c5d56c3be28fecf1147945d2622bbad | [
"BSD-3-Clause"
] | null | null | null | pontoon/pretranslation/tests/test_pretranslate.py | timvisee/pontoon | aec1ef7b5c5d56c3be28fecf1147945d2622bbad | [
"BSD-3-Clause"
] | null | null | null | pontoon/pretranslation/tests/test_pretranslate.py | timvisee/pontoon | aec1ef7b5c5d56c3be28fecf1147945d2622bbad | [
"BSD-3-Clause"
] | null | null | null | from mock import patch
import pytest
from pontoon.base.models import User
from pontoon.pretranslation.pretranslate import get_translations
from pontoon.test.factories import (
EntityFactory,
TranslationMemoryFactory,
)
| 34.121622 | 87 | 0.693069 |
406526a2d40a76aa8b9a7ce0c6aadecb3ce65af4 | 9,615 | py | Python | cubes/common.py | digitalsatori/cubes | 140133e8c2e3f2ff60631cc3ebc9966d16c1655e | [
"MIT"
] | 1,020 | 2015-01-02T03:05:26.000Z | 2022-02-12T18:48:51.000Z | cubes/common.py | digitalsatori/cubes | 140133e8c2e3f2ff60631cc3ebc9966d16c1655e | [
"MIT"
] | 259 | 2015-01-02T22:35:14.000Z | 2021-09-02T04:20:41.000Z | cubes/common.py | digitalsatori/cubes | 140133e8c2e3f2ff60631cc3ebc9966d16c1655e | [
"MIT"
] | 288 | 2015-01-08T00:42:26.000Z | 2022-03-31T17:25:10.000Z | # -*- encoding: utf-8 -*-
"""Utility functions for computing combinations of dimensions and hierarchy
levels"""
from __future__ import absolute_import
import re
import os.path
import json
from collections import OrderedDict
from .errors import ModelInconsistencyError, ArgumentError, ConfigurationError
from . import compat
__all__ = [
"IgnoringDictionary",
"MissingPackage",
"localize_common",
"localize_attributes",
"get_localizable_attributes",
"decamelize",
"to_identifier",
"assert_instance",
"assert_all_instances",
"read_json_file",
"sorted_dependencies",
]
def assert_instance(obj, class_, label):
    """Raise :class:`ModelInconsistencyError` when `obj` is not an instance
    of `class_`.  `label` names the offending object in the error message."""
    if isinstance(obj, class_):
        return
    raise ModelInconsistencyError(
        "%s should be sublcass of %s, provided: %s"
        % (label, class_.__name__, type(obj).__name__))
def assert_all_instances(list_, class_, label="object"):
    """Raise :class:`ModelInconsistencyError` when any object in `list_` is
    not an instance of `class_`.

    :param list_: iterable of objects to check; `None` is treated as empty.
    :param class_: required class of every element.
    :param label: name used for the objects in the error message.
    """
    for obj in list_ or []:
        # Bug fix: the original passed the hard-coded label="object" here,
        # silently ignoring the caller-supplied `label` argument.
        assert_instance(obj, class_, label=label)
def optional_import(name, feature=None, source=None, comment=None):
    """Optionally import package `name`.  If the package does not exist,
    return a placeholder object that raises an exception with a more
    detailed description about the missing package when used."""
    try:
        module = __import__(name)
    except ImportError:
        module = MissingPackage(name, feature, source, comment)
    return module
def expand_dictionary(record, separator='.'):
    """Return expanded dictionary: treat keys as paths separated by
    `separator`, creating nested sub-dictionaries as necessary."""
    expanded = {}
    for key, value in record.items():
        path = key.split(separator)
        node = expanded
        # Walk (and create) the intermediate levels of the path.
        for part in path[:-1]:
            node = node.setdefault(part, {})
        node[path[-1]] = value
    return expanded
def localize_common(obj, trans):
    """Localize common attributes: copy "label" and "description" from the
    `trans` dictionary onto `obj` when present."""
    for attribute in ("label", "description"):
        if attribute in trans:
            setattr(obj, attribute, trans[attribute])
def localize_attributes(attribs, translations):
    """Localize a collection of attributes.  `translations` is a dictionary
    keyed by attribute name; each value is a dictionary with localizable
    attribute metadata, such as ``label`` or ``description``."""
    for name, atrans in translations.items():
        localize_common(attribs[name], atrans)
def get_localizable_attributes(obj):
    """Returns a dictionary with localizable attributes of `obj`.

    Collects the non-empty `label` and `description` attributes; objects
    lacking either attribute simply contribute nothing for it.
    """
    # FIXME: use some kind of class attribute to get list of localizable attributes
    locale = {}
    for attribute in ("label", "description"):
        # Best-effort lookup, as before -- but with a narrow handler instead
        # of the original bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.
        try:
            value = getattr(obj, attribute)
        except Exception:
            continue
        if value:
            locale[attribute] = value
    return locale
def to_label(name, capitalize=True):
    """Converts `name` into a label by replacing underscores with spaces.
    If `capitalize` is ``True`` (default) then the first letter of the
    label is capitalized."""
    label = name.replace("_", " ")
    return label.capitalize() if capitalize else label
def coalesce_option_value(value, value_type, label=None):
    """Convert string into an object value of `value_type`. The type might be:
    `string` (no conversion), `integer`, `float`, `list` comma separated
    list of strings.

    :param value: raw value to convert.
    :param value_type: case-insensitive type name: ``string``/``str``,
      ``list``, ``float``, ``integer``/``int``, or ``bool``/``boolean``.
    :param label: optional option name used in error messages.
    :raises ArgumentError: when `value_type` is unknown or the conversion
      fails.
    """
    value_type = value_type.lower()

    try:
        if value_type in ('string', 'str'):
            return_value = str(value)
        elif value_type == 'list':
            if isinstance(value, compat.string_type):
                return_value = value.split(",")
            else:
                return_value = list(value)
        elif value_type == "float":
            return_value = float(value)
        elif value_type in ["integer", "int"]:
            return_value = int(value)
        elif value_type in ["bool", "boolean"]:
            if not value:
                return_value = False
            elif isinstance(value, compat.string_type):
                return_value = value.lower() in ["1", "true", "yes", "on"]
            else:
                return_value = bool(value)
        else:
            raise ArgumentError("Unknown option value type %s" % value_type)

    except ValueError:
        if label:
            label = "parameter %s " % label
        else:
            label = ""

        # Bug fix: the original referenced an undefined name `astring` here,
        # which raised NameError instead of the intended ArgumentError.
        raise ArgumentError("Unable to convert %svalue '%s' into type %s" %
                            (label, value, value_type))
    return return_value
def coalesce_options(options, types):
    """Coalesce the `options` dictionary according to the `types` dictionary.
    Keys in `types` refer to keys in `options`; values of `types` are value
    type names: string, list, float, integer or bool.  Options without a
    declared type are passed through unchanged."""
    coalesced = {}
    for key, value in options.items():
        declared = types.get(key)
        if declared is not None:
            coalesced[key] = coalesce_option_value(value, declared, key)
        else:
            coalesced[key] = value
    return coalesced
def read_json_file(path, kind=None):
    """Read JSON content from `path`.  Convenience wrapper that provides
    more descriptive exception handling; `kind` optionally names the kind
    of file for the error messages."""
    kind = "%s " % str(kind) if kind else ""

    if not os.path.exists(path):
        raise ConfigurationError("Can not find %sfile '%s'"
                                 % (kind, path))

    try:
        handle = compat.open_unicode(path)
    except IOError:
        raise ConfigurationError("Can not open %sfile '%s'"
                                 % (kind, path))

    try:
        content = json.load(handle)
    except ValueError as e:
        raise SyntaxError("Syntax error in %sfile %s: %s"
                          % (kind, path, str(e)))
    finally:
        handle.close()

    return content
def sorted_dependencies(graph):
    """Return keys from `graph` ordered by dependency (topological sort).
    `graph` is a dictionary where keys are strings and values are lists of
    strings; a key is assumed to depend on each of its values.

    Example::

        A ---> B -+--> C
                  |
                  +--> D --> E

    Will be: ``{"A": ["B"], "B": ["C", "D"], "D": ["E"],"E": []}``
    """
    # Work on a private copy with set-valued dependency lists.
    remaining = dict((node, set(deps)) for node, deps in graph.items())

    ordered = []
    # Start with every node that has no outstanding dependencies.
    ready = set(node for node, deps in remaining.items() if not deps)

    while ready:
        node = ready.pop()
        ordered.append(node)
        # Any node depending on `node` has that dependency satisfied now.
        for dependant, deps in remaining.items():
            if node in deps:
                deps.remove(node)
                if not deps:
                    ready.add(dependant)

    # Nodes still holding dependencies form at least one cycle.
    unresolved = [node for node, deps in remaining.items() if deps]
    if unresolved:
        raise ArgumentError("Cyclic dependency of: %s"
                            % ", ".join(unresolved))

    return ordered
40665e1c58be6db40c3e5c0613a58755896c8a6f | 4,366 | py | Python | wavenet_iaf.py | Ella77/ClariNet | 1a2eea899f5c28b34beb6fb08725f38309e7e053 | [
"MIT"
] | 126 | 2019-05-23T03:37:43.000Z | 2021-08-02T20:15:22.000Z | wavenet_iaf.py | Ella77/ClariNet | 1a2eea899f5c28b34beb6fb08725f38309e7e053 | [
"MIT"
] | 4 | 2019-06-05T11:30:51.000Z | 2022-03-17T09:01:29.000Z | wavenet_iaf.py | Ella77/ClariNet | 1a2eea899f5c28b34beb6fb08725f38309e7e053 | [
"MIT"
] | 24 | 2019-05-23T03:37:39.000Z | 2021-12-23T22:29:01.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
from modules import Conv, ResBlock
| 39.690909 | 111 | 0.584517 |
4066b6af1e7dfd680248198011c778dca827452b | 828 | py | Python | tests/__init__.py | ybelleguic/openbrokerapi | 9d6019dd1b6649c9d0cb0dee4b3236e0ee209dbc | [
"MIT"
] | 36 | 2017-10-06T15:16:21.000Z | 2021-07-30T16:25:59.000Z | tests/__init__.py | ybelleguic/openbrokerapi | 9d6019dd1b6649c9d0cb0dee4b3236e0ee209dbc | [
"MIT"
] | 167 | 2017-09-28T23:38:33.000Z | 2022-03-28T21:18:49.000Z | tests/__init__.py | ybelleguic/openbrokerapi | 9d6019dd1b6649c9d0cb0dee4b3236e0ee209dbc | [
"MIT"
] | 26 | 2017-09-29T13:46:38.000Z | 2022-01-05T08:49:25.000Z | try:
from gevent import monkey
monkey.patch_all()
except ImportError:
# fine if no gevent is available
pass
import base64
import logging
from unittest.mock import Mock
from flask.app import Flask
from flask_testing import TestCase
from openbrokerapi.api import BrokerCredentials
from openbrokerapi.log_util import basic_config
| 23.657143 | 67 | 0.641304 |
4067311b4e6925a510e59163839cef51d453a910 | 5,234 | py | Python | ansible/lib/ansible/modules/extras/network/f5/bigip_gtm_wide_ip.py | kiv-box/kafka | debec1c4bc8c43776070ee447a53b55fef42bd52 | [
"Apache-2.0"
] | null | null | null | ansible/lib/ansible/modules/extras/network/f5/bigip_gtm_wide_ip.py | kiv-box/kafka | debec1c4bc8c43776070ee447a53b55fef42bd52 | [
"Apache-2.0"
] | null | null | null | ansible/lib/ansible/modules/extras/network/f5/bigip_gtm_wide_ip.py | kiv-box/kafka | debec1c4bc8c43776070ee447a53b55fef42bd52 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, Michael Perzel
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: bigip_gtm_wide_ip
short_description: "Manages F5 BIG-IP GTM wide ip"
description:
- "Manages F5 BIG-IP GTM wide ip"
version_added: "2.0"
author:
- Michael Perzel (@perzizzle)
- Tim Rupp (@caphrim007)
notes:
- "Requires BIG-IP software version >= 11.4"
- "F5 developed module 'bigsuds' required (see http://devcentral.f5.com)"
- "Best run as a local_action in your playbook"
- "Tested with manager and above account privilege level"
requirements:
- bigsuds
options:
lb_method:
description:
- LB method of wide ip
required: true
choices: ['return_to_dns', 'null', 'round_robin',
'ratio', 'topology', 'static_persist', 'global_availability',
'vs_capacity', 'least_conn', 'lowest_rtt', 'lowest_hops',
'packet_rate', 'cpu', 'hit_ratio', 'qos', 'bps',
'drop_packet', 'explicit_ip', 'connection_rate', 'vs_score']
wide_ip:
description:
- Wide IP name
required: true
extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: Set lb method
local_action: >
bigip_gtm_wide_ip
server=192.0.2.1
user=admin
password=mysecret
lb_method=round_robin
wide_ip=my-wide-ip.example.com
'''
try:
import bigsuds
except ImportError:
bigsuds_found = False
else:
bigsuds_found = True
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.f5 import bigip_api, f5_argument_spec
if __name__ == '__main__':
    # Ansible module entry point.
    # NOTE(review): main() is expected to be defined earlier in the full
    # module (standard Ansible module layout) -- not visible in this excerpt.
    main()
| 31.914634 | 97 | 0.643676 |
4067cec9a6ceb8438c7e66edc2d29eb2148964ae | 1,323 | py | Python | sql/src/test/resources/joins/create_sample_table.py | MichelaSalvemini/Modelli_project | b70d505f9c3fef4a5f857fdccaa60b1b64c8a71d | [
"Apache-2.0"
] | 677 | 2016-01-04T04:05:50.000Z | 2022-03-24T06:37:27.000Z | sql/src/test/resources/joins/create_sample_table.py | MichelaSalvemini/Modelli_project | b70d505f9c3fef4a5f857fdccaa60b1b64c8a71d | [
"Apache-2.0"
] | 249 | 2015-12-29T03:41:31.000Z | 2020-09-02T03:11:30.000Z | sql/src/test/resources/joins/create_sample_table.py | MichelaSalvemini/Modelli_project | b70d505f9c3fef4a5f857fdccaa60b1b64c8a71d | [
"Apache-2.0"
] | 148 | 2015-12-29T03:25:48.000Z | 2021-08-25T03:59:52.000Z | #! /usr/bin/env python
from __future__ import print_function
import pandas as pd
import numpy as np
import argparse
if __name__ == '__main__':
    # Command-line driver: build two CSV tables whose column sets overlap so
    # the generated data can exercise join queries.
    parser = argparse.ArgumentParser(
        description='Generate sample tables to test joins.')
    parser.add_argument('--num-rows', '-r', type=int, default=100)
    parser.add_argument('--num-cols', '-c', type=int, required=True)
    parser.add_argument('--num-distinct-vals', '-d', type=int, required=True)
    parser.add_argument('--num-cols-overlap', '-o', type=int, default=1)
    args = parser.parse_args()
    NUM_ROWS = args.num_rows
    NUM_COLS = args.num_cols
    NUM_DISTINCT_VALS = args.num_distinct_vals
    num_overlap = args.num_cols_overlap
    # More overlapping columns than total columns makes no sense; bail out.
    if num_overlap > NUM_COLS:
        print('--num-cols-overlap cannot be greater than --num-cols')
        import sys
        sys.exit(1)
    # NOTE(review): `generate_csv` is not defined in this excerpt; presumably
    # it reads the NUM_* globals assigned above — confirm against the full
    # script before relying on this.
    generate_csv(0, 'table_a.csv')
    generate_csv(NUM_COLS - num_overlap, 'table_b.csv')
| 30.068182 | 77 | 0.670446 |
4067fffb2bd9b7aaa8d3273ea742884e5f876e2d | 1,219 | py | Python | Advanced/1- Introduction/5- Index_words.py | AlirezaMojtabavi/Python_Practice | c0128d6ce4cf172d93cc4e48861e7980e8e016a2 | [
"MIT"
] | null | null | null | Advanced/1- Introduction/5- Index_words.py | AlirezaMojtabavi/Python_Practice | c0128d6ce4cf172d93cc4e48861e7980e8e016a2 | [
"MIT"
] | null | null | null | Advanced/1- Introduction/5- Index_words.py | AlirezaMojtabavi/Python_Practice | c0128d6ce4cf172d93cc4e48861e7980e8e016a2 | [
"MIT"
] | 1 | 2020-11-14T07:19:26.000Z | 2020-11-14T07:19:26.000Z |
indexWords = list()
phrase = str(input())
phraseList = phrase.split(" ")
length = len(phraseList)
for item in phraseList :
item = item.strip()
if phrase != "" :
for i in range(1, length-1) :
lengthOfWord = len(phraseList[i])
if phraseList[i][0].isupper() :
if PreviousWord(phraseList, phraseList[i])[-1] != "." :
if phraseList[i][-1]=="." or phraseList[i][-1]=="," :
indexWords.append(i + 1)
indexWords.append(phraseList[i][: lengthOfWord-1])
elif phraseList[i][-1]== "]" and phraseList[i][-2]== "'" :
indexWords.append(i + 1)
indexWords.append(phraseList[i][: lengthOfWord-2])
else :
indexWords.append(i + 1)
indexWords.append(phraseList[i])
else:
print("None")
lengthOfIndexWord = len(indexWords)
if lengthOfIndexWord == 0 :
print("None")
else:
for i in range(0, lengthOfIndexWord//2):
print("%i:%s" %(indexWords[2*i],indexWords[(2*i)+1])) | 31.25641 | 74 | 0.538966 |
40686bfbfab402b52cf133e6f6f5366a147289d1 | 14,107 | py | Python | appengine/findit/handlers/test/completed_build_pubsub_ingestor_test.py | xswz8015/infra | f956b78ce4c39cc76acdda47601b86794ae0c1ba | [
"BSD-3-Clause"
] | null | null | null | appengine/findit/handlers/test/completed_build_pubsub_ingestor_test.py | xswz8015/infra | f956b78ce4c39cc76acdda47601b86794ae0c1ba | [
"BSD-3-Clause"
] | 4 | 2022-03-17T18:58:21.000Z | 2022-03-17T18:58:22.000Z | appengine/findit/handlers/test/completed_build_pubsub_ingestor_test.py | xswz8015/infra | f956b78ce4c39cc76acdda47601b86794ae0c1ba | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import base64
import json
import mock
import webapp2
from google.appengine.api import taskqueue
from go.chromium.org.luci.buildbucket.proto.build_pb2 import Build
from testing_utils.testing import AppengineTestCase
from common.findit_http_client import FinditHttpClient
from common.waterfall import buildbucket_client
from handlers import completed_build_pubsub_ingestor
from model.isolated_target import IsolatedTarget
| 40.654179 | 78 | 0.607712 |
40686c4879d63aced85e26a35f076b9028592fdb | 24,660 | py | Python | sdk/python/pulumi_azure_native/containerservice/v20191027preview/open_shift_managed_cluster.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/containerservice/v20191027preview/open_shift_managed_cluster.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/containerservice/v20191027preview/open_shift_managed_cluster.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['OpenShiftManagedClusterArgs', 'OpenShiftManagedCluster']
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
agent_pool_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAgentPoolProfileArgs']]]]] = None,
auth_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterAuthProfileArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
master_pool_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMasterPoolProfileArgs']]] = None,
monitor_profile: Optional[pulumi.Input[pulumi.InputType['OpenShiftManagedClusterMonitorProfileArgs']]] = None,
network_profile: Optional[pulumi.Input[pulumi.InputType['NetworkProfileArgs']]] = None,
open_shift_version: Optional[pulumi.Input[str]] = None,
plan: Optional[pulumi.Input[pulumi.InputType['PurchasePlanArgs']]] = None,
refresh_cluster: Optional[pulumi.Input[bool]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
router_profiles: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OpenShiftRouterProfileArgs']]]]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = OpenShiftManagedClusterArgs.__new__(OpenShiftManagedClusterArgs)
__props__.__dict__["agent_pool_profiles"] = agent_pool_profiles
__props__.__dict__["auth_profile"] = auth_profile
__props__.__dict__["location"] = location
__props__.__dict__["master_pool_profile"] = master_pool_profile
__props__.__dict__["monitor_profile"] = monitor_profile
__props__.__dict__["network_profile"] = network_profile
if open_shift_version is None and not opts.urn:
raise TypeError("Missing required property 'open_shift_version'")
__props__.__dict__["open_shift_version"] = open_shift_version
__props__.__dict__["plan"] = plan
__props__.__dict__["refresh_cluster"] = refresh_cluster
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["resource_name"] = resource_name_
__props__.__dict__["router_profiles"] = router_profiles
__props__.__dict__["tags"] = tags
__props__.__dict__["cluster_version"] = None
__props__.__dict__["fqdn"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["public_hostname"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:containerservice/v20191027preview:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-native:containerservice:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-nextgen:containerservice:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-native:containerservice/v20180930preview:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-nextgen:containerservice/v20180930preview:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-native:containerservice/v20190430:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-nextgen:containerservice/v20190430:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-native:containerservice/v20190930preview:OpenShiftManagedCluster"), pulumi.Alias(type_="azure-nextgen:containerservice/v20190930preview:OpenShiftManagedCluster")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(OpenShiftManagedCluster, __self__).__init__(
'azure-native:containerservice/v20191027preview:OpenShiftManagedCluster',
resource_name,
__props__,
opts)
| 47.514451 | 856 | 0.679927 |
40686f7cd56545ec9981f33c3903dd74fd6b1048 | 326 | py | Python | django_drf_server/quiz/migrations/0017_remove_quiz_questions.py | pammalPrasanna/quizie | 3c03552c39ef3d7e613f5b613479df4ef8d44ac1 | [
"MIT"
] | null | null | null | django_drf_server/quiz/migrations/0017_remove_quiz_questions.py | pammalPrasanna/quizie | 3c03552c39ef3d7e613f5b613479df4ef8d44ac1 | [
"MIT"
] | null | null | null | django_drf_server/quiz/migrations/0017_remove_quiz_questions.py | pammalPrasanna/quizie | 3c03552c39ef3d7e613f5b613479df4ef8d44ac1 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.4 on 2021-06-17 02:01
from django.db import migrations
| 18.111111 | 47 | 0.588957 |
4069e772d72345dc8c5aa0533940bffe33f5921a | 18,348 | py | Python | main.py | khan-git/webRecipies | 4fa9f9bc3c9809f82c5c8fd94dbb604da3443dcb | [
"MIT"
] | null | null | null | main.py | khan-git/webRecipies | 4fa9f9bc3c9809f82c5c8fd94dbb604da3443dcb | [
"MIT"
] | null | null | null | main.py | khan-git/webRecipies | 4fa9f9bc3c9809f82c5c8fd94dbb604da3443dcb | [
"MIT"
] | null | null | null | # -*- coding: iso-8859-1 -*-
import os
import shutil
import datetime
import sqlite3
from flask import Flask, request, session, render_template, g, redirect, url_for, abort, flash, make_response
from random import randint
import json
import urllib2
import json
from json.decoder import JSONObject
from werkzeug.utils import secure_filename
# --- Application setup -------------------------------------------------------
# Default upload directory; mirrored into app.config below.
UPLOAD_FOLDER = '/tmp'
# File extensions accepted by allowed_file() for uploads.
ALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])
# Directory that dobackup() writes SQL dumps into; created at import time.
DBBACKUPPATH = os.path.abspath('db_backup')
if not os.path.exists(DBBACKUPPATH):
    os.mkdir(DBBACKUPPATH)
# Single application instance.  (The original created two Flask objects and
# silently discarded the first; one is enough.)
app = Flask(__name__)
app.config.from_object(__name__)
# Load default config and override config from an environment variable.
app.config.update(dict(
    DATABASE=os.path.join(app.root_path, 'recipes.db'),
    SECRET_KEY='development key',
    USERNAME='admin',
    PASSWORD='default',
    UPLOAD_FOLDER='/tmp'
))
# Fixed key: the original wrote the misspelled 'UPPLOAD_FOLDER', leaving a
# junk entry while the real setting came only from the dict above.
app.config['UPLOAD_FOLDER'] = '/tmp'
app.config.from_envvar('FLASKR_SETTINGS', silent=True)
def connect_db():
    """Connect to the configured SQLite database, creating it if missing.

    If the database file does not exist yet it is initialised from
    database.sql via the sqlite3 command-line tool, at the *configured* path.
    (The original shelled out to a hard-coded 'recipes.db' in the current
    working directory, so the existence check and the created file could
    disagree whenever the process was started outside the app root.)

    Returns the open connection with row_factory set so queries yield
    dict-like sqlite3.Row objects.
    """
    if not os.path.exists(app.config['DATABASE']):
        # Build the schema at the same path the existence check looked at.
        cmd = 'sqlite3 %s < database.sql' % app.config['DATABASE']
        os.system(cmd)
    rv = sqlite3.connect(app.config['DATABASE'])
    rv.row_factory = sqlite3.Row
    return rv
def get_db():
    """Return the request-scoped database connection, opening it on first use.

    The connection is cached on Flask's application-context object ``g`` so
    every call within one request shares a single handle.
    """
    db = getattr(g, 'sqlite_db', None)
    if db is None:
        db = connect_db()
        g.sqlite_db = db
    return db
def queryDbFetchOne(query):
    """Execute *query* against the app database and return the first row.

    Returns None when the query yields no rows.
    """
    cursor = get_db().cursor()
    cursor.execute(query)
    return cursor.fetchone()
def queryDbFetchAll(query):
    """Execute *query* against the app database and return all rows as a list."""
    cursor = get_db().cursor()
    cursor.execute(query)
    return cursor.fetchall()
def getRecipe(recipeKey):
    """Fetch the recipes row with the given key, or None if absent.

    Uses a parameterised query; the original interpolated recipeKey straight
    into the SQL text, which is an injection risk for request-supplied keys.
    """
    cur = get_db().cursor()
    cur.execute('SELECT * FROM recipes WHERE key=?', (recipeKey,))
    return cur.fetchone()
def getIngredients(recipeKey):
    """Fetch all recipeAmount rows (ingredients) for the given recipe key.

    Parameterised to avoid SQL injection (the original used string
    interpolation of the key into the statement).
    """
    cur = get_db().cursor()
    cur.execute('SELECT * FROM recipeAmount WHERE recipeKey=?', (recipeKey,))
    return cur.fetchall()
def getNextKey():
    """Return the next free recipe key (highest existing key + 1).

    Returns 1 for an empty recipes table.  The original contained a stray
    Python-2 ``print`` debug statement on the empty-table path; it is
    removed here (it also broke the file under Python 3).
    """
    row = queryDbFetchOne('SELECT key FROM recipes ORDER BY key DESC')
    currentHighKey = 0 if row is None else int(row[0])
    return currentHighKey + 1
def insertIntoDb(table, names, values):
    """Insert one row into *table*.

    ``names``/``values`` are parallel sequences of column names and
    pre-formatted SQL value literals.  Returns the new row id, or None when
    the lengths differ or the statement fails (the transaction is rolled
    back in that case).
    """
    if len(values) != len(names):
        return None
    query = 'INSERT INTO %s (%s) VALUES(%s)' % (table, ', '.join(names), ', '.join(values))
    db = get_db()
    try:
        cur = db.cursor()  # the original created two cursors; one suffices
        cur.execute(query)
        db.commit()
        return cur.lastrowid
    except sqlite3.Error:
        # Narrowed from a bare except: only database errors trigger rollback;
        # callers detect failure via the None return.
        db.rollback()
        return None
def doRawQuery(query):
    """Execute an arbitrary SQL statement and commit it.

    Returns the cursor's lastrowid on success, None on failure (the
    transaction is rolled back).
    """
    db = get_db()
    try:
        cur = db.cursor()  # the original created two cursors; one suffices
        cur.execute(query)
        db.commit()
        return cur.lastrowid
    except sqlite3.Error:
        # Narrowed from a bare except; failure is signalled by returning None.
        db.rollback()
        return None
def updateDb(table, names, values, where):
    """Update columns of *table* for the rows selected by *where*.

    ``names``/``values`` are parallel sequences of column names and
    pre-formatted SQL value literals; *where* is a complete ``WHERE ...``
    clause.  Returns the cursor's lastrowid on success, None when the
    lengths differ or the statement fails (the transaction is rolled back).
    """
    if len(values) != len(names):
        return None
    assignments = ['%s=%s' % (name, value) for name, value in zip(names, values)]
    query = 'UPDATE %s SET %s %s' % (table, ', '.join(assignments), where)
    db = get_db()
    try:
        cur = db.cursor()  # the original created two cursors; one suffices
        cur.execute(query)
        db.commit()
        return cur.lastrowid
    except sqlite3.Error:
        # Narrowed from a bare except; failure is signalled by returning None.
        db.rollback()
        return None
# return redirect('login', code=304)
def deleteAmount(recipeKey):
    """Delete every recipeAmount row belonging to *recipeKey*.

    Returns the cursor's lastrowid on success, None on failure.  The
    original left ``rowId`` unassigned when the statement raised, so the
    ``return rowId`` in its finally block crashed with NameError; rowId is
    initialised up front here.
    """
    query = 'DELETE FROM recipeAmount WHERE recipeKey=%s' % recipeKey
    rowId = None
    db = get_db()
    try:
        cur = db.cursor()  # the original created two cursors; one suffices
        cur.execute(query)
        db.commit()
        rowId = cur.lastrowid
    except sqlite3.Error:
        db.rollback()
        print("error in delete operation")
    return rowId
def allowed_file(filename):
    """Return True when *filename* has an extension in ALLOWED_EXTENSIONS.

    The comparison is case-insensitive, so 'photo.JPG' is accepted as well
    (the original compared the raw extension and rejected upper-case ones,
    while ALLOWED_EXTENSIONS holds only lower-case entries).
    """
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def displayRecipe(recipe):
    """Render the display page for one recipe row.

    Pulls the recipe's ingredient rows and a randomised popup-menu element
    id into the template context.
    """
    context = {
        'key': recipe['key'],
        'title': recipe['title'],
        'instructions': recipe['instructions'],
        'portions': recipe['portions'],
        'ingredients': getIngredients(recipe['key']),
        'pageId': 'displayRecipe',
        'popupMenuId': 'popupMenuId%d' % randint(1, 1048),
    }
    return render_template('displayRecipe_template.html', **context)
def getFridgeJSON():
    """Return the fridge contents (joined with recipe titles) as a JSON string."""
    rows = queryDbFetchAll(
        'SELECT key, title, fridge.portions AS portions FROM recipes'
        ' INNER JOIN fridge ON recipes.key = fridge.recipeKey')
    # sqlite3.Row objects are converted to plain dicts so json can encode them.
    return json.dumps([{key: row[key] for key in row.keys()} for row in rows])
# Update fridge content
def dobackup(name):
    """Write a full SQL dump of the database to DBBACKUPPATH/<name>.

    Uses a with-block so the file handle is closed even if the dump raises
    (the original left the handle open on error).
    """
    con = get_db()
    with open(os.path.join(DBBACKUPPATH, name), 'w') as dbF:
        # NOTE(review): writing .encode('utf8') bytes to a text-mode file is
        # Python 2 behaviour (this module uses Python 2 print statements
        # elsewhere); kept as-is to preserve output.
        dbF.write('\n'.join(con.iterdump()).encode('utf8'))
# Development entry point: run Flask's built-in server on all interfaces with
# the debugger enabled (not suitable for production use).
if __name__ == "__main__":
    # Disabled file-based logging setup, kept for reference:
#    import logging
#    file_handler = RotatingFileHandler('/tmp/receptakuten.log', bakupCount=5)
#    file_handler.setLevel(logging.WARNING)
#    app.logger.addHandler(file_handler)
    app.run(host="0.0.0.0", debug=True)
#    app.run(debug=True)
| 33 | 150 | 0.601373 |
406a21613d9b1dbc55f543cfe42bc9ef9b68a79c | 1,749 | py | Python | tests/bugs/core_2678_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2022-02-05T11:37:13.000Z | 2022-02-05T11:37:13.000Z | tests/bugs/core_2678_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-09-03T11:47:00.000Z | 2021-09-03T12:42:10.000Z | tests/bugs/core_2678_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-06-30T14:14:16.000Z | 2021-06-30T14:14:16.000Z | #coding:utf-8
#
# id: bugs.core_2678
# title: Full outer join cannot use available indices (very slow execution)
# decription:
# tracker_id: CORE-2678
# min_versions: ['3.0']
# versions: 3.0
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 3.0
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
create table td_data1 (
c1 varchar(20) character set win1251 not null collate win1251,
c2 integer not null,
c3 date not null,
d1 float not null
);
create index idx_td_data1 on td_data1(c1,c2,c3);
commit;
create table td_data2 (
c1 varchar(20) character set win1251 not null collate win1251,
c2 integer not null,
c3 date not null,
d2 float not null
);
create index idx_td_data2 on td_data2(c1,c2,c3);
commit;
set planonly;
select
d1.c1, d2.c1,
d1.c2, d2.c2,
d1.c3, d2.c3,
coalesce(sum(d1.d1), 0) t1,
coalesce(sum(d2.d2), 0) t2
from td_data1 d1
full join td_data2 d2
on
d2.c1 = d1.c1
and d2.c2 = d1.c2
and d2.c3 = d1.c3
group by
d1.c1, d2.c1,
d1.c2, d2.c2,
d1.c3, d2.c3;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
PLAN SORT (JOIN (JOIN (D2 NATURAL, D1 INDEX (IDX_TD_DATA1)), JOIN (D1 NATURAL, D2 INDEX (IDX_TD_DATA2))))
"""
| 23.958904 | 109 | 0.619211 |
406ae0237fa650007fb4d1f31a942b053762212b | 962 | py | Python | application/model/entity/category.py | UniversidadeDeVassouras/labproghiper-2020.1-MatheusTelles-p1 | d0d81fc82d031f7add9e38add765aad0c404ee35 | [
"Apache-2.0"
] | 1 | 2020-07-15T14:23:30.000Z | 2020-07-15T14:23:30.000Z | application/model/entity/category.py | UniversidadeDeVassouras/labproghiper-2020.1-MatheusTelles-p1 | d0d81fc82d031f7add9e38add765aad0c404ee35 | [
"Apache-2.0"
] | null | null | null | application/model/entity/category.py | UniversidadeDeVassouras/labproghiper-2020.1-MatheusTelles-p1 | d0d81fc82d031f7add9e38add765aad0c404ee35 | [
"Apache-2.0"
] | null | null | null | from flask import current_app | 24.666667 | 60 | 0.613306 |
406b1ddec2cc13a47e3515d6e9a2b41aa445fd1b | 76 | py | Python | cookietemple/create/templates/cli/cli_python/{{ cookiecutter.project_slug_no_hyphen }}/tests/__init__.py | e2jk/cookietemple | 86af5622cdabe9ae446048536571898716939f29 | [
"Apache-2.0"
] | 117 | 2020-11-23T02:07:23.000Z | 2022-03-21T16:14:53.000Z | cookietemple/create/templates/cli/cli_python/{{ cookiecutter.project_slug_no_hyphen }}/tests/__init__.py | e2jk/cookietemple | 86af5622cdabe9ae446048536571898716939f29 | [
"Apache-2.0"
] | 226 | 2020-10-19T19:58:13.000Z | 2022-03-27T18:54:30.000Z | cookietemple/create/templates/cli/cli_python/{{ cookiecutter.project_slug_no_hyphen }}/tests/__init__.py | e2jk/cookietemple | 86af5622cdabe9ae446048536571898716939f29 | [
"Apache-2.0"
] | 9 | 2020-11-24T12:45:10.000Z | 2022-03-13T15:58:23.000Z | """Test suite for the {{ cookiecutter.project_slug_no_hyphen }} package."""
| 38 | 75 | 0.736842 |
406bcb88801f876f8613c7d8e41183ccf8efc7dd | 153 | py | Python | bricks/ev3dev/modules/pybricks/robotics.py | ZPhilo/pybricks-micropython | bf3072b6f7dd87b60e50d7c2130ca3c393a5709f | [
"MIT"
] | 115 | 2020-06-15T16:43:14.000Z | 2022-03-21T21:11:57.000Z | bricks/ev3dev/modules/pybricks/robotics.py | ZPhilo/pybricks-micropython | bf3072b6f7dd87b60e50d7c2130ca3c393a5709f | [
"MIT"
] | 83 | 2020-06-17T17:19:29.000Z | 2022-03-08T18:50:35.000Z | bricks/ev3dev/modules/pybricks/robotics.py | BertLindeman/pybricks-micropython | 8f22a99551100e66ddf08d014d9f442f22b33b4d | [
"MIT"
] | 40 | 2020-06-15T18:36:39.000Z | 2022-03-28T13:22:43.000Z | # SPDX-License-Identifier: MIT
# Copyright (c) 2018-2020 The Pybricks Authors
"""Pybricks robotics module."""
from _pybricks.robotics import DriveBase
| 21.857143 | 46 | 0.771242 |
406bff6901669314a484753b5d5e8d18397cb7b2 | 3,693 | py | Python | flask-app/web_app/storage_manager/storage_manager.py | PetrMokrov/back_end_project | 4dd58d61e637d10872fe58a154dc89f6d0829d94 | [
"MIT"
] | null | null | null | flask-app/web_app/storage_manager/storage_manager.py | PetrMokrov/back_end_project | 4dd58d61e637d10872fe58a154dc89f6d0829d94 | [
"MIT"
] | null | null | null | flask-app/web_app/storage_manager/storage_manager.py | PetrMokrov/back_end_project | 4dd58d61e637d10872fe58a154dc89f6d0829d94 | [
"MIT"
] | 1 | 2019-04-02T12:30:13.000Z | 2019-04-02T12:30:13.000Z | #!/usr/bin/env python
import psycopg2
import time
from ..models import User
| 33.572727 | 107 | 0.479285 |
406c19e470ed1397c6d2535e8a38599b7798d3a3 | 2,906 | py | Python | custom/ahex.py | piyush1104/ColorHelper | 7321cc2642f82c701e3c9c1ff1ebdad3a8ff19dc | [
"MIT"
] | null | null | null | custom/ahex.py | piyush1104/ColorHelper | 7321cc2642f82c701e3c9c1ff1ebdad3a8ff19dc | [
"MIT"
] | null | null | null | custom/ahex.py | piyush1104/ColorHelper | 7321cc2642f82c701e3c9c1ff1ebdad3a8ff19dc | [
"MIT"
] | null | null | null | """Custon color that looks for colors of format `#RRGGBBAA` as `#AARRGGBB`."""
from coloraide.css.colors import Color, SRGB
from coloraide.colors import _parse as parse
from coloraide import util
import copy
import re
class ColorAlphaHex(Color):
    """Color object whose sRGB color space looks for colors of format `#RRGGBBAA` as `#AARRGGBB`."""

    # Copy the base class's color-space registry and swap the default sRGB
    # space for the alpha-first-hex variant.
    # NOTE(review): `ASRGB` is not defined in this excerpt — presumably an
    # SRGB subclass declared earlier in the full file; confirm there.
    CS_MAP = copy.copy(Color.CS_MAP)
    CS_MAP["srgb"] = ASRGB
| 33.022727 | 100 | 0.547144 |
406c1c0028a84aba8bcd01a2421dbf11b583f400 | 2,115 | py | Python | source_code/terrain.py | Wiolarz/Console_PY_dungeon | cbf3b9a68251b9ce620aac1f4ca36361160186ea | [
"Apache-2.0"
] | null | null | null | source_code/terrain.py | Wiolarz/Console_PY_dungeon | cbf3b9a68251b9ce620aac1f4ca36361160186ea | [
"Apache-2.0"
] | 2 | 2021-11-29T16:26:03.000Z | 2021-11-29T16:27:14.000Z | source_code/terrain.py | Wiolarz/Console_PY_dungeon | cbf3b9a68251b9ce620aac1f4ca36361160186ea | [
"Apache-2.0"
] | null | null | null | import random
import jobs
import balance
from economy import roman_numbers
| 27.467532 | 113 | 0.613239 |
406d45c5b1e3edd5a8eec1e610e28a22eb3881b2 | 2,190 | py | Python | entrepreneurial_property/models/scientificpark.py | CzechInvest/ciis | c6102598f564a717472e5e31e7eb894bba2c8104 | [
"MIT"
] | 1 | 2019-05-26T22:24:01.000Z | 2019-05-26T22:24:01.000Z | entrepreneurial_property/models/scientificpark.py | CzechInvest/ciis | c6102598f564a717472e5e31e7eb894bba2c8104 | [
"MIT"
] | 6 | 2019-01-22T14:53:43.000Z | 2020-09-22T16:20:28.000Z | entrepreneurial_property/models/scientificpark.py | CzechInvest/ciis | c6102598f564a717472e5e31e7eb894bba2c8104 | [
"MIT"
] | null | null | null | from django.db import models
from .media import Water
from .media import Electricity
from .media import Gas
from .media import WasteWater
from .media import Telecommunication
from .generic import Attachment
from .generic import Photo
from .generic import Location as EstateLocation
from cigeo.models import GenericNote as EstateNote
| 20.660377 | 57 | 0.696347 |
406e0a83e413ef1e4bba7c5add21f6292e7188e7 | 2,328 | py | Python | pusion/input_output/file_input_output.py | IPVS-AS/pusion | 58ef24b602f611192430f6005ecf5305f878f412 | [
"MIT"
] | 5 | 2021-07-24T16:05:12.000Z | 2022-01-21T15:06:03.000Z | pusion/input_output/file_input_output.py | IPVS-AS/pusion | 58ef24b602f611192430f6005ecf5305f878f412 | [
"MIT"
] | null | null | null | pusion/input_output/file_input_output.py | IPVS-AS/pusion | 58ef24b602f611192430f6005ecf5305f878f412 | [
"MIT"
] | 2 | 2021-07-24T16:05:14.000Z | 2022-03-25T21:24:40.000Z | import json
import ntpath
import shutil
from pathlib import Path
import pickle5
def load_pickle_files_as_data(file_paths):
    """
    Load decision outputs from a list of pickle files.

    :param file_paths: A list of paths to the individual pickle files.
    :return: A list with one unpickled object per input file, in order.
    """
    data = []
    for path in file_paths:
        with open(path, "rb") as handle:
            data.append(pickle5.load(handle))
    return data
def dump_pusion_data(data, file_path):
    """
    Serialise classification output data to *file_path* using pickle.

    :param data: A data dictionary.
    :param file_path: Location of the output pickle file.
    """
    with open(file_path, "wb") as out_file:
        pickle5.dump(data, out_file, protocol=pickle5.HIGHEST_PROTOCOL)
def dump_data_as_txt(data, name, identifier):
    """
    Write a data dictionary as pretty-printed JSON for one evaluation unit.

    :param data: A data dictionary.
    :param name: The file name (without extension).
    :param identifier: The identifier of the current evaluation unit (e.g. date/time).
    """
    directory = "res/eval_" + identifier
    Path(directory).mkdir(parents=True, exist_ok=True)
    with open(directory + "/" + name + ".txt", "w") as out_file:
        json.dump(data, out_file, indent=4)
def save(plot_instance, name, identifier):
    """
    Save a plot for one evaluation unit as both an SVG and a PDF file.

    :param plot_instance: `matplotlib.pyplot`-instance (anything providing savefig).
    :param name: The file name (without extension).
    :param identifier: The identifier of the current evaluation unit (e.g. date/time).
    """
    directory = "res/eval_" + identifier
    Path(directory).mkdir(parents=True, exist_ok=True)
    # SVG first, then PDF — same order as before.
    for extension in (".svg", ".pdf"):
        plot_instance.savefig(directory + "/" + name + extension, bbox_inches="tight")
def save_evaluator(file, identifier):
    """
    Archive the evaluation script of one evaluation unit as a .txt copy.

    :param file: The Python file path (e.g. referenced by __file__).
    :param identifier: The identifier of the current evaluation unit (e.g. date/time).
    """
    directory = "res/eval_" + identifier
    Path(directory).mkdir(parents=True, exist_ok=True)
    target = directory + "/" + ntpath.basename(file) + ".txt"
    shutil.copy(file, target)
| 32.333333 | 97 | 0.681701 |
407208de4a5ad6967ea27d59e0496b7b2dfa6fe5 | 747 | py | Python | meiduo_mall/meiduo_mall/apps/meiduo_admin/views/spus.py | aGrass0825/meiduo_project | 78c560c1e9a3205d4958ddbe798cd0ab2be41830 | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/meiduo_admin/views/spus.py | aGrass0825/meiduo_project | 78c560c1e9a3205d4958ddbe798cd0ab2be41830 | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/apps/meiduo_admin/views/spus.py | aGrass0825/meiduo_project | 78c560c1e9a3205d4958ddbe798cd0ab2be41830 | [
"MIT"
] | null | null | null | from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAdminUser
from goods.models import SPU, SPUSpecification
from meiduo_admin.serializers.spus import SPUSimpleSerializer, SPUSpecSerializer
# GET/meiduo_admin/goods/(?P<pk>\d+)/specs/
| 24.096774 | 80 | 0.749665 |
4072139f6fa73549f4c92cc0b2aa6d9bd1e96911 | 1,172 | py | Python | Scientific Computing with Python/Probability Calculator/prob_calculator.py | Fradxyz/FCCProjects | f337ebdfb86605107e0b85d9e83e88ec7ed60778 | [
"MIT"
] | null | null | null | Scientific Computing with Python/Probability Calculator/prob_calculator.py | Fradxyz/FCCProjects | f337ebdfb86605107e0b85d9e83e88ec7ed60778 | [
"MIT"
] | null | null | null | Scientific Computing with Python/Probability Calculator/prob_calculator.py | Fradxyz/FCCProjects | f337ebdfb86605107e0b85d9e83e88ec7ed60778 | [
"MIT"
] | null | null | null | # Hacked by Ry2uko :D
import copy
import random
# Consider using the modules imported above.
if __name__ == '__main__':
# Test here
pass | 26.044444 | 70 | 0.551195 |
40738ad4ddc2dca3384f1a7a4b98ec684eed9a5c | 1,611 | py | Python | src/frames/add_quantity_frame.py | GolovPavel/ValueConverter | 8492f100667af49fe4bf06eaf0de660513424252 | [
"MIT"
] | 1 | 2020-09-22T17:10:21.000Z | 2020-09-22T17:10:21.000Z | src/frames/add_quantity_frame.py | GolovPavel/ValueConverter | 8492f100667af49fe4bf06eaf0de660513424252 | [
"MIT"
] | 1 | 2020-03-06T21:18:10.000Z | 2020-03-06T21:18:10.000Z | src/frames/add_quantity_frame.py | GolovPavel/ValueConverter | 8492f100667af49fe4bf06eaf0de660513424252 | [
"MIT"
] | null | null | null | import tkinter as tk
from tkinter.messagebox import showerror
from constants.frames import MAIN_FRAME_NAME
from util import add_new_quantity
| 33.5625 | 116 | 0.666667 |
4073fadf1987f151aaa0076f63a670fabd02b58e | 226 | py | Python | setup.py | vwxyzjn/pysc2gym | 7c43e55a8f48be77f53332b73fda7635e6063589 | [
"MIT"
] | 6 | 2020-09-23T21:31:48.000Z | 2022-03-14T23:59:35.000Z | setup.py | vwxyzjn/pysc2gym | 7c43e55a8f48be77f53332b73fda7635e6063589 | [
"MIT"
] | 2 | 2022-01-13T03:48:47.000Z | 2022-03-12T00:58:26.000Z | setup.py | vwxyzjn/pysc2gym | 7c43e55a8f48be77f53332b73fda7635e6063589 | [
"MIT"
] | 1 | 2021-06-28T14:17:11.000Z | 2021-06-28T14:17:11.000Z | from setuptools import setup
import versioneer
setup(name='gym_pysc2',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
install_requires=['gym'] # And any other dependencies foo needs
) | 28.25 | 70 | 0.738938 |
40747f1fe0cf14a0bae5770661298c543ddc7ac6 | 1,395 | py | Python | Compressed downloads/server.py | Aldair47x/aa | ac49239ff94ec6735b316606482dc366ae52bfe8 | [
"MIT"
] | null | null | null | Compressed downloads/server.py | Aldair47x/aa | ac49239ff94ec6735b316606482dc366ae52bfe8 | [
"MIT"
] | null | null | null | Compressed downloads/server.py | Aldair47x/aa | ac49239ff94ec6735b316606482dc366ae52bfe8 | [
"MIT"
] | null | null | null | import zmq
import sys
import os
import math
if __name__ == '__main__':
main()
| 26.320755 | 72 | 0.496774 |
40757d236a917305a24dbe63896ecb49966f293c | 1,618 | py | Python | metric_learn/nca.py | ogrisel/metric-learn | fb6733c190911d2c408bd7f0b8c9b54ff005fa8d | [
"MIT"
] | null | null | null | metric_learn/nca.py | ogrisel/metric-learn | fb6733c190911d2c408bd7f0b8c9b54ff005fa8d | [
"MIT"
] | null | null | null | metric_learn/nca.py | ogrisel/metric-learn | fb6733c190911d2c408bd7f0b8c9b54ff005fa8d | [
"MIT"
] | 2 | 2017-08-02T08:57:50.000Z | 2020-03-20T13:32:54.000Z | """
Neighborhood Components Analysis (NCA)
Ported to Python from https://github.com/vomjom/nca
"""
from __future__ import absolute_import
import numpy as np
from six.moves import xrange
from sklearn.utils.validation import check_X_y
from .base_metric import BaseMetricLearner
EPS = np.finfo(float).eps
| 26.966667 | 74 | 0.600742 |
4075a5272343f25994c7b713935ff6736a8b4fb7 | 2,923 | py | Python | rl_repr/batch_rl/evaluation.py | xxdreck/google-research | dac724bc2b9362d65c26747a8754504fe4c615f8 | [
"Apache-2.0"
] | 23,901 | 2018-10-04T19:48:53.000Z | 2022-03-31T21:27:42.000Z | rl_repr/batch_rl/evaluation.py | xxdreck/google-research | dac724bc2b9362d65c26747a8754504fe4c615f8 | [
"Apache-2.0"
] | 891 | 2018-11-10T06:16:13.000Z | 2022-03-31T10:42:34.000Z | rl_repr/batch_rl/evaluation.py | admariner/google-research | 7cee4b22b925581d912e8d993625c180da2a5a4f | [
"Apache-2.0"
] | 6,047 | 2018-10-12T06:31:02.000Z | 2022-03-31T13:59:28.000Z | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Policy evaluation."""
import typing
import tensorflow.compat.v2 as tf
def evaluate(
    env,
    policy,
    num_episodes = 10,
    ctx_length = None,
    embed_training_window = None,
    state_mask_fn = None,  # pylint: disable=g-bare-generic
):
  """Evaluates the policy.

  Args:
    env: Environment to evaluate the policy on.
    policy: Policy to evaluate.
    num_episodes: A number of episodes to average the policy on.
    ctx_length: number of previous steps to compute context from.
    embed_training_window: window size used during embed training.
    state_mask_fn: state masking function for partially obs envs.

  Returns:
    Averaged reward and a total number of steps.
  """
  # NOTE(review): `apply_mask` is referenced below but not defined in this
  # excerpt; presumably it applies `state_mask_fn` to an observation (identity
  # when None) — confirm against the full module.
  total_timesteps = 0
  total_returns = 0.0
  for _ in range(num_episodes):
    timestep = env.reset()
    if ctx_length:
      # Seed a fixed-size context window with copies of the initial
      # observation, zero actions, and zero rewards.
      states = [apply_mask(timestep.observation) for _ in range(ctx_length)]
      actions = [
          tf.zeros(policy.action_spec.shape)[None, :] for _ in range(ctx_length)
      ]
      rewards = [[0.] for _ in range(ctx_length)]
    latent_action = None
    i = 0
    while not timestep.is_last():
      # Drop the cached latent action at each embed-window boundary (or on
      # every step when the window is trivially small).
      if embed_training_window and (i % embed_training_window == 0 or
                                    embed_training_window <= 2):
        latent_action = None
      if ctx_length:
        # Slide the context window forward and act on the stacked history.
        states.append(apply_mask(timestep.observation))
        if len(states) > ctx_length:
          states.pop(0)
          actions.pop(0)
          rewards.pop(0)
        action = policy.act(
            tf.stack(states, axis=1),
            actions=tf.stack(actions, axis=1),
            rewards=tf.stack(rewards, axis=1))
        actions.append(action)
      else:
        if embed_training_window:
          # The latent action is carried across steps within one embed window.
          action, latent_action = policy.act(
              apply_mask(timestep.observation), latent_action=latent_action)
        else:
          action = policy.act(apply_mask(timestep.observation))
      timestep = env.step(action)
      if ctx_length:
        rewards.append(timestep.reward)
      # Accumulate the first (presumably only) element of the batched reward.
      total_returns += timestep.reward[0]
      total_timesteps += 1
      i += 1
  return total_returns / num_episodes, total_timesteps / num_episodes
| 31.771739 | 80 | 0.671912 |
4075b24c28e51db8658934eede3f2eedb744d8c0 | 4,721 | py | Python | src/nwb_conversion_tools/datainterfaces/ecephys/intan/intandatainterface.py | ben-dichter-consulting/nwbn-conversion-tools | f5641317d2697a3916eeb54f74ce171ed65469ed | [
"BSD-3-Clause"
] | null | null | null | src/nwb_conversion_tools/datainterfaces/ecephys/intan/intandatainterface.py | ben-dichter-consulting/nwbn-conversion-tools | f5641317d2697a3916eeb54f74ce171ed65469ed | [
"BSD-3-Clause"
] | 6 | 2020-01-31T13:29:40.000Z | 2020-03-27T13:09:32.000Z | src/nwb_conversion_tools/datainterfaces/ecephys/intan/intandatainterface.py | ben-dichter-consulting/nwb-conversion-tools | f5641317d2697a3916eeb54f74ce171ed65469ed | [
"BSD-3-Clause"
] | 1 | 2019-11-24T05:08:06.000Z | 2019-11-24T05:08:06.000Z | """Authors: Cody Baker and Ben Dichter."""
from pathlib import Path
import spikeextractors as se
from pynwb.ecephys import ElectricalSeries
from ..baserecordingextractorinterface import BaseRecordingExtractorInterface
from ....utils import get_schema_from_hdmf_class, FilePathType
# pyintan is an optional dependency: record its availability instead of
# failing at import time, so a helpful message can be raised only when the
# extractor is actually used.
try:
    from pyintan.intan import read_rhd, read_rhs
    HAVE_PYINTAN = True
except ImportError:
    HAVE_PYINTAN = False
INSTALL_MESSAGE = "Please install pyintan to use this extractor!"
| 44.121495 | 118 | 0.615336 |
40771f48cc35e55bf1ed0377d840f200b12f6982 | 739 | py | Python | Use.py | XtremeCoder1384/SongDownloader | 7bb06d7961ec699af8517cbd7cb4a1ec83d4fd02 | [
"MIT"
] | 1 | 2019-03-04T02:26:41.000Z | 2019-03-04T02:26:41.000Z | Use.py | XtremeCoder1384/SongDownloader | 7bb06d7961ec699af8517cbd7cb4a1ec83d4fd02 | [
"MIT"
] | 1 | 2018-12-20T02:32:35.000Z | 2019-03-11T12:51:15.000Z | Use.py | IngeniousCoder/SongDownloader | 7bb06d7961ec699af8517cbd7cb4a1ec83d4fd02 | [
"MIT"
] | null | null | null | import os
import youtube_dl  # noqa: F401 -- kept from the original; presumably ensures youtube-dl is importable before downloading (confirm)
# Run the project's setup script first (installs/locates youtube-dl on Windows).
os.system("setup.bat")
playlist = input("Paste the Youtube Playlist URL Here.")
print("""THIS TOOL WILL ATTEMPT TO DOWNLOAD THE FIRST 1000 SONGS IN THE QUEUE.\n
PLEASE DO NOT INTERRUPT THE TOOL.
YOU MAY CLOSE THE TOOL WHEN IT DISPLAYS "DONE!".
ALL DOWNLOADED SONGS WILL BE IN THE SAME DIRECTORY THIS FILE IS IN.
TO EXTRACT THEM, FILTER BY MP3.""")
# Download the playlist one track at a time: write a single-track batch file,
# execute it, then move on to the next playlist index (1-based, up to 1000).
for track in range(1, 1001):
    # BUG FIX: the original used `file.close` (missing parentheses), so the
    # batch file was never explicitly closed/flushed before being executed.
    # A `with` block guarantees the handle is closed when os.system runs it.
    with open("Downloader.bat", "w") as batch_file:
        batch_file.write(
            "youtube-dl -x --playlist-start {} --audio-format mp3 "
            "--playlist-end {} {}".format(track, track, playlist)
        )
    os.system("Downloader.bat")
print("DONE! You may now close this window.")
| 36.95 | 129 | 0.663058 |
40776dbc5b7aba40a9cfd205d779833d8dd62541 | 1,903 | py | Python | site/manage.py | oaoouo/railgun | b09d276723976740841d8b8adf9cbf87a05cd970 | [
"MIT"
] | null | null | null | site/manage.py | oaoouo/railgun | b09d276723976740841d8b8adf9cbf87a05cd970 | [
"MIT"
] | null | null | null | site/manage.py | oaoouo/railgun | b09d276723976740841d8b8adf9cbf87a05cd970 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
manage.py
~~~~~~~~~
"""
import os
import sys
import shutil
import platform
from app import app
from gen import Gen
from flask_script import Manager
""""""
if (platform.python_version().split('.')[0] == '2'):
# reload(sys) is evil :)
reload(sys)
sys.setdefaultencoding('utf-8')
"""Git"""
git_url = app.config['GIT_URL']
git_branch = app.config['BRANCH']
manager = Manager(app)
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == 'build':
_gen = Gen(app)
_gen.gen()
# update static resources
update_static_res()
elif len(sys.argv) > 1 and sys.argv[1] == 'first_upload':
first_upload()
elif len(sys.argv) > 1 and sys.argv[1] == 'other_upload':
other_upload()
else:
manager.run()
| 25.039474 | 71 | 0.60536 |
40780e501d35b1715806673a2e143e24f1727e1c | 3,152 | py | Python | tests/test_segmenters.py | edoarn/cv-models | 5fa7e50fd69f76b54611bb323b15610eeb1bb5cf | [
"MIT"
] | null | null | null | tests/test_segmenters.py | edoarn/cv-models | 5fa7e50fd69f76b54611bb323b15610eeb1bb5cf | [
"MIT"
] | 4 | 2021-04-23T12:05:45.000Z | 2021-04-25T11:38:01.000Z | tests/test_segmenters.py | edoarn/cv-models | 5fa7e50fd69f76b54611bb323b15610eeb1bb5cf | [
"MIT"
] | null | null | null | from typing import Any
import torch
import torch.nn as nn
from cvmodels.segmentation import unet, deeplab as dl
| 36.651163 | 115 | 0.703046 |
40788e305d7f2fee1abfae85125753bcd3fa071f | 10,981 | py | Python | bagua/torch_api/contrib/sync_batchnorm.py | mmathys/bagua | e17978690452318b65b317b283259f09c24d59bb | [
"MIT"
] | 635 | 2021-06-11T03:03:11.000Z | 2022-03-31T14:52:57.000Z | bagua/torch_api/contrib/sync_batchnorm.py | mmathys/bagua | e17978690452318b65b317b283259f09c24d59bb | [
"MIT"
] | 181 | 2021-06-10T12:27:19.000Z | 2022-03-31T04:08:19.000Z | bagua/torch_api/contrib/sync_batchnorm.py | shjwudp/bagua | 7e1b438e27e3119b23e472f5b9217a9862932bef | [
"MIT"
] | 71 | 2021-06-10T13:16:53.000Z | 2022-03-22T09:26:22.000Z | # Copyright (c) Uber Technologies, Inc. and its affiliates.
# Copyright (c) 2021 Kuaishou AI Platform & DS3 Lab.
#
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from distutils.version import LooseVersion
import torch
from torch.autograd.function import Function
import torch.nn.functional as F
from torch.nn.modules.batchnorm import _BatchNorm
import bagua.torch_api as bagua
from bagua.torch_api.communication import allgather, allreduce
# Backward compat for old PyTorch
if not hasattr(torch.jit, "unused"):
torch.jit.unused = lambda x: x
_SYNC_BN_V2 = LooseVersion(torch.__version__) >= LooseVersion("1.5.0") and LooseVersion(
torch.__version__
) <= LooseVersion("1.6.0")
_SYNC_BN_V3 = LooseVersion(torch.__version__) >= LooseVersion("1.6.0")
_SYNC_BN_V4 = LooseVersion(torch.__version__) >= LooseVersion("1.9.0")
| 38.128472 | 160 | 0.601858 |
407a65f9c4b9f958fde5ab42bad4bdd15788bb31 | 4,046 | py | Python | tests/test_classification_metric.py | DaveFClarke/ml_bias_checking | 90f67ebc602b6107042e6cbff3268051bb3b1c95 | [
"Apache-2.0"
] | 2 | 2021-07-31T20:52:37.000Z | 2022-02-15T21:05:17.000Z | tests/test_classification_metric.py | DaveFClarke/ml_bias_checking | 90f67ebc602b6107042e6cbff3268051bb3b1c95 | [
"Apache-2.0"
] | 2 | 2021-08-25T16:16:43.000Z | 2022-02-10T05:26:14.000Z | tests/test_classification_metric.py | DaveFClarke/ml_bias_checking | 90f67ebc602b6107042e6cbff3268051bb3b1c95 | [
"Apache-2.0"
] | 1 | 2019-05-21T15:31:24.000Z | 2019-05-21T15:31:24.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import pandas as pd
from aif360.datasets import BinaryLabelDataset
from aif360.metrics import ClassificationMetric
| 34.87931 | 85 | 0.505685 |
407b22ddf13dab3659fb801ada3a7cb31608cf9a | 200 | py | Python | PDA/extra_assignments/10.6. Dicts_ Countries and cities/solution/main.py | EMbeDS-education/StatsAndComputing20212022 | 971e418882b206a1b5606d15d222cef1a5a04834 | [
"MIT"
] | 2 | 2022-02-24T09:35:15.000Z | 2022-03-14T20:34:33.000Z | PDA/extra_assignments/10.6. Dicts_ Countries and cities/solution/main.py | GeorgiosArg/StatsAndComputing20212022 | 798d39af6aa5ef5eef49d5d6f43191351e8a49f3 | [
"MIT"
] | null | null | null | PDA/extra_assignments/10.6. Dicts_ Countries and cities/solution/main.py | GeorgiosArg/StatsAndComputing20212022 | 798d39af6aa5ef5eef49d5d6f43191351e8a49f3 | [
"MIT"
] | 2 | 2022-03-15T21:40:35.000Z | 2022-03-26T14:51:31.000Z | city_country = {}
for _ in range(int(input())):
country, *cities = input().split()
for city in cities:
city_country[city] = country
for _ in range(int(input())):
print(city_country[input()]) | 28.571429 | 36 | 0.665 |
407cd39412220721420002d2204aeef22618cb4c | 1,562 | py | Python | config.py | oyasr/mudawen | 6f0161ab783536d7c5d695225ef28ce4947a46e3 | [
"MIT"
] | null | null | null | config.py | oyasr/mudawen | 6f0161ab783536d7c5d695225ef28ce4947a46e3 | [
"MIT"
] | null | null | null | config.py | oyasr/mudawen | 6f0161ab783536d7c5d695225ef28ce4947a46e3 | [
"MIT"
] | null | null | null | import os
from dotenv import load_dotenv
load_dotenv()
basedir = os.path.abspath(os.path.dirname(__file__))
config = {
'development': DevConfig,
'testing': TestConfig,
'production': ProductionConfig,
'default': DevConfig
}
| 28.4 | 70 | 0.691421 |
407ce0ad1e21c01e8414bc4b63e17958aa42df9e | 998 | py | Python | experiments/async_tests/async_3.py | 10ks/py_utils | 54ce06dbd567b097deda1c7ef2d0a2265e5b243e | [
"MIT"
] | null | null | null | experiments/async_tests/async_3.py | 10ks/py_utils | 54ce06dbd567b097deda1c7ef2d0a2265e5b243e | [
"MIT"
] | null | null | null | experiments/async_tests/async_3.py | 10ks/py_utils | 54ce06dbd567b097deda1c7ef2d0a2265e5b243e | [
"MIT"
] | null | null | null | import asyncio
if __name__ == "__main__":
import time
s = time.perf_counter()
asyncio.run(main())
# Completing unfinished tasks (throws a warning)
# loop = asyncio.get_event_loop()
# loop.run_until_complete(main())
# pending = asyncio.Task.all_tasks()
# loop.run_until_complete(asyncio.gather(*pending))
elapsed = time.perf_counter() - s
print(f"{__file__} executed in {elapsed:0.2f} seconds.")
| 23.209302 | 80 | 0.607214 |
407d150b5548e5dc5c3decda923610fd51eb2141 | 1,438 | py | Python | vk_bot/mods/util/calculator.py | triangle1984/GLaDOS | 39dea7bf8043e791ef079ea1ac6616f95d5b5312 | [
"BSD-3-Clause"
] | 3 | 2019-12-12T05:48:34.000Z | 2020-12-07T19:23:41.000Z | vk_bot/mods/util/calculator.py | anar66/vk-bot | 39dea7bf8043e791ef079ea1ac6616f95d5b5312 | [
"BSD-3-Clause"
] | 1 | 2019-11-15T14:28:49.000Z | 2019-11-15T14:28:49.000Z | vk_bot/mods/util/calculator.py | triangle1984/vk-bot | 39dea7bf8043e791ef079ea1ac6616f95d5b5312 | [
"BSD-3-Clause"
] | 5 | 2019-11-20T14:20:30.000Z | 2022-02-05T10:37:01.000Z | # from vk_bot.core.modules.basicplug import BasicPlug
# import math
# class Calculator(BasicPlug):
# doc = ""
# command = ("",)
# def main(self):
# try:
# x = self.text[1]; x = int(x)
# encalc = self.text[2]; encalc = encalc.lower()
# y = self.text[3]; y = int(y)
# except:
# self.sendmsg(""" : / 2 + 2
# 2 , """)
# return
# if encalc == "+" or encalc == "":
# result = x + y
# elif encalc == "-" or encalc == "":
# result = x - y
# elif encalc == "*" or encalc == "":
# result = x * y
# elif encalc == "**" or encalc == "" or encalc == "^":
# if x > 999 or y > 999:
# return
# result = x ** y
# elif encalc == "":
# try:
# x / y
# except ZeroDivisionError:
# result = " ?"
# elif encalc == "":
# result = math.sqrt(x), math.sqrt(y)
# elif encalc == "":
# result = math.sin(x), math.sin(y)
# elif encalc == "":
# result = math.cos(x), math.cos(y)
# else:
# return
# self.sendmsg(f" : {result}")
| 36.871795 | 74 | 0.462448 |
407dc792a754cf8c4cf33cd4fb3c31fe49507ba3 | 9,201 | py | Python | sample-input/homogeneous/geometry.py | AI-Pranto/OpenMOC | 7f6ce4797aec20ddd916981a56a4ba54ffda9a06 | [
"MIT"
] | 97 | 2015-01-02T02:13:45.000Z | 2022-03-09T14:12:45.000Z | sample-input/homogeneous/geometry.py | AI-Pranto/OpenMOC | 7f6ce4797aec20ddd916981a56a4ba54ffda9a06 | [
"MIT"
] | 325 | 2015-01-07T17:43:14.000Z | 2022-02-21T17:22:00.000Z | sample-input/homogeneous/geometry.py | AI-Pranto/OpenMOC | 7f6ce4797aec20ddd916981a56a4ba54ffda9a06 | [
"MIT"
] | 73 | 2015-01-17T19:11:58.000Z | 2022-03-24T16:31:37.000Z | import openmoc
import openmoc.log as log
import openmoc.plotter as plotter
import openmoc.materialize as materialize

log.set_log_level('NORMAL')

###############################################################################
#                            Creating Materials                               #
###############################################################################

log.py_printf('NORMAL', 'Importing materials data from HDF5...')

# Multi-group cross sections for the C5G7 benchmark, loaded from the shared
# HDF5 file one directory up.
materials = openmoc.materialize.load_from_hdf5('c5g7-mgxs.h5', '../')

###############################################################################
#                            Creating Surfaces                                #
###############################################################################

log.py_printf('NORMAL', 'Creating surfaces...')

# A 10 cm cube centered on the origin.
xmin = openmoc.XPlane(x=-5.0, name='xmin')
xmax = openmoc.XPlane(x= 5.0, name='xmax')
ymin = openmoc.YPlane(y=-5.0, name='ymin')
ymax = openmoc.YPlane(y= 5.0, name='ymax')
zmin = openmoc.ZPlane(z=-5.0, name='zmin')
zmax = openmoc.ZPlane(z= 5.0, name='zmax')

# All six faces are reflective (infinite-medium style problem).
for boundary in (xmin, xmax, ymin, ymax, zmin, zmax):
    boundary.setBoundaryType(openmoc.REFLECTIVE)

###############################################################################
#                              Creating Cells                                 #
###############################################################################

log.py_printf('NORMAL', 'Creating cells...')

fuel = openmoc.Cell(name='fuel')
fuel.setFill(materials['UO2'])

# NOTE(review): the 'moderator' cell is also filled with UO2 and its universe
# is never placed in the lattice below. This looks intentional for a
# homogeneous benchmark, but confirm it should not use a moderator material.
moderator = openmoc.Cell(name='moderator')
moderator.setFill(materials['UO2'])

# Root cell bounded by the six reflective planes.
root_cell = openmoc.Cell(name='root cell')
root_cell.addSurface(halfspace=+1, surface=xmin)
root_cell.addSurface(halfspace=-1, surface=xmax)
root_cell.addSurface(halfspace=+1, surface=ymin)
root_cell.addSurface(halfspace=-1, surface=ymax)
root_cell.addSurface(halfspace=+1, surface=zmin)
root_cell.addSurface(halfspace=-1, surface=zmax)

###############################################################################
#                            Creating Universes                               #
###############################################################################

log.py_printf('NORMAL', 'Creating universes...')

fue_univ = openmoc.Universe(name='homogeneous fue cell')
fue_univ.addCell(fuel)

mod_univ = openmoc.Universe(name='homogeneous mod cell')
mod_univ.addCell(moderator)

root_universe = openmoc.Universe(name='root universe')
root_universe.addCell(root_cell)

###############################################################################
#                            Creating Lattices                                #
###############################################################################

log.py_printf('NORMAL', 'Creating simple 10 x 10 lattice...')

f = fue_univ

# Every one of the 10 x 10 x 10 positions holds the same homogeneous fuel
# universe, so build the nested list programmatically instead of spelling out
# 1000 identical entries by hand (same 3-level nesting as the explicit
# literal it replaces).
lattice = openmoc.Lattice(name='10x10 lattice')
lattice.setWidth(width_x=1.0, width_y=1.0, width_z=1.0)
lattice.setUniverses([[[f] * 10 for _ in range(10)] for _ in range(10)])

root_cell.setFill(lattice)

###############################################################################
#                          Creating the Geometry                              #
###############################################################################

log.py_printf('NORMAL', 'Creating geometry...')

geometry = openmoc.Geometry()
geometry.setRootUniverse(root_universe)
geometry.initializeFlatSourceRegions()
| 46.705584 | 79 | 0.29236 |
407e66ad31400c201f52210276cc27484a563068 | 22,314 | py | Python | google/ads/google_ads/v5/__init__.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | 1 | 2021-04-09T04:28:47.000Z | 2021-04-09T04:28:47.000Z | google/ads/google_ads/v5/__init__.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v5/__init__.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import sys
from google.ads.google_ads import util
if sys.version_info < (3, 6):
raise ImportError("This module requires Python 3.6 or later.")
_lazy_name_to_package_map = {
"account_budget_proposal_service_client": "google.ads.google_ads.v5.services",
"account_budget_service_client": "google.ads.google_ads.v5.services",
"account_link_service_client": "google.ads.google_ads.v5.services",
"ad_group_ad_asset_view_service_client": "google.ads.google_ads.v5.services",
"ad_group_ad_label_service_client": "google.ads.google_ads.v5.services",
"ad_group_ad_service_client": "google.ads.google_ads.v5.services",
"ad_group_audience_view_service_client": "google.ads.google_ads.v5.services",
"ad_group_bid_modifier_service_client": "google.ads.google_ads.v5.services",
"ad_group_criterion_label_service_client": "google.ads.google_ads.v5.services",
"ad_group_criterion_service_client": "google.ads.google_ads.v5.services",
"ad_group_criterion_simulation_service_client": "google.ads.google_ads.v5.services",
"ad_group_extension_setting_service_client": "google.ads.google_ads.v5.services",
"ad_group_feed_service_client": "google.ads.google_ads.v5.services",
"ad_group_label_service_client": "google.ads.google_ads.v5.services",
"ad_group_service_client": "google.ads.google_ads.v5.services",
"ad_group_simulation_service_client": "google.ads.google_ads.v5.services",
"ad_parameter_service_client": "google.ads.google_ads.v5.services",
"ad_schedule_view_service_client": "google.ads.google_ads.v5.services",
"ad_service_client": "google.ads.google_ads.v5.services",
"age_range_view_service_client": "google.ads.google_ads.v5.services",
"asset_service_client": "google.ads.google_ads.v5.services",
"batch_job_service_client": "google.ads.google_ads.v5.services",
"bidding_strategy_service_client": "google.ads.google_ads.v5.services",
"billing_setup_service_client": "google.ads.google_ads.v5.services",
"campaign_asset_service_client": "google.ads.google_ads.v5.services",
"campaign_audience_view_service_client": "google.ads.google_ads.v5.services",
"campaign_bid_modifier_service_client": "google.ads.google_ads.v5.services",
"campaign_budget_service_client": "google.ads.google_ads.v5.services",
"campaign_criterion_service_client": "google.ads.google_ads.v5.services",
"campaign_criterion_simulation_service_client": "google.ads.google_ads.v5.services",
"campaign_draft_service_client": "google.ads.google_ads.v5.services",
"campaign_experiment_service_client": "google.ads.google_ads.v5.services",
"campaign_extension_setting_service_client": "google.ads.google_ads.v5.services",
"campaign_feed_service_client": "google.ads.google_ads.v5.services",
"campaign_label_service_client": "google.ads.google_ads.v5.services",
"campaign_service_client": "google.ads.google_ads.v5.services",
"campaign_shared_set_service_client": "google.ads.google_ads.v5.services",
"carrier_constant_service_client": "google.ads.google_ads.v5.services",
"change_status_service_client": "google.ads.google_ads.v5.services",
"click_view_service_client": "google.ads.google_ads.v5.services",
"conversion_action_service_client": "google.ads.google_ads.v5.services",
"conversion_adjustment_upload_service_client": "google.ads.google_ads.v5.services",
"conversion_upload_service_client": "google.ads.google_ads.v5.services",
"currency_constant_service_client": "google.ads.google_ads.v5.services",
"custom_interest_service_client": "google.ads.google_ads.v5.services",
"customer_client_link_service_client": "google.ads.google_ads.v5.services",
"customer_client_service_client": "google.ads.google_ads.v5.services",
"customer_extension_setting_service_client": "google.ads.google_ads.v5.services",
"customer_feed_service_client": "google.ads.google_ads.v5.services",
"customer_label_service_client": "google.ads.google_ads.v5.services",
"customer_manager_link_service_client": "google.ads.google_ads.v5.services",
"customer_negative_criterion_service_client": "google.ads.google_ads.v5.services",
"customer_service_client": "google.ads.google_ads.v5.services",
"detail_placement_view_service_client": "google.ads.google_ads.v5.services",
"display_keyword_view_service_client": "google.ads.google_ads.v5.services",
"distance_view_service_client": "google.ads.google_ads.v5.services",
"domain_category_service_client": "google.ads.google_ads.v5.services",
"dynamic_search_ads_search_term_view_service_client": "google.ads.google_ads.v5.services",
"expanded_landing_page_view_service_client": "google.ads.google_ads.v5.services",
"extension_feed_item_service_client": "google.ads.google_ads.v5.services",
"feed_item_service_client": "google.ads.google_ads.v5.services",
"feed_item_target_service_client": "google.ads.google_ads.v5.services",
"feed_mapping_service_client": "google.ads.google_ads.v5.services",
"feed_placeholder_view_service_client": "google.ads.google_ads.v5.services",
"feed_service_client": "google.ads.google_ads.v5.services",
"gender_view_service_client": "google.ads.google_ads.v5.services",
"geo_target_constant_service_client": "google.ads.google_ads.v5.services",
"geographic_view_service_client": "google.ads.google_ads.v5.services",
"google_ads_field_service_client": "google.ads.google_ads.v5.services",
"google_ads_service_client": "google.ads.google_ads.v5.services",
"group_placement_view_service_client": "google.ads.google_ads.v5.services",
"hotel_group_view_service_client": "google.ads.google_ads.v5.services",
"hotel_performance_view_service_client": "google.ads.google_ads.v5.services",
"income_range_view_service_client": "google.ads.google_ads.v5.services",
"invoice_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_ad_group_keyword_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_ad_group_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_campaign_keyword_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_campaign_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_idea_service_client": "google.ads.google_ads.v5.services",
"keyword_plan_service_client": "google.ads.google_ads.v5.services",
"keyword_view_service_client": "google.ads.google_ads.v5.services",
"label_service_client": "google.ads.google_ads.v5.services",
"landing_page_view_service_client": "google.ads.google_ads.v5.services",
"language_constant_service_client": "google.ads.google_ads.v5.services",
"location_view_service_client": "google.ads.google_ads.v5.services",
"managed_placement_view_service_client": "google.ads.google_ads.v5.services",
"media_file_service_client": "google.ads.google_ads.v5.services",
"merchant_center_link_service_client": "google.ads.google_ads.v5.services",
"mobile_app_category_constant_service_client": "google.ads.google_ads.v5.services",
"mobile_device_constant_service_client": "google.ads.google_ads.v5.services",
"offline_user_data_job_service_client": "google.ads.google_ads.v5.services",
"operating_system_version_constant_service_client": "google.ads.google_ads.v5.services",
"paid_organic_search_term_view_service_client": "google.ads.google_ads.v5.services",
"parental_status_view_service_client": "google.ads.google_ads.v5.services",
"payments_account_service_client": "google.ads.google_ads.v5.services",
"product_bidding_category_constant_service_client": "google.ads.google_ads.v5.services",
"product_group_view_service_client": "google.ads.google_ads.v5.services",
"reach_plan_service_client": "google.ads.google_ads.v5.services",
"recommendation_service_client": "google.ads.google_ads.v5.services",
"remarketing_action_service_client": "google.ads.google_ads.v5.services",
"search_term_view_service_client": "google.ads.google_ads.v5.services",
"shared_criterion_service_client": "google.ads.google_ads.v5.services",
"shared_set_service_client": "google.ads.google_ads.v5.services",
"shopping_performance_view_service_client": "google.ads.google_ads.v5.services",
"third_party_app_analytics_link_service_client": "google.ads.google_ads.v5.services",
"topic_constant_service_client": "google.ads.google_ads.v5.services",
"topic_view_service_client": "google.ads.google_ads.v5.services",
"user_data_service_client": "google.ads.google_ads.v5.services",
"user_interest_service_client": "google.ads.google_ads.v5.services",
"user_list_service_client": "google.ads.google_ads.v5.services",
"user_location_view_service_client": "google.ads.google_ads.v5.services",
"video_service_client": "google.ads.google_ads.v5.services",
"account_budget_proposal_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"account_budget_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"account_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_ad_asset_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_ad_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_ad_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_audience_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_bid_modifier_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_criterion_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_criterion_simulation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_extension_setting_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_group_simulation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_parameter_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_schedule_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"ad_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"age_range_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"asset_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"batch_job_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"bidding_strategy_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"billing_setup_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_asset_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_audience_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_bid_modifier_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_budget_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_criterion_simulation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_draft_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_experiment_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_extension_setting_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"campaign_shared_set_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"carrier_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"change_status_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"click_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"conversion_action_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"conversion_adjustment_upload_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"conversion_upload_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"currency_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"custom_interest_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_client_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_client_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_extension_setting_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_manager_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_negative_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"customer_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"detail_placement_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"display_keyword_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"distance_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"domain_category_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"dynamic_search_ads_search_term_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"expanded_landing_page_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"extension_feed_item_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_item_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_item_target_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_mapping_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_placeholder_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"feed_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"gender_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"geo_target_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"geographic_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"google_ads_field_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"google_ads_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"group_placement_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"hotel_group_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"hotel_performance_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"income_range_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"invoice_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_ad_group_keyword_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_ad_group_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_campaign_keyword_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_campaign_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_idea_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_plan_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"keyword_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"label_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"landing_page_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"language_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"location_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"managed_placement_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"media_file_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"merchant_center_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"mobile_app_category_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"mobile_device_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"offline_user_data_job_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"operating_system_version_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"paid_organic_search_term_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"parental_status_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"payments_account_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"product_bidding_category_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"product_group_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"reach_plan_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"recommendation_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"remarketing_action_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"search_term_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"shared_criterion_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"shared_set_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"shopping_performance_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"third_party_app_analytics_link_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"topic_constant_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"topic_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_data_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_interest_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_list_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"user_location_view_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
"video_service_grpc_transport": "google.ads.google_ads.v5.services.transports",
}
# Background on how this behaves: https://www.python.org/dev/peps/pep-0562/
def __getattr__(name):  # Requires Python >= 3.7
    """Lazily import sub-modules and synthesize classes on first access.

    Uses the PEP 562 module-level ``__getattr__`` hook. Every resolved value
    is cached in ``globals()`` so each name is only computed once per process.
    """
    if name == "__all__":
        exported = sorted(
            util.convert_snake_case_to_upper_case(key)
            for key in _lazy_name_to_package_map
        )
        globals()["__all__"] = exported
        return exported
    if name.endswith("Transport"):
        # Resolve the containing sub-module first (recursively through this
        # hook), then re-subclass so the class appears to live in this module.
        submodule = __getattr__(util.convert_upper_case_to_snake_case(name))
        base = getattr(submodule, name)
        transport_cls = type(name, (base,), {"__doc__": base.__doc__})
        globals()[name] = transport_cls
        return transport_cls
    if name.endswith("ServiceClient"):
        submodule = __getattr__(util.convert_upper_case_to_snake_case(name))
        # Attach the enums module as a class attribute for caller convenience.
        enums_module = __getattr__("enums")
        base = getattr(submodule, name)
        client_cls = type(
            name,
            (base,),
            {"__doc__": base.__doc__, "enums": enums_module},
        )
        globals()[name] = client_cls
        return client_cls
    if name == "enums":
        module = importlib.import_module("google.ads.google_ads.v5.services.enums")
        globals()[name] = module
        return module
    if name == "types":
        module = importlib.import_module("google.ads.google_ads.v5.types")
        globals()[name] = module
        return module
    if name in _lazy_name_to_package_map:
        module = importlib.import_module(
            f"{_lazy_name_to_package_map[name]}.{name}"
        )
        globals()[name] = module
        return module
    raise AttributeError(f"unknown sub-module {name!r}.")
# Python < 3.7 has no native module-level __getattr__ (PEP 562), so install
# the pep562 backport to emulate the lazy-loading hook defined above.
if not sys.version_info >= (3, 7):
    from pep562 import Pep562
    Pep562(__name__)
| 71.519231 | 113 | 0.791118 |
407f96b82e23f251ebe7b0d09ba3c8416a7e9d98 | 5,279 | py | Python | PNN/model.py | jingxiufenghua/rec-model | 23204f70fc1bf384d3cdd0cc85e43117d3394074 | [
"MIT"
] | 1,323 | 2020-08-24T02:34:25.000Z | 2022-03-31T06:03:28.000Z | PNN/model.py | yiLinMaster/Recommender-System-with-TF2.0 | cfc7b3fbd4ba2d9157a78938e6bdaeba7df82822 | [
"MIT"
] | 65 | 2020-08-25T06:07:41.000Z | 2022-03-18T20:10:53.000Z | PNN/model.py | yiLinMaster/Recommender-System-with-TF2.0 | cfc7b3fbd4ba2d9157a78938e6bdaeba7df82822 | [
"MIT"
] | 395 | 2020-08-24T00:57:08.000Z | 2022-03-31T12:41:13.000Z | """
Created on July 20, 2020
Updated on May 19, 2021
model: Product-based Neural Networks for User Response Prediction
@author: Ziyao Geng(zggzy1996@163.com)
"""
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.regularizers import l2
from tensorflow.keras.layers import Embedding, Dense, Layer, Dropout, Input
from modules import DNN
| 47.990909 | 114 | 0.544232 |
407fa3643267388dca73bf3b3496b61a0c5f9491 | 314 | py | Python | exercicio3.py | DrokaGit/-infosatc-lp-avaliativo-02 | 6bb78ce84ac325c866201ff538f426d6e7a72ab5 | [
"MIT"
] | null | null | null | exercicio3.py | DrokaGit/-infosatc-lp-avaliativo-02 | 6bb78ce84ac325c866201ff538f426d6e7a72ab5 | [
"MIT"
] | null | null | null | exercicio3.py | DrokaGit/-infosatc-lp-avaliativo-02 | 6bb78ce84ac325c866201ff538f426d6e7a72ab5 | [
"MIT"
] | null | null | null | nume1 = int(input("Digite um numero"))
nume2 = int(input("Digite um numero"))
nume3 = int(input("Digite um numero"))
nume4 = int(input("Digite um numero"))
nume5 = int(input("Digite um numero"))
table = [nume1,nume2,nume3,nume4,nume5]
tableM = (float((nume1 + nume2 + nume3 + nume4 + nume5)))
print(float(tableM)) | 34.888889 | 57 | 0.691083 |
40804fd1f1dd57a07519de8f44b10f0b6f6d1a54 | 274 | py | Python | platonic/platonic/box/implementation.py | anatoly-scherbakov/platonic | b2d239e19f3ebf5a562b6aabcd4b82492bb03564 | [
"MIT"
] | 1 | 2019-11-01T09:08:50.000Z | 2019-11-01T09:08:50.000Z | platonic/platonic/box/implementation.py | anatoly-scherbakov/platonic | b2d239e19f3ebf5a562b6aabcd4b82492bb03564 | [
"MIT"
] | null | null | null | platonic/platonic/box/implementation.py | anatoly-scherbakov/platonic | b2d239e19f3ebf5a562b6aabcd4b82492bb03564 | [
"MIT"
] | null | null | null | from typing import TypeVar
from .abstract import AbstractBox
T = TypeVar('T')
| 15.222222 | 33 | 0.635036 |
408169e338ef415cc1c1cefeaa3179019885ca4e | 79 | py | Python | Schemas/Subject.py | esot0/jmsa-tutoring-backend | f35000c73fbbb31f9b4dcca36e40854dc2e06d23 | [
"MIT"
] | null | null | null | Schemas/Subject.py | esot0/jmsa-tutoring-backend | f35000c73fbbb31f9b4dcca36e40854dc2e06d23 | [
"MIT"
] | null | null | null | Schemas/Subject.py | esot0/jmsa-tutoring-backend | f35000c73fbbb31f9b4dcca36e40854dc2e06d23 | [
"MIT"
] | null | null | null | from mongoengine import * | 19.75 | 27 | 0.746835 |
40826ce560682ad3ad560f8fecc12e0ab6658bc0 | 767 | py | Python | 39. Combination Sum.py | MapleLove2014/leetcode | 135c79ebe98815d0e38280edfadaba90e677aff5 | [
"Apache-2.0"
] | 1 | 2020-12-04T07:38:16.000Z | 2020-12-04T07:38:16.000Z | 39. Combination Sum.py | MapleLove2014/leetcode | 135c79ebe98815d0e38280edfadaba90e677aff5 | [
"Apache-2.0"
] | null | null | null | 39. Combination Sum.py | MapleLove2014/leetcode | 135c79ebe98815d0e38280edfadaba90e677aff5 | [
"Apache-2.0"
] | null | null | null |
# Smoke-test the solver on the two LeetCode 39 sample inputs.
# NOTE(review): Solution is not defined in the visible code -- presumably
# declared earlier in this module; confirm before running.
s = Solution()
print(s.combinationSum([2,3,6,7], 7))
print(s.combinationSum([2,3,5], 8))
| 34.863636 | 114 | 0.555411 |
4082bcb5f99112c93d2d504f08622c615955a33b | 1,204 | py | Python | crawl_comments.py | tosh1ki/NicoCrawler | 236029f103e01de9e61a042759dc9bf2cb7d3d55 | [
"MIT"
] | 1 | 2015-03-04T14:06:33.000Z | 2015-03-04T14:06:33.000Z | crawl_comments.py | tosh1ki/NicoCrawler | 236029f103e01de9e61a042759dc9bf2cb7d3d55 | [
"MIT"
] | 2 | 2015-03-04T02:48:18.000Z | 2015-03-04T14:18:32.000Z | crawl_comments.py | tosh1ki/NicoCrawler | 236029f103e01de9e61a042759dc9bf2cb7d3d55 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__doc__ = '''
Crawl comment from nicovideo.jp
Usage:
crawl_comments.py --url <url> --mail <mail> --pass <pass> [--sqlite <sqlite>] [--csv <csv>]
Options:
--url <url>
--mail <mail>
--pass <pass>
--sqlite <sqlite> (optional) path of comment DB [default: comments.sqlite3]
--csv <csv> (optional) path of csv file contains urls of videos [default: crawled.csv]
'''
from docopt import docopt
from nicocrawler.nicocrawler import NicoCrawler
if __name__ == '__main__':
    # Parse CLI options against the module docstring (docopt usage spec).
    args = docopt(__doc__)
    url_channel_toppage = args['--url']
    login_mail = args['--mail']
    login_pass = args['--pass']
    path_sqlite = args['--sqlite']
    path_csv = args['--csv']
    # Log in to nicovideo and open (or create) the local comment database.
    ncrawler = NicoCrawler(login_mail, login_pass)
    ncrawler.connect_sqlite(path_sqlite)
    # Collect all video URLs for the channel's season and seed the CSV
    # work list from what is already stored in the database.
    df = ncrawler.get_all_video_url_of_season(url_channel_toppage)
    ncrawler.initialize_csv_from_db(path_csv)
    # # 1~300
    # url = 'http://www.nicovideo.jp/ranking/fav/daily/all'
    # ncrawler.initialize_csv_from_url(url, path_csv, max_page=3)
    # ncrawler.get_all_comments_of_csv(path_csv, max_n_iter=1)
| 26.173913 | 102 | 0.671096 |
40830eea2a3d7f03b3b7dae05b19fdc253a0e60b | 2,095 | py | Python | sif/greedy_sim_max.py | longland-m/wikigen | 459ba7bf9d3ca9584de65388cc9b9a15fa16a69f | [
"MIT"
] | null | null | null | sif/greedy_sim_max.py | longland-m/wikigen | 459ba7bf9d3ca9584de65388cc9b9a15fa16a69f | [
"MIT"
] | 2 | 2021-08-25T16:04:29.000Z | 2022-02-10T01:50:44.000Z | sif/greedy_sim_max.py | longland-m/wikigen | 459ba7bf9d3ca9584de65388cc9b9a15fa16a69f | [
"MIT"
] | null | null | null | # Functions to do the greedy similarity maximisation for article:node assignments
# All code is original
import random
def computeSimSum(G, similarityMatrix, asgn):
    """Compute the total similarity sum for the current node:article assignment.

    :param G: graph-like object; ``G[j]`` iterates node j's neighbours
        (holds for networkx graphs and for plain adjacency lists)
    :param similarityMatrix: 2-D array; ``similarityMatrix[a, b]`` is the
        similarity between articles a and b
    :param asgn: ``asgn[n]`` is the article currently assigned to node n
    :return: total of ``similarityMatrix[asgn[j], asgn[i]]`` for each node j
        and each neighbour i of j
    """
    # Sum over a generator instead of a materialised list (no per-edge list
    # allocation), and iterate G[j] directly -- the list() copy was redundant.
    return sum(
        similarityMatrix[asgn[j], asgn[i]]
        for j in range(len(G))
        for i in G[j]
    )
| 32.230769 | 82 | 0.651074 |
40836d6113e4a1359c6e3078275ec9078aa642e4 | 23,463 | py | Python | plab/photon_counters/Idq801.py | joamatab/photonic-coupling-drivers | c12581d8e2158a292e1c585e45c0207c8129c0f1 | [
"MIT"
] | null | null | null | plab/photon_counters/Idq801.py | joamatab/photonic-coupling-drivers | c12581d8e2158a292e1c585e45c0207c8129c0f1 | [
"MIT"
] | null | null | null | plab/photon_counters/Idq801.py | joamatab/photonic-coupling-drivers | c12581d8e2158a292e1c585e45c0207c8129c0f1 | [
"MIT"
] | null | null | null | import sys
import numpy as np
import shutil
import time
import itertools as it
import collections
import ctypes as ct
import os
import copy
sys.path.append(os.path.dirname(__file__))
from ThreadStoppable import ThreadStoppable
#
# NOTE(review): runs main() when executed as a script; main() is not visible
# in this portion of the module -- confirm it is defined before running.
if __name__ == "__main__":
    main()
| 35.931087 | 112 | 0.592507 |
408378ae2d1cd6ca599deacc2843f436a637b9b1 | 7,472 | py | Python | IRIS/IRIS_formatting.py | Xinglab/IRIS | dc3c172eae9083daf57ce0e71c4fe322ab5cc928 | [
"BSD-2-Clause-FreeBSD"
] | 7 | 2019-11-21T08:42:37.000Z | 2021-08-13T15:49:18.000Z | IRIS/IRIS_formatting.py | Xinglab/IRIS | dc3c172eae9083daf57ce0e71c4fe322ab5cc928 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | IRIS/IRIS_formatting.py | Xinglab/IRIS | dc3c172eae9083daf57ce0e71c4fe322ab5cc928 | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2021-05-08T08:22:38.000Z | 2022-01-20T23:43:03.000Z | import sys, numpy, argparse, os
if __name__ == '__main__':
main()
| 42.454545 | 204 | 0.722564 |
408407cd45d1d31df97defaffbefa6540d0ab484 | 7,444 | py | Python | quests/dataflow_python/streaming_event_generator.py | Glairly/introduction_to_tensorflow | aa0a44d9c428a6eb86d1f79d73f54c0861b6358d | [
"Apache-2.0"
] | 2 | 2022-01-06T11:52:57.000Z | 2022-01-09T01:53:56.000Z | quests/dataflow_python/streaming_event_generator.py | Glairly/introduction_to_tensorflow | aa0a44d9c428a6eb86d1f79d73f54c0861b6358d | [
"Apache-2.0"
] | null | null | null | quests/dataflow_python/streaming_event_generator.py | Glairly/introduction_to_tensorflow | aa0a44d9c428a6eb86d1f79d73f54c0861b6358d | [
"Apache-2.0"
] | null | null | null | # This program reads a file representing web server logs in common log format and streams them into a PubSub topic
# with lag characteristics as determined by command-line arguments
import argparse
from google.cloud import pubsub_v1
import time
from datetime import datetime, timezone
import random
from anytree.importer import DictImporter
import json
from multiprocessing import Process
# Command-line interface: taxonomy/users inputs, on/off transition
# probabilities, publish lag bound, and the Pub/Sub destination.
parser = argparse.ArgumentParser(__file__, description="event_generator")
parser.add_argument("--taxonomy", "-x", dest="taxonomy_fp",
                    help="A .json file representing a taxonomy of web resources",
                    default="taxonomy.json")
parser.add_argument("--users_fp", "-u", dest="users_fp",
                    help="A .csv file of users",
                    default="users.csv")
parser.add_argument("--off_to_on", "-off", dest="off_to_on_prob", type=float,
                    help="A float representing the probability that a user who is offline will come online",
                    default=.25)
parser.add_argument("--on_to_off", "-on", dest="on_to_off_prob", type=float,
                    help="A float representing the probability that a user who is online will go offline",
                    default=.1)
parser.add_argument("--max_lag_millis", '-l', dest="max_lag_millis", type=int,
                    help="An integer representing the maximum amount of lag in millisecond", default=250)
parser.add_argument("--project_id", "-p", type=str, dest="project_id", help="A GCP Project ID", required=True)
parser.add_argument("--topic_name", "-t", dest="topic_name", type=str,
                    help="The name of the topic where the messages to be published", required=True)
# Mean gap between consecutive events per simulated user (seconds); each gap
# is drawn uniformly from [0, 2 * avg], giving this average.
avg_secs_between_events = 5
# Parsed once at import time; unpacked into module-level settings consumed by
# the generator functions below.
args = parser.parse_args()
taxonomy_fp = args.taxonomy_fp
users_fp = args.users_fp
online_to_offline_probability = args.on_to_off_prob
offline_to_online_probability = args.off_to_on_prob
max_lag_millis = args.max_lag_millis
project_id = args.project_id
topic_name = args.topic_name
# Synthetic HTTP log characteristics: payload size range, sampled verbs and
# status codes, and the field order of every emitted log record.
min_file_size_bytes = 100
max_file_size_bytes = 500
verbs = ["GET"]
responses = [200]
log_fields = ["ip", "user_id", "lat", "lng", "timestamp", "http_request",
              "http_response", "num_bytes", "user_agent"]
def extract_resources(taxonomy_filepath):
    """
    Reads a .json representing a taxonomy and returns
    a data structure representing their hierarchical relationship
    :param taxonomy_filepath: a string representing a path to a .json file
    :return: Node representing root of taxonomic tree
    """
    # The 'with' block closes the file on all paths. The previous explicit
    # try/finally called fp.close() even when open() itself failed, raising
    # NameError (fp unbound) and masking the original error; it also
    # double-closed the file on success.
    with open(taxonomy_filepath, 'r') as fp:
        json_data = json.load(fp)  # parse straight from the file handle
    return DictImporter().import_(json_data)
def read_users(users_fp):
    """Load users from a .csv file into a list of dicts, one per data row.

    :param users_fp: path to a .csv whose first line names the fields
    :return: a list of dicts keyed by the header fields (string values)
    """
    with open(users_fp, 'r') as fp:
        header = fp.readline().rstrip().split(",")
        # Remaining lines are data rows; zip each against the header.
        return [dict(zip(header, row.rstrip().split(","))) for row in fp]
def sleep_then_publish_burst(burst, publisher, topic_path):
    """Wait a random lag of up to max_lag_millis, then publish the burst.

    :param burst: a list of dictionaries, each representing an event
    :param publisher: a PubSub publisher
    :param topic_path: a topic path for PubSub
    """
    # Jitter simulates network/client lag before the burst reaches Pub/Sub.
    time.sleep(random.uniform(0, max_lag_millis / 1000))
    publish_burst(burst, publisher, topic_path)
def publish_burst(burst, publisher, topic_path):
    """Publish every event in *burst* to Pub/Sub.

    Each event dict is serialized to JSON, UTF-8 encoded, and published with
    its own 'timestamp' value forwarded as a message attribute.

    :param burst: a list of dictionaries, each representing an event
    :param publisher: a PubSub publisher
    :param topic_path: a topic path for PubSub
    """
    for event in burst:
        payload = json.dumps(event).encode('utf-8')
        publisher.publish(topic_path, data=payload, timestamp=event['timestamp'])
def create_user_process(user, root):
    """
    Code for continuously-running process representing a user publishing
    events to pubsub. Never returns: loops forever generating events and
    switching between online (publish immediately) and offline (buffer,
    then flush on reconnect) states.
    :param user: a dictionary representing characteristics of the user
    :param root: an instance of AnyNode representing the home page of a website
    :return:
    """
    publisher = pubsub_v1.PublisherClient()
    topic_path = publisher.topic_path(project_id, topic_name)
    # Per-user session state: current page, connectivity, offline buffer.
    user['page'] = root
    user['is_online'] = True
    user['offline_events'] = []
    while True:
        # Uniform over [0, 2*avg] has mean avg_secs_between_events.
        time_between_events = random.uniform(0, avg_secs_between_events * 2)
        time.sleep(time_between_events)
        # One draw drives this iteration's state transition.
        prob = random.random()
        event = generate_event(user)
        if user['is_online']:
            if prob < online_to_offline_probability:
                # Drop offline: this event starts the offline buffer.
                user['is_online'] = False
                user['offline_events'] = [event]
            else:
                # Still online: publish the single event right away.
                sleep_then_publish_burst([event], publisher, topic_path)
        else:
            # Offline: keep accumulating events in the buffer.
            user['offline_events'].append(event)
            if prob < offline_to_online_probability:
                # Reconnect: flush everything buffered as one burst.
                user['is_online'] = True
                sleep_then_publish_burst(user['offline_events'], publisher, topic_path)
                user['offline_events'] = []
def generate_event(user):
    """Advance *user* to their next page and return a log record for it.

    Mutates ``user['page']`` in place, then assembles a common-log-style
    event dict keyed by the module-level ``log_fields``.

    :param user: dict of user attributes ('ip', 'id', 'lat', 'lng',
        'user_agent', 'page')
    :return: dict mapping each log field name to this event's value
    """
    user['page'] = get_next_page(user)
    resource = str(user['page'].name)
    timestamp_str = datetime.now(tz=timezone.utc).strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    # Keep these three random draws in this exact order so the RNG stream
    # matches across implementations.
    num_bytes = random.choice(range(min_file_size_bytes, max_file_size_bytes))
    request_line = "\"{} {} HTTP/1.0\"".format(random.choice(verbs), resource)
    status = random.choice(responses)
    values = [user['ip'], user['id'], float(user['lat']), float(user['lng']),
              timestamp_str, request_line, status, num_bytes, user['user_agent']]
    return dict(zip(log_fields, values))
def get_next_page(user):
    """Pick the user's next page uniformly from the navigable neighbours.

    Candidates are: the current page itself, its children (when it is not a
    leaf), and its parent (when it has one).

    :param user: dict whose 'page' entry is the current taxonomy node
    :return: the node the user navigates to next
    """
    current = user['page']
    candidates = [current]
    if not current.is_leaf:
        candidates.extend(current.children)
    if current.parent is not None:
        candidates.append(current.parent)
    return random.choice(candidates)
if __name__ == '__main__':
    users = read_users(users_fp)
    root = extract_resources(taxonomy_fp)
    # One long-lived OS process per user; each publishes its own event stream.
    processes = [Process(target=create_user_process, args=(user, root))
                 for user in users]
    [process.start() for process in processes]
    # Keep the parent alive so the children keep running; interrupt to stop.
    while True:
        time.sleep(1)
4084a9455c8745ebe8cdb17a3177996a15d02016 | 210 | py | Python | src/models/configs/database.py | Nardri/rbac-service | c5cf6baf60e95a7790156c85e37c76c697efd585 | [
"MIT"
] | null | null | null | src/models/configs/database.py | Nardri/rbac-service | c5cf6baf60e95a7790156c85e37c76c697efd585 | [
"MIT"
] | null | null | null | src/models/configs/database.py | Nardri/rbac-service | c5cf6baf60e95a7790156c85e37c76c697efd585 | [
"MIT"
] | null | null | null | """Database setup"""
# Third party library
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
# initialization of the database and migration
database = SQLAlchemy()
migrate = Migrate()
| 21 | 46 | 0.795238 |
4085bccb38fa4dfee0e895626450b9f141da766f | 4,111 | py | Python | postreise/plot/plot_heatmap.py | lanesmith/PostREISE | 69d47968cf353bca57aa8b587cc035d127fa424f | [
"MIT"
] | 1 | 2022-01-31T16:53:40.000Z | 2022-01-31T16:53:40.000Z | postreise/plot/plot_heatmap.py | lanesmith/PostREISE | 69d47968cf353bca57aa8b587cc035d127fa424f | [
"MIT"
] | 71 | 2021-01-22T20:09:47.000Z | 2022-03-30T16:53:18.000Z | postreise/plot/plot_heatmap.py | lanesmith/PostREISE | 69d47968cf353bca57aa8b587cc035d127fa424f | [
"MIT"
] | 7 | 2021-04-02T14:45:21.000Z | 2022-01-17T22:23:38.000Z | import datetime as dt
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import pandas as pd
from powersimdata.input.check import _check_time_series
from postreise.analyze.time import change_time_zone
def plot_heatmap(
    series,
    time_zone=None,
    time_zone_label=None,
    title=None,
    cmap="PiYG",
    scale=None,
    save_filename=None,
    origin="upper",
    vmin=None,
    vmax=None,
    cbar_format=None,
    cbar_tick_values=None,
    cbar_label=None,
    cbar_tick_labels=None,
    contour_levels=None,
    figsize=(16, 8),
):
    """Show time-series values via an imshow where each column is one color-coded day.
    :param pandas.Series series: a time-series of values to be color-coded.
    :param str time_zone: a time zone to be passed as `tz` kwarg to
        :func:`postreise.analyze.time.change_time_zone`.
    :param str time_zone_label: a time zone label to be added to the y axis label.
    :param str title: a title to be added to the figure.
    :param str/matplotlib.colors.Colormap cmap: colormap specification to be passed
        as `cmap` kwarg to :func:`matplotlib.pyplot.imshow`.
    :param int/float scale: a scaling factor to be applied to the series values.
    :param str save_filename: a path to save the figure to.
    :param str origin: the vertical location of the origin, either "upper" or "lower".
    :param int/float vmin: Minimum value for coloring, to be passed as `vmin` kwarg to
        :func:`matplotlib.pyplot.imshow`.
    :param int/float vmax: Maximum value for coloring, to be passed as `vmax` kwarg to
        :func:`matplotlib.pyplot.imshow`.
    :param str/matplotlib.ticker.Formatter cbar_format: a formatter for colorbar labels,
        to be passed as `format` kwarg to :func:`matplotlib.pyplot.colorbar`.
    :param iterable cbar_tick_values: colorbar tick locations, to be passed as
        `ticks` kwarg to :func:`matplotlib.pyplot.colorbar`.
    :param str cbar_label: axis label for colorbar.
    :param iterable cbar_tick_labels: colorbar tick labels.
    :param iterable contour_levels: values at which to draw contours, passed as `levels`
        kwarg to :func:`matplotlib.pyplot.contour`.
    :param tuple(int/float, int/float) figsize: size of figure.
    """
    _check_time_series(series, "series")
    # Reindex onto a strict hourly grid so every (date, hour) cell exists;
    # hours absent from the input become NaN and render as gaps.
    df = series.to_frame(name="values").asfreq("H")
    # NOTE(review): the y-axis limits below use only this first year --
    # assumes the series does not span multiple calendar years; confirm.
    year = df.index[0].year
    if time_zone is not None:
        df = change_time_zone(df, time_zone)
    df["date"] = df.index.date
    df["hour"] = df.index.hour
    # Reshape to a (date x hour) matrix; after the transpose below, each
    # column of the image is one day.
    df_reshaped = pd.pivot(
        df,
        index="date",
        columns="hour",
        values="values",
    )
    # Convert date/hour ranges to matplotlib float day-numbers for `extent`.
    xlims = mdates.date2num([df_reshaped.index[0], df_reshaped.index[-1]])
    ylims = mdates.date2num([dt.datetime(year, 1, 1, 0), dt.datetime(year, 1, 1, 23)])
    if scale is not None:
        df_reshaped *= scale
    fig = plt.figure(figsize=figsize)
    ax = fig.add_subplot()
    # if necessary, flip ylims so labels follow data from top to bottom
    extent = [*xlims, *ylims] if origin == "lower" else [*xlims, ylims[1], ylims[0]]
    im = plt.imshow(
        df_reshaped.T,
        cmap=cmap,
        aspect="auto",
        extent=extent,
        origin=origin,
        vmin=vmin,
        vmax=vmax,
    )
    if contour_levels is not None:
        # Same extent/origin as the image so contours align with the cells.
        ax.contour(df_reshaped.T, extent=extent, levels=contour_levels, origin=origin)
    # Treat both axes as date axes: x labeled by month/day, y by hour:minute.
    date_format = mdates.DateFormatter("%m/%d")
    ax.xaxis_date()
    ax.xaxis.set_major_formatter(date_format)
    ax.set_xlabel("Date")
    time_format = mdates.DateFormatter("%H:%M")
    ax.yaxis_date()
    ax.yaxis.set_major_formatter(time_format)
    y_axis_label = "Time" if time_zone_label is None else f"Time {time_zone_label}"
    ax.set_ylabel(y_axis_label)
    cbar = fig.colorbar(im, format=cbar_format, ticks=cbar_tick_values)
    if cbar_label is not None:
        cbar.set_label(cbar_label)
    if title is not None:
        plt.title(title)
    # Custom tick labels are applied last so they override the formatter.
    if cbar_tick_labels is not None:
        cbar.ax.set_yticklabels(cbar_tick_labels)
    if save_filename is not None:
        plt.savefig(save_filename, bbox_inches="tight")
| 37.036036 | 88 | 0.683289 |
4086e4dd21e9a774c97734bcd63cd0233cf32c3d | 4,000 | py | Python | tensorflow_federated/python/simulation/file_per_user_client_data.py | houcharlie/federated-legacy | cb10a9cdcea33288f8113e7445782d21c8c65f81 | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/simulation/file_per_user_client_data.py | houcharlie/federated-legacy | cb10a9cdcea33288f8113e7445782d21c8c65f81 | [
"Apache-2.0"
] | null | null | null | tensorflow_federated/python/simulation/file_per_user_client_data.py | houcharlie/federated-legacy | cb10a9cdcea33288f8113e7445782d21c8c65f81 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of the ClientData abstract base class."""
import collections
import os.path
from typing import Callable, Mapping
import tensorflow as tf
from tensorflow_federated.python import core as tff
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.simulation import client_data
from tensorflow_federated.python.tensorflow_libs import tensor_utils
| 36.363636 | 80 | 0.74325 |
4086e6c92cd0f6bf0670ff63d76bbec71943f194 | 162 | py | Python | 20-Blog_Clone_Project/blog_project_Practice/blog/admin.py | andy2167565/Django-Bootcamp-Practice | f08d2866382db96060450d4dbd1ffaca7243f623 | [
"MIT"
] | null | null | null | 20-Blog_Clone_Project/blog_project_Practice/blog/admin.py | andy2167565/Django-Bootcamp-Practice | f08d2866382db96060450d4dbd1ffaca7243f623 | [
"MIT"
] | null | null | null | 20-Blog_Clone_Project/blog_project_Practice/blog/admin.py | andy2167565/Django-Bootcamp-Practice | f08d2866382db96060450d4dbd1ffaca7243f623 | [
"MIT"
] | null | null | null | from django.contrib import admin
from blog.models import Post, Comment
# Register your models here.
admin.site.register(Post)
admin.site.register(Comment)
| 23.142857 | 38 | 0.777778 |
408710371dd0d37abadd9978ea2c4a4f85a8ec3b | 6,459 | py | Python | tests/compilation/request/test_request_compiler.py | ymoch/preacher | ae68170d14c72791884e91b20054bd13a79b52d0 | [
"MIT"
] | 3 | 2019-08-01T03:14:49.000Z | 2020-01-31T08:55:22.000Z | tests/compilation/request/test_request_compiler.py | ymoch/preacher | ae68170d14c72791884e91b20054bd13a79b52d0 | [
"MIT"
] | 353 | 2019-04-14T14:53:28.000Z | 2022-03-11T03:26:08.000Z | tests/compilation/request/test_request_compiler.py | ymoch/preacher | ae68170d14c72791884e91b20054bd13a79b52d0 | [
"MIT"
] | 1 | 2020-08-01T06:23:08.000Z | 2020-08-01T06:23:08.000Z | from unittest.mock import NonCallableMock, sentinel
from pytest import mark, raises, fixture
from preacher.compilation.argument import Argument
from preacher.compilation.error import CompilationError, NamedNode, IndexedNode
from preacher.compilation.request.request import RequestCompiler, RequestCompiled
from preacher.compilation.request.request_body import RequestBodyCompiler
from preacher.core.request import Method
PKG = "preacher.compilation.request.request"
def test_given_an_empty_mapping(compiler: RequestCompiler):
    # Compiling an empty mapping must leave every request field at the
    # compiler's configured defaults.
    compiled = compiler.compile({})
    assert compiled.method is sentinel.default_method
    assert compiled.path is sentinel.default_path
    assert compiled.headers is sentinel.default_headers
    assert compiled.params is sentinel.default_params
    assert compiled.body is sentinel.default_body
def test_given_an_invalid_params(compiler: RequestCompiler, mocker):
    # A failure inside compile_url_params must be re-raised with the
    # "params" key prepended to the compilation error path.
    compile_params = mocker.patch(f"{PKG}.compile_url_params")
    compile_params.side_effect = CompilationError("msg", node=NamedNode("x"))
    with raises(CompilationError) as error_info:
        compiler.compile({"params": sentinel.params})
    assert error_info.value.path == [NamedNode("params"), NamedNode("x")]
    compile_params.assert_called_once_with(sentinel.params, None)
def test_given_valid_params(compiler: RequestCompiler, mocker):
    # Valid params are delegated to compile_url_params together with the
    # compilation arguments, and its result is stored verbatim.
    compile_params = mocker.patch(f"{PKG}.compile_url_params")
    compile_params.return_value = sentinel.compiled_params
    compiled = compiler.compile({"params": sentinel.params}, sentinel.args)
    assert compiled.params == sentinel.compiled_params
    compile_params.assert_called_once_with(sentinel.params, sentinel.args)
def test_given_invalid_body(compiler: RequestCompiler, body):
    # A failure inside the body compiler must be re-raised with the
    # "body" key prepended to the compilation error path.
    body.compile.side_effect = CompilationError("x", node=IndexedNode(1))
    with raises(CompilationError) as error_info:
        compiler.compile({"body": sentinel.body_obj})
    assert error_info.value.path == [NamedNode("body"), IndexedNode(1)]
    body.compile.assert_called_once_with(sentinel.body_obj, None)
def test_given_valid_body(compiler: RequestCompiler, body):
    # A valid body object is delegated to the body compiler together with
    # the compilation arguments, and its result is stored verbatim.
    body.compile.return_value = sentinel.body
    compiled = compiler.compile({"body": sentinel.body_obj}, sentinel.args)
    assert compiled.body is sentinel.body
    body.compile.assert_called_once_with(sentinel.body_obj, sentinel.args)
def test_given_a_string(compiler: RequestCompiler):
    # A bare string (resolved here through an Argument placeholder) is
    # treated as the request path; every other field keeps its default.
    compiled = compiler.compile(Argument("path"), {"path": "/path"})
    assert compiled.method is sentinel.default_method
    assert compiled.path == "/path"
    assert compiled.headers is sentinel.default_headers
    assert compiled.params is sentinel.default_params
    assert compiled.body is sentinel.default_body
def test_of_default_no_body(compiler: RequestCompiler, body, mocker):
    # of_default without a body override must keep the existing body
    # compiler untouched and merge the overridden fields (falling back to
    # the old default body) into a new RequestCompiler.
    ctor = mocker.patch(f"{PKG}.RequestCompiler")
    ctor.return_value = sentinel.compiler_of_default
    new_default = RequestCompiled(
        method=sentinel.new_default_method,
        path=sentinel.new_default_path,
        headers=sentinel.new_default_headers,
        params=sentinel.new_default_params,
    )
    new_compiler = compiler.of_default(new_default)
    assert new_compiler is sentinel.compiler_of_default
    ctor.assert_called_once_with(
        body=body,
        default=RequestCompiled(
            method=sentinel.new_default_method,
            path=sentinel.new_default_path,
            headers=sentinel.new_default_headers,
            params=sentinel.new_default_params,
            body=sentinel.default_body,
        ),
    )
    body.of_default.assert_not_called()
def test_of_default_body(compiler: RequestCompiler, body, mocker):
ctor = mocker.patch(f"{PKG}.RequestCompiler")
ctor.return_value = sentinel.compiler_of_default
new_default = RequestCompiled(body=sentinel.new_default_body)
new_compiler = compiler.of_default(new_default)
assert new_compiler is sentinel.compiler_of_default
ctor.assert_called_once_with(
body=sentinel.new_body_compiler,
default=RequestCompiled(
method=sentinel.default_method,
path=sentinel.default_path,
headers=sentinel.default_headers,
params=sentinel.default_params,
body=sentinel.new_default_body,
),
)
body.of_default.assert_called_once_with(sentinel.new_default_body)
| 34.174603 | 81 | 0.71203 |
408784a24cae84367d1864aa02a8ff6e4a8e197a | 1,109 | py | Python | bot/conversation_handlers/stage01.py | gerbigtim/coaching_bot | 5b4ef6e207a5017f7b4274d8238550b4988d0a6e | [
"MIT"
] | null | null | null | bot/conversation_handlers/stage01.py | gerbigtim/coaching_bot | 5b4ef6e207a5017f7b4274d8238550b4988d0a6e | [
"MIT"
] | null | null | null | bot/conversation_handlers/stage01.py | gerbigtim/coaching_bot | 5b4ef6e207a5017f7b4274d8238550b4988d0a6e | [
"MIT"
] | null | null | null | # imports
from telegram.ext import (
CommandHandler,
MessageHandler,
Filters,
ConversationHandler,
)
from handler_functions.start import start
from handler_functions.bio import bio
from handler_functions.gender import gender
from handler_functions.photo import photo, skip_photo
from handler_functions.location import location, skip_location
from handler_functions.cancel import cancel
from conversation_handlers.stage_constants import *
# Adds conversation handler with the states GENDER, PHOTO, LOCATION and BIO for stage 1 of the sign up
conv_handler = ConversationHandler(
entry_points=[CommandHandler('start', start)],
states={
GENDER: [MessageHandler(Filters.regex('^(Gentleman|Lady|I am a unicorn.)$'), gender)],
PHOTO: [MessageHandler(Filters.photo, photo), CommandHandler('skip', skip_photo)],
LOCATION: [
MessageHandler(Filters.location, location),
CommandHandler('skip', skip_location),
],
BIO: [MessageHandler(Filters.text & ~Filters.command, bio)],
},
fallbacks=[CommandHandler('cancel', cancel)],
) | 38.241379 | 102 | 0.734896 |
4087ac882a0e642cb2645d67bfb2e7473130d2e9 | 265 | py | Python | python100days/day03/conversion.py | lanSeFangZhou/pythonbase | f4daa373573b2fc0a59a5eb919d02eddf5914e18 | [
"Apache-2.0"
] | null | null | null | python100days/day03/conversion.py | lanSeFangZhou/pythonbase | f4daa373573b2fc0a59a5eb919d02eddf5914e18 | [
"Apache-2.0"
] | 1 | 2021-06-02T00:58:26.000Z | 2021-06-02T00:58:26.000Z | python100days/day03/conversion.py | lanSeFangZhou/pythonbase | f4daa373573b2fc0a59a5eb919d02eddf5914e18 | [
"Apache-2.0"
] | null | null | null | #
value =float(input(''))
unit =input('')
if unit == 'in' or unit == '':
print('%f = %f' % (value, value * 2.54))
elif unit == '' or unit == 'cm':
print('%f = %f' % (value, value / 2.54))
else:
print('') | 26.5 | 49 | 0.558491 |
4088dc579c34d53321481174879bd2850ab8f43e | 485 | py | Python | tests/models/test_dtfactory.py | surajsjain/ocean.py | 2e853db94d9aee2a0cf6b3d58f714215b83d917b | [
"Apache-2.0"
] | 4 | 2021-07-05T20:21:41.000Z | 2021-09-02T14:13:26.000Z | tests/models/test_dtfactory.py | surajsjain/ocean.py | 2e853db94d9aee2a0cf6b3d58f714215b83d917b | [
"Apache-2.0"
] | null | null | null | tests/models/test_dtfactory.py | surajsjain/ocean.py | 2e853db94d9aee2a0cf6b3d58f714215b83d917b | [
"Apache-2.0"
] | 1 | 2021-03-25T15:04:12.000Z | 2021-03-25T15:04:12.000Z | from ocean_lib.models.data_token import DataToken
from ocean_lib.models.dtfactory import DTFactory
from ocean_lib.ocean.util import to_base_18
| 37.307692 | 108 | 0.781443 |
408a713d6a5b30cf98528302f34eefe2000e2530 | 4,223 | py | Python | methods/unilm_based/unilm/src/pytorch_pretrained_bert/optimization_fp16.py | Guaguago/CommonGen | 0a81b4edb8cd111571eba817eb994420f1070c48 | [
"MIT"
] | 100 | 2020-01-30T08:14:25.000Z | 2022-03-30T08:59:33.000Z | methods/unilm_based/unilm/src/pytorch_pretrained_bert/optimization_fp16.py | Guaguago/CommonGen | 0a81b4edb8cd111571eba817eb994420f1070c48 | [
"MIT"
] | 4 | 2021-06-08T22:34:33.000Z | 2022-03-12T00:50:13.000Z | methods/unilm_based/unilm/src/pytorch_pretrained_bert/optimization_fp16.py | Guaguago/CommonGen | 0a81b4edb8cd111571eba817eb994420f1070c48 | [
"MIT"
] | 15 | 2020-04-13T22:56:27.000Z | 2022-03-10T02:44:26.000Z | # coding=utf-8
"""PyTorch optimization for BERT model."""
from apex.contrib.optimizers import FP16_Optimizer
| 52.135802 | 117 | 0.656642 |
408ac0aced4fa7689e5bb64bd87a616424377650 | 46 | py | Python | ermaket/api/generation/__init__.py | SqrtMinusOne/ERMaket_Experiment | c4a7b61651edd15a619d9b690e2aaeaab4de282d | [
"Apache-2.0"
] | null | null | null | ermaket/api/generation/__init__.py | SqrtMinusOne/ERMaket_Experiment | c4a7b61651edd15a619d9b690e2aaeaab4de282d | [
"Apache-2.0"
] | null | null | null | ermaket/api/generation/__init__.py | SqrtMinusOne/ERMaket_Experiment | c4a7b61651edd15a619d9b690e2aaeaab4de282d | [
"Apache-2.0"
] | null | null | null | from .generator import *
from .types import *
| 15.333333 | 24 | 0.73913 |
408c88fb92a834b62165870e3156152b98dd330c | 956 | py | Python | Source/stack0verf10w.py | IRIDIUM-SUB/Software-Security-Course-Design | 596664a728d73133e44a4566027561170c5d2ae8 | [
"MIT"
] | null | null | null | Source/stack0verf10w.py | IRIDIUM-SUB/Software-Security-Course-Design | 596664a728d73133e44a4566027561170c5d2ae8 | [
"MIT"
] | null | null | null | Source/stack0verf10w.py | IRIDIUM-SUB/Software-Security-Course-Design | 596664a728d73133e44a4566027561170c5d2ae8 | [
"MIT"
] | null | null | null | import Bugdetectionuniversalframe
import os
import re
| 30.83871 | 84 | 0.582636 |
408e5eee21b5e0ed193fbd1da82ee85348eb987d | 7,517 | py | Python | ndbc/station.py | ppokhrel1/ndbc | e8ed73ae35a49c967384e2c80c1a2bf838eeb0c2 | [
"MIT"
] | null | null | null | ndbc/station.py | ppokhrel1/ndbc | e8ed73ae35a49c967384e2c80c1a2bf838eeb0c2 | [
"MIT"
] | null | null | null | ndbc/station.py | ppokhrel1/ndbc | e8ed73ae35a49c967384e2c80c1a2bf838eeb0c2 | [
"MIT"
] | null | null | null | """
station.py
"""
from datetime import datetime, timedelta
import gzip
import numpy as np
import requests
import urllib
_BASEURL = 'http://www.ndbc.noaa.gov/data'
_SENSOR_URL = _BASEURL+'/stations/buoyht.txt'
_REALTIME_URL = _BASEURL+'/realtime2/'
_RECENT_URL = _BASEURL+'/stdmet/'
_HISTORICAL_URL = _BASEURL+'/historical/stdmet/'
_STATION_URL = _BASEURL+'/stations/station_table.txt'
| 41.994413 | 89 | 0.543036 |
408f68f533f8c5055f6e751095cb737571178a12 | 765 | py | Python | main.py | kajuna0amendez/Cython_Machine_Learning_Models | 8b7d502bae07487ae0fdbced796e0fa50082e681 | [
"Apache-2.0"
] | null | null | null | main.py | kajuna0amendez/Cython_Machine_Learning_Models | 8b7d502bae07487ae0fdbced796e0fa50082e681 | [
"Apache-2.0"
] | 2 | 2021-02-02T23:02:12.000Z | 2021-08-23T20:51:22.000Z | main.py | kajuna0amendez/Machine_Learning_Models | 8b7d502bae07487ae0fdbced796e0fa50082e681 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#!/usr/bin/env python
__author__ = "Andres Mendez-Vazquez"
__copyright__ = "Copyright 2018"
__credits__ = ["Andres Mendez-Vazquez"]
__license__ = "Apache"
__version__ = "v1.0.0"
__maintainer__ = "Andres Mendez-Vazquez"
__email = "kajuna0kajuna@gmail.com"
__status__ = "Development"
from data_model.load_data import create_connection, select_all_tasks
from tools.data_frames import dframe_t_db
if __name__ == '__main__':
df = main()
print(df)
| 23.181818 | 68 | 0.705882 |
408f7d16d7791c4eaced84288001a4eccaab5dae | 54 | py | Python | graw/__init__.py | iamsajjad/graw | 84289b9bd2e298bad72ade402ab8a87e7c37688d | [
"MIT"
] | null | null | null | graw/__init__.py | iamsajjad/graw | 84289b9bd2e298bad72ade402ab8a87e7c37688d | [
"MIT"
] | null | null | null | graw/__init__.py | iamsajjad/graw | 84289b9bd2e298bad72ade402ab8a87e7c37688d | [
"MIT"
] | null | null | null |
# version of the graw package
__version__ = "0.1.0"
| 10.8 | 29 | 0.685185 |
408fa80f7b62ab2142b5ebe87fafa4317281b530 | 6,779 | py | Python | sdk/python/pulumi_aws/acm/get_certificate.py | mehd-io/pulumi-aws | 034629c3fb30dc90db65b196d115df43723df19c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/acm/get_certificate.py | mehd-io/pulumi-aws | 034629c3fb30dc90db65b196d115df43723df19c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/acm/get_certificate.py | mehd-io/pulumi-aws | 034629c3fb30dc90db65b196d115df43723df19c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = [
'GetCertificateResult',
'AwaitableGetCertificateResult',
'get_certificate',
]
def get_certificate(domain: Optional[str] = None,
key_types: Optional[Sequence[str]] = None,
most_recent: Optional[bool] = None,
statuses: Optional[Sequence[str]] = None,
tags: Optional[Mapping[str, str]] = None,
types: Optional[Sequence[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetCertificateResult:
"""
Use this data source to get the ARN of a certificate in AWS Certificate
Manager (ACM), you can reference
it by domain without having to hard code the ARNs as input.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
issued = aws.acm.get_certificate(domain="tf.example.com",
statuses=["ISSUED"])
amazon_issued = aws.acm.get_certificate(domain="tf.example.com",
most_recent=True,
types=["AMAZON_ISSUED"])
rsa4096 = aws.acm.get_certificate(domain="tf.example.com",
key_types=["RSA_4096"])
```
:param str domain: The domain of the certificate to look up. If no certificate is found with this name, an error will be returned.
:param Sequence[str] key_types: A list of key algorithms to filter certificates. By default, ACM does not return all certificate types when searching. Valid values are `RSA_1024`, `RSA_2048`, `RSA_4096`, `EC_prime256v1`, `EC_secp384r1`, and `EC_secp521r1`.
:param bool most_recent: If set to true, it sorts the certificates matched by previous criteria by the NotBefore field, returning only the most recent one. If set to false, it returns an error if more than one certificate is found. Defaults to false.
:param Sequence[str] statuses: A list of statuses on which to filter the returned list. Valid values are `PENDING_VALIDATION`, `ISSUED`,
`INACTIVE`, `EXPIRED`, `VALIDATION_TIMED_OUT`, `REVOKED` and `FAILED`. If no value is specified, only certificates in the `ISSUED` state
are returned.
:param Mapping[str, str] tags: A mapping of tags for the resource.
:param Sequence[str] types: A list of types on which to filter the returned list. Valid values are `AMAZON_ISSUED` and `IMPORTED`.
"""
__args__ = dict()
__args__['domain'] = domain
__args__['keyTypes'] = key_types
__args__['mostRecent'] = most_recent
__args__['statuses'] = statuses
__args__['tags'] = tags
__args__['types'] = types
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:acm/getCertificate:getCertificate', __args__, opts=opts, typ=GetCertificateResult).value
return AwaitableGetCertificateResult(
arn=__ret__.arn,
domain=__ret__.domain,
id=__ret__.id,
key_types=__ret__.key_types,
most_recent=__ret__.most_recent,
statuses=__ret__.statuses,
tags=__ret__.tags,
types=__ret__.types)
| 39.184971 | 260 | 0.651424 |
40902a024648160483f25a5dd670916ae7cc2c01 | 2,688 | py | Python | Part 3/batch_VS_stochastic.py | m9psy/neural_network_habr_guide | 543b4bc82cfed5d5675b9ecc1cc97c2286a5562a | [
"MIT"
] | 20 | 2016-08-08T12:16:51.000Z | 2022-03-26T19:56:09.000Z | Part 3/batch_VS_stochastic.py | m9psy/neural_network_habr_guide | 543b4bc82cfed5d5675b9ecc1cc97c2286a5562a | [
"MIT"
] | null | null | null | Part 3/batch_VS_stochastic.py | m9psy/neural_network_habr_guide | 543b4bc82cfed5d5675b9ecc1cc97c2286a5562a | [
"MIT"
] | 8 | 2016-08-08T14:22:13.000Z | 2020-05-30T07:05:36.000Z | import numpy as np
import matplotlib.pyplot as plt
TOTAL = 200
STEP = 0.25
EPS = 0.1
INITIAL_THETA = [9, 14]
X = np.arange(0, TOTAL * STEP, STEP)
Y = np.array([y for y in generate_sample(TOTAL)])
# ,
X = (X - X.min()) / (X.max() - X.min())
A = np.empty((TOTAL, 2))
A[:, 0] = 1
A[:, 1] = X
theta = np.linalg.pinv(A).dot(Y)
print(theta, cost_function(A, Y, theta))
import time
start = time.clock()
theta_stochastic = stochastic_descent(A, Y, 0.1)
print("St:", time.clock() - start, theta_stochastic)
start = time.clock()
theta_batch = batch_descent(A, Y, 0.001)
print("Btch:", time.clock() - start, theta_batch)
| 29.866667 | 74 | 0.553943 |