| Column | Type | Min | Max |
|---|---|---|---|
| hexsha | string (length) | 40 | 40 |
| size | int64 | 5 | 2.06M |
| ext | string (11 classes) | — | — |
| lang | string (1 class) | — | — |
| max_stars_repo_path | string (length) | 3 | 251 |
| max_stars_repo_name | string (length) | 4 | 130 |
| max_stars_repo_head_hexsha | string (length) | 40 | 78 |
| max_stars_repo_licenses | list (length) | 1 | 10 |
| max_stars_count | int64 | 1 | 191k |
| max_stars_repo_stars_event_min_datetime | string (length) | 24 | 24 |
| max_stars_repo_stars_event_max_datetime | string (length) | 24 | 24 |
| max_issues_repo_path | string (length) | 3 | 251 |
| max_issues_repo_name | string (length) | 4 | 130 |
| max_issues_repo_head_hexsha | string (length) | 40 | 78 |
| max_issues_repo_licenses | list (length) | 1 | 10 |
| max_issues_count | int64 | 1 | 116k |
| max_issues_repo_issues_event_min_datetime | string (length) | 24 | 24 |
| max_issues_repo_issues_event_max_datetime | string (length) | 24 | 24 |
| max_forks_repo_path | string (length) | 3 | 251 |
| max_forks_repo_name | string (length) | 4 | 130 |
| max_forks_repo_head_hexsha | string (length) | 40 | 78 |
| max_forks_repo_licenses | list (length) | 1 | 10 |
| max_forks_count | int64 | 1 | 105k |
| max_forks_repo_forks_event_min_datetime | string (length) | 24 | 24 |
| max_forks_repo_forks_event_max_datetime | string (length) | 24 | 24 |
| content | string (length) | 1 | 1.05M |
| avg_line_length | float64 | 1 | 1.02M |
| max_line_length | int64 | 3 | 1.04M |
| alphanum_fraction | float64 | 0 | 1 |
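A minimal sketch of how a dataset exposing this schema could be loaded and inspected with the HuggingFace `datasets` library; the dataset path below is a placeholder, not taken from this dump:

```python
# Hypothetical loading example; substitute the real dataset path.
from datasets import load_dataset

ds = load_dataset("some-org/some-stack-like-corpus", split="train")  # placeholder
row = ds[0]
print(row["max_stars_repo_name"], row["max_stars_count"])
print(row["content"][:200])  # first 200 characters of the stored source file
print(row["avg_line_length"], row["max_line_length"], row["alphanum_fraction"])
```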
---
**hexsha:** `5b14c0f520aa2dfc088e43cb4960682061f61a03` · **size:** 409 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | netrd/__init__.py | sdmccabe/netrd | `f703c19b02f42c9f54bcab57014381da11dd58da` | ["MIT"] | 116 | 2019-01-17T18:31:43.000Z | 2022-03-31T13:37:21.000Z |
| max_issues | netrd/__init__.py | sdmccabe/netrd | `f703c19b02f42c9f54bcab57014381da11dd58da` | ["MIT"] | 175 | 2019-01-15T01:19:13.000Z | 2021-05-25T16:51:26.000Z |
| max_forks | netrd/__init__.py | sdmccabe/netrd | `f703c19b02f42c9f54bcab57014381da11dd58da` | ["MIT"] | 36 | 2019-01-14T20:38:32.000Z | 2022-01-21T20:58:38.000Z |

**content:**

```python
"""
netrd
-----

netrd stands for Network Reconstruction and Distances. It is a repository
of different algorithms for constructing a network from time series data,
as well as for comparing two networks. It is the product of the Network
Science Institute 2019 Collabathon.
"""
from . import distance  # noqa
from . import reconstruction  # noqa
from . import dynamics  # noqa
from . import utilities  # noqa
```

**avg_line_length:** 25.5625 · **max_line_length:** 73 · **alphanum_fraction:** 0.760391
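The three trailing statistics of each record appear to be derived from `content`. A plausible recomputation, assuming the line lengths are measured per line and `alphanum_fraction` over all characters (an inference from the column names, not a documented recipe):

```python
def content_stats(content: str):
    """Recompute the per-record statistics (assumed definitions)."""
    lines = content.split("\n")
    avg_line_length = sum(len(line) for line in lines) / len(lines)
    max_line_length = max(len(line) for line in lines)
    alphanum_fraction = sum(ch.isalnum() for ch in content) / len(content)
    return avg_line_length, max_line_length, alphanum_fraction
```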
---
**hexsha:** `5b14c2ff1b60260805608d9bdfcac0cbbde63652` · **size:** 5,613 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | pytorch/GPT.py | lyq628/NLP-Tutorials | `7c9d117a3542695e79419c835ba9e98ef80800b8` | ["MIT"] | 643 | 2018-11-30T09:14:29.000Z | 2022-03-28T14:04:15.000Z |
| max_issues | pytorch/GPT.py | lyq628/NLP-Tutorials | `7c9d117a3542695e79419c835ba9e98ef80800b8` | ["MIT"] | 22 | 2019-01-03T17:58:12.000Z | 2022-02-10T01:56:00.000Z |
| max_forks | pytorch/GPT.py | lyq628/NLP-Tutorials | `7c9d117a3542695e79419c835ba9e98ef80800b8` | ["MIT"] | 258 | 2018-12-03T17:15:04.000Z | 2022-03-30T07:45:49.000Z |

**content:**

```python
from transformer import Encoder
from torch import nn, optim
from torch.nn.functional import cross_entropy, softmax, relu
from torch.utils.data import DataLoader
from torch.utils.data.dataloader import default_collate
import torch
import utils
import os
import pickle


if __name__ == "__main__":
    train()
```

**avg_line_length:** 43.176923 · **max_line_length:** 136 · **alphanum_fraction:** 0.637983
---
**hexsha:** `5b14e976757ac56925070b1b4efc08dd156d8a00` · **size:** 22,691 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | skyportal/plot.py | dannygoldstein/skyportal | `3f3518136530fcf5bd1787a4c890782164627fce` | ["BSD-3-Clause"] | null | null | null |
| max_issues | skyportal/plot.py | dannygoldstein/skyportal | `3f3518136530fcf5bd1787a4c890782164627fce` | ["BSD-3-Clause"] | null | null | null |
| max_forks | skyportal/plot.py | dannygoldstein/skyportal | `3f3518136530fcf5bd1787a4c890782164627fce` | ["BSD-3-Clause"] | null | null | null |

**content:**

```python
import numpy as np
import pandas as pd

from bokeh.core.json_encoder import serialize_json
from bokeh.core.properties import List, String
from bokeh.document import Document
from bokeh.layouts import row, column
from bokeh.models import CustomJS, HoverTool, Range1d, Slider, Button
from bokeh.models.widgets import CheckboxGroup, TextInput, Panel, Tabs
from bokeh.palettes import viridis
from bokeh.plotting import figure, ColumnDataSource
from bokeh.util.compiler import bundle_all_models
from bokeh.util.serialization import make_id

from matplotlib import cm
from matplotlib.colors import rgb2hex

import os

from skyportal.models import (
    DBSession, Obj, Photometry, Group, Instrument, Telescope, PHOT_ZP,
)

import sncosmo

DETECT_THRESH = 5  # sigma

SPEC_LINES = {
    'H': ([3970, 4102, 4341, 4861, 6563], '#ff0000'),
    'He': ([3886, 4472, 5876, 6678, 7065], '#002157'),
    'He II': ([3203, 4686], '#003b99'),
    'C II': ([3919, 4267, 6580, 7234, 9234], '#570199'),
    'C III': ([4650, 5696], '#a30198'),
    'C IV': ([5801], '#ff0073'),
    'O': ([7772, 7774, 7775, 8447, 9266], '#007236'),
    'O II': ([3727], '#00a64d'),
    'O III': ([4959, 5007], '#00bf59'),
    'Na': ([5890, 5896, 8183, 8195], '#aba000'),
    'Mg': ([2780, 2852, 3829, 3832, 3838, 4571, 5167, 5173, 5184], '#8c6239'),
    'Mg II': ([2791, 2796, 2803, 4481], '#bf874e'),
    'Si II': ([3856, 5041, 5056, 5670, 6347, 6371], '#5674b9'),
    'S II': ([5433, 5454, 5606, 5640, 5647, 6715], '#a38409'),
    'Ca II': ([3934, 3969, 7292, 7324, 8498, 8542, 8662], '#005050'),
    'Fe II': ([5018, 5169], '#f26c4f'),
    'Fe III': ([4397, 4421, 4432, 5129, 5158], '#f9917b'),
}
# TODO add groups
# Galaxy lines
#
# 'H': '4341, 4861, 6563;
# 'N II': '6548, 6583;
# 'O I': '6300;'
# 'O II': '3727;
# 'O III': '4959, 5007;
# 'Mg II': '2798;
# 'S II': '6717, 6731'
# 'H': '3970, 4102, 4341, 4861, 6563'
# 'Na': '5890, 5896, 8183, 8195'
# 'He': '3886, 4472, 5876, 6678, 7065'
# 'Mg': '2780, 2852, 3829, 3832, 3838, 4571, 5167, 5173, 5184'
# 'He II': '3203, 4686'
# 'Mg II': '2791, 2796, 2803, 4481'
# 'O': '7772, 7774, 7775, 8447, 9266'
# 'Si II': '3856, 5041, 5056, 5670 6347, 6371'
# 'O II': '3727'
# 'Ca II': '3934, 3969, 7292, 7324, 8498, 8542, 8662'
# 'O III': '4959, 5007'
# 'Fe II': '5018, 5169'
# 'S II': '5433, 5454, 5606, 5640, 5647, 6715'
# 'Fe III': '4397, 4421, 4432, 5129, 5158'
#
# Other
#
# 'Tel: 6867-6884, 7594-7621'
# 'Tel': '#b7b7b7',
# 'H: 4341, 4861, 6563;
# 'N II': 6548, 6583;
# 'O I': 6300;
# 'O II': 3727;
# 'O III': 4959, 5007;
# 'Mg II': 2798;
# 'S II': 6717, 6731'


# TODO replace with (script, div) method
def _plot_to_json(plot):
    """Convert plot to JSON objects necessary for rendering with `bokehJS`.

    Parameters
    ----------
    plot : bokeh.plotting.figure.Figure
        Bokeh plot object to be rendered.

    Returns
    -------
    (str, str)
        Returns (docs_json, render_items) json for the desired plot.
    """
    render_items = [{'docid': plot._id, 'elementid': make_id()}]
    doc = Document()
    doc.add_root(plot)
    docs_json_inner = doc.to_json()
    docs_json = {render_items[0]['docid']: docs_json_inner}

    docs_json = serialize_json(docs_json)
    render_items = serialize_json(render_items)
    custom_model_js = bundle_all_models()

    return docs_json, render_items, custom_model_js


tooltip_format = [
    ('mjd', '@mjd{0.000000}'),
    ('flux', '@flux'),
    ('filter', '@filter'),
    ('fluxerr', '@fluxerr'),
    ('mag', '@mag'),
    ('magerr', '@magerr'),
    ('lim_mag', '@lim_mag'),
    ('instrument', '@instrument'),
    ('stacked', '@stacked'),
]

cmap = cm.get_cmap('jet_r')


# TODO make async so that thread isn't blocked
def photometry_plot(obj_id, user, width=600, height=300):
    """Create scatter plot of photometry for object.

    Parameters
    ----------
    obj_id : str
        ID of Obj to be plotted.

    Returns
    -------
    (str, str)
        Returns (docs_json, render_items) json for the desired plot.
    """
    data = pd.read_sql(
        DBSession()
        .query(
            Photometry,
            Telescope.nickname.label("telescope"),
            Instrument.name.label("instrument"),
        )
        .join(Instrument, Instrument.id == Photometry.instrument_id)
        .join(Telescope, Telescope.id == Instrument.telescope_id)
        .filter(Photometry.obj_id == obj_id)
        .filter(
            Photometry.groups.any(Group.id.in_([g.id for g in user.accessible_groups]))
        )
        .statement,
        DBSession().bind,
    )

    if data.empty:
        return None, None, None

    data['color'] = [get_color(f) for f in data['filter']]
    data['label'] = [
        f'{i} {f}-band' for i, f in zip(data['instrument'], data['filter'])
    ]
    data['zp'] = PHOT_ZP
    data['magsys'] = 'ab'
    data['alpha'] = 1.0
    data['lim_mag'] = -2.5 * np.log10(data['fluxerr'] * DETECT_THRESH) + data['zp']

    # Passing a dictionary to a bokeh datasource causes the frontend to die,
    # deleting the dictionary column fixes that
    del data['original_user_data']

    # keep track of things that are only upper limits
    data['hasflux'] = ~data['flux'].isna()

    # calculate the magnitudes - a photometry point is considered "significant"
    # or "detected" (and thus can be represented by a magnitude) if its snr
    # is above DETECT_THRESH
    obsind = data['hasflux'] & (
        data['flux'].fillna(0.0) / data['fluxerr'] >= DETECT_THRESH
    )
    data.loc[~obsind, 'mag'] = None
    data.loc[obsind, 'mag'] = -2.5 * np.log10(data[obsind]['flux']) + PHOT_ZP

    # calculate the magnitude errors using standard error propagation formulae
    # https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulae
    data.loc[~obsind, 'magerr'] = None
    coeff = 2.5 / np.log(10)
    magerrs = np.abs(coeff * data[obsind]['fluxerr'] / data[obsind]['flux'])
    data.loc[obsind, 'magerr'] = magerrs

    data['obs'] = obsind
    data['stacked'] = False

    split = data.groupby('label', sort=False)

    finite = np.isfinite(data['flux'])
    fdata = data[finite]
    lower = np.min(fdata['flux']) * 0.95
    upper = np.max(fdata['flux']) * 1.05

    plot = figure(
        plot_width=width, plot_height=height, active_drag='box_zoom',
        tools='box_zoom,wheel_zoom,pan,reset,save', y_range=(lower, upper),
    )

    imhover = HoverTool(tooltips=tooltip_format)
    plot.add_tools(imhover)

    model_dict = {}

    for i, (label, sdf) in enumerate(split):
        # for the flux plot, we only show things that have a flux value
        df = sdf[sdf['hasflux']]

        key = f'obs{i}'
        model_dict[key] = plot.scatter(
            x='mjd', y='flux', color='color', marker='circle',
            fill_color='color', alpha='alpha', source=ColumnDataSource(df),
        )
        imhover.renderers.append(model_dict[key])

        key = f'bin{i}'
        model_dict[key] = plot.scatter(
            x='mjd', y='flux', color='color', marker='circle', fill_color='color',
            source=ColumnDataSource(
                data=dict(mjd=[], flux=[], fluxerr=[], filter=[], color=[],
                          lim_mag=[], mag=[], magerr=[], stacked=[], instrument=[])
            ),
        )
        imhover.renderers.append(model_dict[key])

        key = 'obserr' + str(i)
        y_err_x = []
        y_err_y = []
        for d, ro in df.iterrows():
            px = ro['mjd']
            py = ro['flux']
            err = ro['fluxerr']
            y_err_x.append((px, px))
            y_err_y.append((py - err, py + err))

        model_dict[key] = plot.multi_line(
            xs='xs', ys='ys', color='color', alpha='alpha',
            source=ColumnDataSource(
                data=dict(xs=y_err_x, ys=y_err_y, color=df['color'],
                          alpha=[1.0] * len(df))
            ),
        )

        key = f'binerr{i}'
        model_dict[key] = plot.multi_line(
            xs='xs', ys='ys', color='color',
            source=ColumnDataSource(data=dict(xs=[], ys=[], color=[])),
        )

    plot.xaxis.axis_label = 'MJD'
    plot.yaxis.axis_label = 'Flux (Jy)'
    plot.toolbar.logo = None

    toggle = CheckboxWithLegendGroup(
        labels=list(data.label.unique()),
        active=list(range(len(data.label.unique()))),
        colors=list(data.color.unique()),
    )

    # TODO replace `eval` with Namespaces
    # https://github.com/bokeh/bokeh/pull/6340
    toggle.callback = CustomJS(
        args={'toggle': toggle, **model_dict},
        code=open(
            os.path.join(os.path.dirname(__file__), '../static/js/plotjs', 'togglef.js')
        ).read(),
    )

    slider = Slider(start=0.0, end=15.0, value=0.0, step=1.0, title='Binsize (days)')

    callback = CustomJS(
        args={'slider': slider, 'toggle': toggle, **model_dict},
        code=open(
            os.path.join(os.path.dirname(__file__), '../static/js/plotjs', 'stackf.js')
        )
        .read()
        .replace('default_zp', str(PHOT_ZP))
        .replace('detect_thresh', str(DETECT_THRESH)),
    )
    slider.js_on_change('value', callback)

    # Mark the first and last detections
    detection_dates = data[data['hasflux']]['mjd']
    if len(detection_dates) > 0:
        first = round(detection_dates.min(), 6)
        last = round(detection_dates.max(), 6)
        first_color = "#34b4eb"
        last_color = "#8992f5"
        midpoint = (upper + lower) / 2
        line_top = 5 * upper - 4 * midpoint
        line_bottom = 5 * lower - 4 * midpoint
        first_x = np.full(5000, first)
        last_x = np.full(5000, last)
        y = np.linspace(line_bottom, line_top, num=5000)
        first_r = plot.line(
            x=first_x, y=y, line_alpha=0.5, line_color=first_color, line_width=2,
        )
        plot.add_tools(
            HoverTool(tooltips=[("First detection", f'{first}')], renderers=[first_r],)
        )
        last_r = plot.line(
            x=last_x, y=y, line_alpha=0.5, line_color=last_color, line_width=2
        )
        plot.add_tools(
            HoverTool(tooltips=[("Last detection", f'{last}')], renderers=[last_r],)
        )

    layout = row(plot, toggle)
    layout = column(slider, layout)

    p1 = Panel(child=layout, title='Flux')

    # now make the mag light curve
    ymax = np.nanmax(data['mag']) + 0.1
    ymin = np.nanmin(data['mag']) - 0.1

    plot = figure(
        plot_width=width, plot_height=height, active_drag='box_zoom',
        tools='box_zoom,wheel_zoom,pan,reset,save', y_range=(ymax, ymin),
        toolbar_location='above',
    )

    # Mark the first and last detections again
    if len(detection_dates) > 0:
        midpoint = (ymax + ymin) / 2
        line_top = 5 * ymax - 4 * midpoint
        line_bottom = 5 * ymin - 4 * midpoint
        y = np.linspace(line_bottom, line_top, num=5000)
        first_r = plot.line(
            x=first_x, y=y, line_alpha=0.5, line_color=first_color, line_width=2,
        )
        plot.add_tools(
            HoverTool(tooltips=[("First detection", f'{first}')], renderers=[first_r],)
        )
        last_r = plot.line(
            x=last_x, y=y, line_alpha=0.5, line_color=last_color, line_width=2
        )
        plot.add_tools(
            HoverTool(
                tooltips=[("Last detection", f'{last}')],
                renderers=[last_r],
                point_policy='follow_mouse',
            )
        )

    imhover = HoverTool(tooltips=tooltip_format)
    plot.add_tools(imhover)

    model_dict = {}

    for i, (label, df) in enumerate(split):
        key = f'obs{i}'
        model_dict[key] = plot.scatter(
            x='mjd', y='mag', color='color', marker='circle', fill_color='color',
            alpha='alpha', source=ColumnDataSource(df[df['obs']]),
        )
        imhover.renderers.append(model_dict[key])

        unobs_source = df[~df['obs']].copy()
        unobs_source.loc[:, 'alpha'] = 0.8

        key = f'unobs{i}'
        model_dict[key] = plot.scatter(
            x='mjd', y='lim_mag', color='color', marker='inverted_triangle',
            fill_color='white', line_color='color', alpha='alpha',
            source=ColumnDataSource(unobs_source),
        )
        imhover.renderers.append(model_dict[key])

        key = f'bin{i}'
        model_dict[key] = plot.scatter(
            x='mjd', y='mag', color='color', marker='circle', fill_color='color',
            source=ColumnDataSource(
                data=dict(mjd=[], flux=[], fluxerr=[], filter=[], color=[],
                          lim_mag=[], mag=[], magerr=[], instrument=[], stacked=[])
            ),
        )
        imhover.renderers.append(model_dict[key])

        key = 'obserr' + str(i)
        y_err_x = []
        y_err_y = []
        for d, ro in df[df['obs']].iterrows():
            px = ro['mjd']
            py = ro['mag']
            err = ro['magerr']
            y_err_x.append((px, px))
            y_err_y.append((py - err, py + err))

        model_dict[key] = plot.multi_line(
            xs='xs', ys='ys', color='color', alpha='alpha',
            source=ColumnDataSource(
                data=dict(xs=y_err_x, ys=y_err_y, color=df[df['obs']]['color'],
                          alpha=[1.0] * len(df[df['obs']]))
            ),
        )

        key = f'binerr{i}'
        model_dict[key] = plot.multi_line(
            xs='xs', ys='ys', color='color',
            source=ColumnDataSource(data=dict(xs=[], ys=[], color=[])),
        )

        key = f'unobsbin{i}'
        model_dict[key] = plot.scatter(
            x='mjd', y='lim_mag', color='color', marker='inverted_triangle',
            fill_color='white', line_color='color', alpha=0.8,
            source=ColumnDataSource(
                data=dict(mjd=[], flux=[], fluxerr=[], filter=[], color=[],
                          lim_mag=[], mag=[], magerr=[], instrument=[], stacked=[])
            ),
        )
        imhover.renderers.append(model_dict[key])

        key = f'all{i}'
        model_dict[key] = ColumnDataSource(df)

        key = f'bold{i}'
        model_dict[key] = ColumnDataSource(
            df[
                ['mjd', 'flux', 'fluxerr', 'mag', 'magerr', 'filter', 'zp',
                 'magsys', 'lim_mag', 'stacked']
            ]
        )

    plot.xaxis.axis_label = 'MJD'
    plot.yaxis.axis_label = 'AB mag'
    plot.toolbar.logo = None

    toggle = CheckboxWithLegendGroup(
        labels=list(data.label.unique()),
        active=list(range(len(data.label.unique()))),
        colors=list(data.color.unique()),
    )

    # TODO replace `eval` with Namespaces
    # https://github.com/bokeh/bokeh/pull/6340
    toggle.callback = CustomJS(
        args={'toggle': toggle, **model_dict},
        code=open(
            os.path.join(os.path.dirname(__file__), '../static/js/plotjs', 'togglem.js')
        ).read(),
    )

    slider = Slider(start=0.0, end=15.0, value=0.0, step=1.0, title='Binsize (days)')

    button = Button(label="Export Bold Light Curve to CSV")
    button.callback = CustomJS(
        args={'slider': slider, 'toggle': toggle, **model_dict},
        code=open(
            os.path.join(
                os.path.dirname(__file__), '../static/js/plotjs', "download.js"
            )
        )
        .read()
        .replace('objname', obj_id)
        .replace('default_zp', str(PHOT_ZP)),
    )

    toplay = row(slider, button)

    callback = CustomJS(
        args={'slider': slider, 'toggle': toggle, **model_dict},
        code=open(
            os.path.join(os.path.dirname(__file__), '../static/js/plotjs', 'stackm.js')
        )
        .read()
        .replace('default_zp', str(PHOT_ZP))
        .replace('detect_thresh', str(DETECT_THRESH)),
    )
    slider.js_on_change('value', callback)

    layout = row(plot, toggle)
    layout = column(toplay, layout)

    p2 = Panel(child=layout, title='Mag')

    tabs = Tabs(tabs=[p2, p1])
    return _plot_to_json(tabs)


# TODO make async so that thread isn't blocked
def spectroscopy_plot(obj_id, spec_id=None):
    """TODO normalization? should this be handled at data ingestion or plot-time?"""
    obj = Obj.query.get(obj_id)
    spectra = Obj.query.get(obj_id).spectra
    if spec_id is not None:
        spectra = [spec for spec in spectra if spec.id == int(spec_id)]
    if len(spectra) == 0:
        return None, None, None

    color_map = dict(zip([s.id for s in spectra], viridis(len(spectra))))
    data = pd.concat(
        [
            pd.DataFrame(
                {
                    'wavelength': s.wavelengths,
                    'flux': s.fluxes,
                    'id': s.id,
                    'instrument': s.instrument.telescope.nickname,
                }
            )
            for i, s in enumerate(spectra)
        ]
    )
    split = data.groupby('id')
    hover = HoverTool(
        tooltips=[('wavelength', '$x'), ('flux', '$y'), ('instrument', '@instrument')]
    )
    plot = figure(
        plot_width=600, plot_height=300, sizing_mode='scale_both',
        tools='box_zoom,wheel_zoom,pan,reset', active_drag='box_zoom',
    )
    plot.add_tools(hover)
    model_dict = {}
    for i, (key, df) in enumerate(split):
        model_dict['s' + str(i)] = plot.line(
            x='wavelength', y='flux', color=color_map[key], source=ColumnDataSource(df)
        )
    plot.xaxis.axis_label = 'Wavelength (Å)'
    plot.yaxis.axis_label = 'Flux'
    plot.toolbar.logo = None

    # TODO how to choose a good default?
    plot.y_range = Range1d(0, 1.03 * data.flux.max())

    toggle = CheckboxWithLegendGroup(
        labels=[s.instrument.telescope.nickname for s in spectra],
        active=list(range(len(spectra))),
        width=100,
        colors=[color_map[k] for k, df in split],
    )
    toggle.callback = CustomJS(
        args={'toggle': toggle, **model_dict},
        code="""
          for (let i = 0; i < toggle.labels.length; i++) {
              eval("s" + i).visible = (toggle.active.includes(i))
          }
        """,
    )

    elements = CheckboxWithLegendGroup(
        labels=list(SPEC_LINES.keys()),
        active=[],
        width=80,
        colors=[c for w, c in SPEC_LINES.values()],
    )
    z = TextInput(value=str(obj.redshift), title="z:")
    v_exp = TextInput(value='0', title="v_exp:")
    for i, (wavelengths, color) in enumerate(SPEC_LINES.values()):
        el_data = pd.DataFrame({'wavelength': wavelengths})
        el_data['x'] = el_data['wavelength'] * (1 + obj.redshift)
        model_dict[f'el{i}'] = plot.segment(
            x0='x', x1='x',
            # TODO change limits
            y0=0, y1=1e-13,
            color=color,
            source=ColumnDataSource(el_data),
        )
        model_dict[f'el{i}'].visible = False

    # TODO callback policy: don't require submit for text changes?
    elements.callback = CustomJS(
        args={'elements': elements, 'z': z, 'v_exp': v_exp, **model_dict},
        code="""
          let c = 299792.458; // speed of light in km / s
          for (let i = 0; i < elements.labels.length; i++) {
              let el = eval("el" + i);
              el.visible = (elements.active.includes(i))
              el.data_source.data.x = el.data_source.data.wavelength.map(
                  x_i => (x_i * (1 + parseFloat(z.value)) /
                                (1 + parseFloat(v_exp.value) / c))
              );
              el.data_source.change.emit();
          }
        """,
    )
    z.callback = elements.callback
    v_exp.callback = elements.callback

    layout = row(plot, toggle, elements, column(z, v_exp))
    return _plot_to_json(layout)
```

**avg_line_length:** 30.335561 · **max_line_length:** 88 · **alphanum_fraction:** 0.534441
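For reference, the magnitude, magnitude-error, and limiting-magnitude columns computed in `photometry_plot` above follow from the standard relations (a restatement of the code, with ZP the photometric zero point and N_σ = `DETECT_THRESH` = 5):

```latex
m = -2.5\,\log_{10} f + \mathrm{ZP}, \qquad
\sigma_m = \left|\frac{\partial m}{\partial f}\right|\sigma_f
         = \frac{2.5}{\ln 10}\,\frac{\sigma_f}{f}, \qquad
m_{\mathrm{lim}} = -2.5\,\log_{10}\!\left(N_\sigma\,\sigma_f\right) + \mathrm{ZP}
```

The factor 2.5/ln 10 is exactly the `coeff = 2.5 / np.log(10)` used for `magerr`.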
---
**hexsha:** `5b15f03a9e21ad9e630b8c38b2ac80ff1cf06549` · **size:** 4,625 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | lib/session.py | Hiteshsuhas/err-stackstorm | `7579350ac50d9324b64a73b86d57e094270cb275` | ["Apache-2.0"] | 15 | 2016-09-19T12:06:12.000Z | 2021-11-30T12:04:44.000Z |
| max_issues | lib/session.py | Hiteshsuhas/err-stackstorm | `7579350ac50d9324b64a73b86d57e094270cb275` | ["Apache-2.0"] | 22 | 2017-06-19T18:13:54.000Z | 2021-05-28T09:25:01.000Z |
| max_forks | lib/session.py | Hiteshsuhas/err-stackstorm | `7579350ac50d9324b64a73b86d57e094270cb275` | ["Apache-2.0"] | 7 | 2017-06-19T17:03:59.000Z | 2021-09-27T11:06:31.000Z |

**content:**

```python
# coding:utf-8
import uuid
import string
import hashlib
import logging
from lib.errors import SessionExpiredError, SessionConsumedError
from datetime import datetime as dt
from random import SystemRandom

LOG = logging.getLogger("errbot.plugin.st2.session")
```

**avg_line_length:** 34.774436 · **max_line_length:** 99 · **alphanum_fraction:** 0.611676
---
**hexsha:** `5b16bf8ef2577dbc0fa8123ec5c7829b61cd4d77` · **size:** 700 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | junopy/entities/bill.py | robertons/junopy | `1acc64ab99d8ea49bb0dac979cd34da43541f243` | ["MIT"] | 3 | 2021-07-12T15:05:13.000Z | 2022-01-31T03:35:43.000Z |
| max_issues | junopy/entities/bill.py | robertons/junopy | `1acc64ab99d8ea49bb0dac979cd34da43541f243` | ["MIT"] | 2 | 2022-01-29T20:14:51.000Z | 2022-02-07T16:16:24.000Z |
| max_forks | junopy/entities/bill.py | robertons/junopy | `1acc64ab99d8ea49bb0dac979cd34da43541f243` | ["MIT"] | 1 | 2022-02-01T18:36:10.000Z | 2022-02-01T18:36:10.000Z |

**content:**

```python
# -*- coding: utf-8 -*-
from .lib import *
```

**avg_line_length:** 26.923077 · **max_line_length:** 63 · **alphanum_fraction:** 0.674286
---
**hexsha:** `5b18bfb17e1557ac4b871c78c2b1715de071b1e0` · **size:** 881 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | accounts/signals.py | julesc00/challenge | `0f991d07c3fa959e254d1b97d4d393fde13844a9` | ["MIT"] | null | null | null |
| max_issues | accounts/signals.py | julesc00/challenge | `0f991d07c3fa959e254d1b97d4d393fde13844a9` | ["MIT"] | null | null | null |
| max_forks | accounts/signals.py | julesc00/challenge | `0f991d07c3fa959e254d1b97d4d393fde13844a9` | ["MIT"] | null | null | null |

**content:**

```python
from django.db.models.signals import post_save
from django.contrib.auth.signals import user_logged_in, user_logged_out, user_login_failed
from django.contrib.auth.models import User
from django.contrib.auth.models import Group
from django.dispatch import receiver

from .models import Usuario, LoginLog

post_save.connect(user_profile, sender=User)
```

**avg_line_length:** 24.472222 · **max_line_length:** 90 · **alphanum_fraction:** 0.715096
---
**hexsha:** `5b190f68d89adb80d4fc9ec36ff5f159161ba327` · **size:** 2,166 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | Python Scripting/Python - POC-3/DvdApp.py | vaibhavkrishna-bhosle/Trendnxt-Projects | `6c8a31be2f05ec79cfc5086ee09adff161b836ad` | ["MIT"] | null | null | null |
| max_issues | Python Scripting/Python - POC-3/DvdApp.py | vaibhavkrishna-bhosle/Trendnxt-Projects | `6c8a31be2f05ec79cfc5086ee09adff161b836ad` | ["MIT"] | null | null | null |
| max_forks | Python Scripting/Python - POC-3/DvdApp.py | vaibhavkrishna-bhosle/Trendnxt-Projects | `6c8a31be2f05ec79cfc5086ee09adff161b836ad` | ["MIT"] | null | null | null |

**content:**

```python
import mysql.connector
from mysql.connector.errors import ProgrammingError
from mysql.connector import Error
from DvdOperations import DvdStore

database = "db4"

Function2()
```

**avg_line_length:** 24.066667 · **max_line_length:** 171 · **alphanum_fraction:** 0.60711
---
**hexsha:** `5b1919573f3036459523134660e1cde252b7f5d5` · **size:** 8,689 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | cloudshell/rest/api.py | QualiSystems/cloudshell-rest-api | `70d09262c81b8dae55053aae162a7265cf67865f` | ["Apache-2.0"] | 1 | 2021-11-26T22:52:42.000Z | 2021-11-26T22:52:42.000Z |
| max_issues | cloudshell/rest/api.py | katzy687/cloudshell-rest-api | `70d09262c81b8dae55053aae162a7265cf67865f` | ["Apache-2.0"] | 11 | 2019-01-08T06:37:34.000Z | 2021-06-09T17:39:50.000Z |
| max_forks | cloudshell/rest/api.py | katzy687/cloudshell-rest-api | `70d09262c81b8dae55053aae162a7265cf67865f` | ["Apache-2.0"] | 7 | 2016-09-27T13:14:00.000Z | 2021-11-23T14:02:06.000Z |

**content:**

```python
#!/usr/bin/python
# -*- coding: utf-8 -*-

import os
import json

try:
    import urllib2
except:
    import urllib.request as urllib2

from requests import delete, get, post, put

from cloudshell.rest.exceptions import ShellNotFoundException, FeatureUnavailable
```

**avg_line_length:** 36.662447 · **max_line_length:** 140 · **alphanum_fraction:** 0.579008
---
**hexsha:** `5b19d3c83fe2ac0f121d05692ca3db02ba4ea908` · **size:** 1,848 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | data/scripts/classes/team_row.py | matt-waite/lol-reference | `1042fc0a63f7911ed9434b5bb6ba8f866fc0a9c2` | ["MIT"] | 1 | 2020-08-26T17:29:58.000Z | 2020-08-26T17:29:58.000Z |
| max_issues | data/scripts/classes/team_row.py | matt-waite/lol-reference | `1042fc0a63f7911ed9434b5bb6ba8f866fc0a9c2` | ["MIT"] | null | null | null |
| max_forks | data/scripts/classes/team_row.py | matt-waite/lol-reference | `1042fc0a63f7911ed9434b5bb6ba8f866fc0a9c2` | ["MIT"] | null | null | null |

**content:**

```python
from classes import oracles_headers
```

**avg_line_length:** 24 · **max_line_length:** 63 · **alphanum_fraction:** 0.540584
---
**hexsha:** `5b1a34dd97d2ac3c30c9847cc931832f35fa692e` · **size:** 7,854 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | startup/97-standard-plans.py | MikeHart85/SIX_profile_collection | `f4b34add0c464006a1310375b084c63597b6baf0` | ["BSD-3-Clause"] | null | null | null |
| max_issues | startup/97-standard-plans.py | MikeHart85/SIX_profile_collection | `f4b34add0c464006a1310375b084c63597b6baf0` | ["BSD-3-Clause"] | null | null | null |
| max_forks | startup/97-standard-plans.py | MikeHart85/SIX_profile_collection | `f4b34add0c464006a1310375b084c63597b6baf0` | ["BSD-3-Clause"] | null | null | null |

**content:**

```python
#TODO put this inside of rixscam
def rixscam_get_threshold(Ei=None):
    '''Calculate the minimum and maximum threshold for RIXSCAM
    single photon counting (LS mode)

    Ei\t:\t float - incident energy (default is beamline current energy)
    '''
    if Ei is None:
        Ei = pgm.en.user_readback.value

    t_min = 0.7987 * Ei - 97.964
    t_max = 1.4907 * Ei + 38.249

    print('\n\n\tMinimum value for RIXSCAM threshold (LS mode):\t{}'.format(t_min))
    print('\tMaximum value for RIXSCAM threshold (LS mode):\t{}'.format(t_max))
    print('\tFor Beamline Energy:\t\t\t\t{}'.format(Ei))
    return t_min, t_max

#TODO put this inside of rixscam
#TODO make official so that there is a m1_fbk device like m1fbk.setpoint
m1_fbk = EpicsSignal('XF:02IDA-OP{FBck}Sts:FB-Sel', name='m1_fbk')
m1_fbk_sp = EpicsSignal('XF:02IDA-OP{FBck}PID-SP', name='m1_fbk_sp')
m1_fbk_th = extslt_cam.stats1.centroid_threshold
#m1_fbk_pix_x = extslt_cam.stats1.centroid.x.value
m1_fbk_cam_time = extslt_cam.cam.acquire_time
#(mv(m1_fbk_th,1500)

m1_simple_fbk = EpicsSignal('XF:02IDA-OP{M1_simp_feed}FB-Ena', name='m1_simple_fbk')
m1_simple_fbk_target_ratio = EpicsSignal('XF:02IDA-OP{M1_simp_feed}FB-TarRat', name='m1_simple_fbk_target_ratio')
m1_simple_fbk_ratio = EpicsSignal('XF:02IDA-OP{M1_simp_feed}FB-Ratio', name='m1_simple_fbk_ratio')

m3_simple_fbk = EpicsSignal('XF:02IDA-OP{M3_simp_feed}FB-Ena', name='m3_simple_fbk')
m3_simple_fbk_target = EpicsSignal('XF:02IDA-OP{M3_simp_feed}FB-Targ', name='m3_simple_fbk_target')
m3_simple_fbk_cen = EpicsSignal('XF:02IDA-OP{M3_simp_feed}FB_inpbuf', name='m3_simple_fbk_cen')
```

**avg_line_length:** 37.222749 · **max_line_length:** 134 · **alphanum_fraction:** 0.697734
---
**hexsha:** `5b1a7c8341406690f20aa12accdb9fc9001deadc` · **size:** 238 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | speechpro/cloud/speech/synthesis/rest/cloud_client/api/__init__.py | speechpro/cloud-python | `dfcfc19a1f008b55c5290599c594fe8de777018b` | ["MIT"] | 15 | 2020-05-27T09:35:32.000Z | 2022-03-29T18:35:36.000Z |
| max_issues | speechpro/cloud/speech/synthesis/rest/cloud_client/api/__init__.py | speechpro/cloud-python | `dfcfc19a1f008b55c5290599c594fe8de777018b` | ["MIT"] | null | null | null |
| max_forks | speechpro/cloud/speech/synthesis/rest/cloud_client/api/__init__.py | speechpro/cloud-python | `dfcfc19a1f008b55c5290599c594fe8de777018b` | ["MIT"] | 1 | 2021-04-06T21:39:29.000Z | 2021-04-06T21:39:29.000Z |

**content:**

```python
from __future__ import absolute_import

# flake8: noqa

# import apis into api package
import speechpro.cloud.speech.synthesis.rest.cloud_client.api.session_api
import speechpro.cloud.speech.synthesis.rest.cloud_client.api.synthesize_api
```

**avg_line_length:** 29.75 · **max_line_length:** 76 · **alphanum_fraction:** 0.848739
---
**hexsha:** `5b1aad312b8c27483bc4147a2754724cb8c715fb` · **size:** 1,039 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | learn_pyqt5/checkable_bar.py | liusong-cn/python | `f67933f0879021a595258e09c4cde5ca1f9f6aed` | ["Apache-2.0"] | 1 | 2019-11-12T13:38:54.000Z | 2019-11-12T13:38:54.000Z |
| max_issues | learn_pyqt5/checkable_bar.py | liusong-cn/python | `f67933f0879021a595258e09c4cde5ca1f9f6aed` | ["Apache-2.0"] | null | null | null |
| max_forks | learn_pyqt5/checkable_bar.py | liusong-cn/python | `f67933f0879021a595258e09c4cde5ca1f9f6aed` | ["Apache-2.0"] | null | null | null |

**content:**

```python
# _*_ coding:utf-8 _*_
# author:ls
# time:2020/3/19 0019
import sys
from PyQt5.QtWidgets import QApplication, QAction, QMainWindow
from PyQt5.QtGui import QIcon

if __name__ == '__main__':
    app = QApplication(sys.argv)
    ex = Example()
    sys.exit(app.exec_())
```

**avg_line_length:** 25.341463 · **max_line_length:** 60 · **alphanum_fraction:** 0.627526
---
**hexsha:** `5b1aca9be8fbadae0d16bcaf4d8c545808d7368a` · **size:** 3,451 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | service/test.py | ksiomelo/cubix | `cd9e6dda6696b302a7c0d383259a9d60b15b0d55` | ["Apache-2.0"] | 3 | 2015-09-07T00:16:16.000Z | 2019-01-11T20:27:56.000Z |
| max_issues | service/test.py | ksiomelo/cubix | `cd9e6dda6696b302a7c0d383259a9d60b15b0d55` | ["Apache-2.0"] | null | null | null |
| max_forks | service/test.py | ksiomelo/cubix | `cd9e6dda6696b302a7c0d383259a9d60b15b0d55` | ["Apache-2.0"] | null | null | null |

**content:**

```python
#!/usr/bin/env python
import pika
import time
import json
import StringIO

#from fca.concept import Concept
from casa import Casa
#from fca.readwrite import cxt

connection = pika.BlockingConnection(pika.ConnectionParameters(
    host='localhost'))
channel = connection.channel()

channel.queue_declare(queue='task_queue', durable=True, exclusive=False)
channel.queue_declare(queue='msg_queue', durable=True, exclusive=False)

#channel.exchange_declare(exchange='',
#                         type="topic",
#                         durable=True,
#                         auto_delete=False)
#channel.queue_declare(queue="task_queue",
#                      durable=True,
#                      exclusive=False,
#                      auto_delete=False,
#                      callback=on_queue_declared)

print ' [*] Waiting for messages. To exit press CTRL+C'

channel.basic_qos(prefetch_count=1)
channel.basic_consume(callback, queue='task_queue')
channel.basic_consume(msg_callback, queue='msg_queue')

channel.start_consuming()
```

**avg_line_length:** 30.8125 · **max_line_length:** 86 · **alphanum_fraction:** 0.597508
---
**hexsha:** `5b1cda3e00260587ee1daafde0d87ed8f1313a59` · **size:** 310 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | src/nia/selections/rank.py | salar-shdk/nia | `bb0f1b941240b627291dd8212b8840cbe77b0398` | ["MIT"] | 8 | 2021-09-06T07:20:23.000Z | 2022-02-23T23:18:22.000Z |
| max_issues | src/nia/selections/rank.py | salar-shdk/nia | `bb0f1b941240b627291dd8212b8840cbe77b0398` | ["MIT"] | null | null | null |
| max_forks | src/nia/selections/rank.py | salar-shdk/nia | `bb0f1b941240b627291dd8212b8840cbe77b0398` | ["MIT"] | null | null | null |

**content:**

```python
from .selection import Selection
import numpy as np
```

**avg_line_length:** 25.833333 · **max_line_length:** 80 · **alphanum_fraction:** 0.677419
---
**hexsha:** `5b1ed26356ab2b3641b50b827cab69738be819bd` · **size:** 15,878 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | datasets/imppres/imppres.py | ddhruvkr/datasets-1 | `66f2a7eece98d2778bd22bb5034cb7c2376032d4` | ["Apache-2.0"] | 7 | 2021-01-04T22:18:26.000Z | 2021-07-10T09:13:29.000Z |
| max_issues | datasets/imppres/imppres.py | ddhruvkr/datasets-1 | `66f2a7eece98d2778bd22bb5034cb7c2376032d4` | ["Apache-2.0"] | null | null | null |
| max_forks | datasets/imppres/imppres.py | ddhruvkr/datasets-1 | `66f2a7eece98d2778bd22bb5034cb7c2376032d4` | ["Apache-2.0"] | 3 | 2021-01-03T22:08:20.000Z | 2021-08-12T20:09:39.000Z |

**content:**

```python
# coding=utf-8
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Over 25k semiautomatically generated sentence pairs illustrating well-studied pragmatic inference types.

IMPPRES is an NLI dataset following the format of SNLI (Bowman et al., 2015), MultiNLI (Williams
et al., 2018) and XNLI (Conneau et al., 2018), which was created to evaluate how well trained NLI
models recognize several classes of presuppositions and scalar implicatures."""

from __future__ import absolute_import, division, print_function

import json
import os

import datasets


# Find for instance the citation on arxiv or on the dataset repo/website
_CITATION = """\
@inproceedings{jeretic-etal-2020-natural,
    title = "Are Natural Language Inference Models {IMPPRESsive}? {L}earning {IMPlicature} and {PRESupposition}",
    author = "Jereti\v{c}, Paloma and Warstadt, Alex and Bhooshan, Suvrat and Williams, Adina",
    booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
    month = jul,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/2020.acl-main.768",
    doi = "10.18653/v1/2020.acl-main.768",
    pages = "8690--8705",
    abstract = "Natural language inference (NLI) is an increasingly important task for natural language understanding, which requires one to infer whether a sentence entails another. However, the ability of NLI models to make pragmatic inferences remains understudied. We create an IMPlicature and PRESupposition diagnostic dataset (IMPPRES), consisting of 32K semi-automatically generated sentence pairs illustrating well-studied pragmatic inference types. We use IMPPRES to evaluate whether BERT, InferSent, and BOW NLI models trained on MultiNLI (Williams et al., 2018) learn to make pragmatic inferences. Although MultiNLI appears to contain very few pairs illustrating these inference types, we find that BERT learns to draw pragmatic inferences. It reliably treats scalar implicatures triggered by {``}some{''} as entailments. For some presupposition triggers like {``}only{''}, BERT reliably recognizes the presupposition as an entailment, even when the trigger is embedded under an entailment canceling operator like negation. BOW and InferSent show weaker evidence of pragmatic reasoning. We conclude that NLI training encourages models to learn some, but not all, pragmatic inferences.",
}
"""

# You can copy an official description
_DESCRIPTION = """Over >25k semiautomatically generated sentence pairs illustrating well-studied pragmatic inference types.

IMPPRES is an NLI dataset following the format of SNLI (Bowman et al., 2015), MultiNLI (Williams et al., 2018) and XNLI (Conneau et al., 2018), which was created to evaluate how well trained NLI models recognize several classes of presuppositions and scalar implicatures."""

_HOMEPAGE = "https://github.com/facebookresearch/Imppres"

_LICENSE = "Creative Commons Attribution-NonCommercial 4.0 International Public License"

# The HuggingFace dataset library doesn't host the datasets but only points to the original files
# This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
_URLs = {"default": "https://github.com/facebookresearch/Imppres/blob/master/dataset/IMPPRES.zip?raw=true"}
```

**avg_line_length:** 56.910394 · **max_line_length:** 1,197 · **alphanum_fraction:** 0.634463
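The `_URLs` mapping at the end of this record is typically consumed in the loading script's `_split_generators`. A hedged sketch of that common pattern (the method body below is illustrative, not the elided remainder of this file; `dl_manager.download_and_extract` and `datasets.SplitGenerator` are real `datasets` APIs):

```python
# Illustrative only: how a datasets loading script usually consumes _URLs.
def _split_generators(self, dl_manager):
    data_dir = dl_manager.download_and_extract(_URLs["default"])
    return [
        datasets.SplitGenerator(
            name=datasets.Split.TRAIN,
            gen_kwargs={"filepath": data_dir},  # the kwarg keys here are hypothetical
        )
    ]
```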
---
**hexsha:** `5b201dedf7625f49673a17f90219f4d165f06f5d` · **size:** 1,322 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | app.py | juergenpointinger/status-dashboard | `439c7e9b6966ff10ada4062c6b97d5088083f442` | ["MIT"] | null | null | null |
| max_issues | app.py | juergenpointinger/status-dashboard | `439c7e9b6966ff10ada4062c6b97d5088083f442` | ["MIT"] | null | null | null |
| max_forks | app.py | juergenpointinger/status-dashboard | `439c7e9b6966ff10ada4062c6b97d5088083f442` | ["MIT"] | null | null | null |

**content:**

```python
# Standard library imports
import logging
import os

# Third party imports
import dash
import dash_bootstrap_components as dbc
from flask_caching import Cache
import plotly.io as pio

# Local application imports
from modules.gitlab import GitLab
import settings

# Initialize logging mechanism
logging.basicConfig(level=settings.LOGLEVEL, format=settings.LOGFORMAT)
logger = logging.getLogger(__name__)

gl = GitLab()
logger.info("Current GitLab version: {}".format(GitLab.version))

# App instance
app = dash.Dash(__name__,
                suppress_callback_exceptions=True,
                external_stylesheets=[dbc.themes.BOOTSTRAP])
app.title = settings.APP_NAME

# App caching
# CACHE_CONFIG = {
#     # Note that filesystem cache doesn't work on systems with ephemeral
#     # filesystems like Heroku.
#     'CACHE_TYPE': 'filesystem',
#     'CACHE_DIR': 'cache-directory',
#
#     # should be equal to maximum number of users on the app at a single time
#     # higher numbers will store more data in the filesystem / redis cache
#     'CACHE_THRESHOLD': 200
# }

CACHE_CONFIG = {
    # try 'filesystem' if you don't want to setup redis
    'CACHE_TYPE': 'redis',
    'CACHE_REDIS_URL': settings.REDIS_URL
}
cache = Cache()
cache.init_app(app.server, config=CACHE_CONFIG)

pio.templates.default = "plotly_dark"
```

**avg_line_length:** 28.12766 · **max_line_length:** 77 · **alphanum_fraction:** 0.729955
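The `cache` object configured in this record would typically be used through `flask_caching`'s memoization decorator; a small illustrative usage (the decorated function is hypothetical, `Cache.memoize` is the library's real API):

```python
# Hypothetical consumer of the cache configured above.
@cache.memoize(timeout=300)
def expensive_query(project_id):
    # an expensive GitLab API call would go here
    ...
```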
---
**hexsha:** `5b20baf76a7bc453b189c49cad4f4c0139f19706` · **size:** 5,154 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | tests/scanner/test_data/fake_retention_scanner_data.py | ogreface/forseti-security | `a7a3573183fa1416c605dad683587717795fe13b` | ["Apache-2.0"] | null | null | null |
| max_issues | tests/scanner/test_data/fake_retention_scanner_data.py | ogreface/forseti-security | `a7a3573183fa1416c605dad683587717795fe13b` | ["Apache-2.0"] | null | null | null |
| max_forks | tests/scanner/test_data/fake_retention_scanner_data.py | ogreface/forseti-security | `a7a3573183fa1416c605dad683587717795fe13b` | ["Apache-2.0"] | null | null | null |

**content:**

```python
# Copyright 2018 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Fake Retention scanner data."""

import json
from datetime import datetime, timedelta
import collections

from google.cloud.forseti.common.gcp_type import organization
from google.cloud.forseti.common.gcp_type import project
from google.cloud.forseti.common.gcp_type import bucket
from google.cloud.forseti.scanner.audit import retention_rules_engine as rre

ORGANIZATION = organization.Organization(
    '123456',
    display_name='Default Organization',
    full_name='organization/123456/',
    data='fake_org_data_123456',
)

PROJECT1 = project.Project(
    'def-project-1',
    project_number=11223344,
    display_name='default project 1',
    parent=ORGANIZATION,
    full_name='organization/123456/project/def-project-1/',
    data='fake_project_data_11223344',
)

PROJECT2 = project.Project(
    'def-project-2',
    project_number=55667788,
    display_name='default project 2',
    parent=ORGANIZATION,
    full_name='organization/123456/project/def-project-2/',
    data='fake_project_data_55667788',
)

PROJECT3 = project.Project(
    'def-project-3',
    project_number=12121212,
    display_name='default project 3',
    parent=ORGANIZATION,
    full_name='organization/123456/project/def-project-3/',
    data='fake_project_data_12121212',
)

PROJECT4 = project.Project(
    'def-project-4',
    project_number=34343434,
    display_name='default project 4',
    parent=ORGANIZATION,
    full_name='organization/123456/project/def-project-4/',
    data='fake_project_data_34343434',
)

FakeBucketDataInput = collections.namedtuple(
    'FakeBucketDataInput', ['id', 'project', 'lifecycles'])
LifecycleInput = collections.namedtuple(
    'LifecycleInput', ['action', 'conditions'])
```

**avg_line_length:** 34.13245 · **max_line_length:** 85 · **alphanum_fraction:** 0.684517
---
**hexsha:** `5b22463c2df2d021f347bc17bcb98562b99edb54` · **size:** 4,298 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | libsonyapi/camera.py | BugsForDays/libsonyapi | `c6482b4ad90f199b7fb4e344f8e61d4ed0f9466f` | ["MIT"] | 13 | 2019-04-19T16:44:58.000Z | 2021-09-20T05:33:10.000Z |
| max_issues | libsonyapi/camera.py | BugsForDays/libsonyapi | `c6482b4ad90f199b7fb4e344f8e61d4ed0f9466f` | ["MIT"] | 3 | 2021-04-23T17:21:50.000Z | 2022-01-06T17:21:28.000Z |
| max_forks | libsonyapi/camera.py | BugsForDays/libsonyapi | `c6482b4ad90f199b7fb4e344f8e61d4ed0f9466f` | ["MIT"] | 5 | 2019-04-11T20:24:47.000Z | 2021-10-17T22:02:56.000Z |

**content:**

```python
import socket
import requests
import json
import xml.etree.ElementTree as ET
```

**avg_line_length:** 37.051724 · **max_line_length:** 156 · **alphanum_fraction:** 0.578176
---
**hexsha:** `5b231e5f06d51cf2896d5d0d0db4095473d26007` · **size:** 11,961 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | utility_functions_flu.py | neherlab/treetime_validation | `c9760194712396ea5f5c33a9215eddbd3d13bfc1` | ["MIT"] | 4 | 2019-01-28T06:47:48.000Z | 2021-04-22T16:31:37.000Z |
| max_issues | utility_functions_flu.py | neherlab/treetime_validation | `c9760194712396ea5f5c33a9215eddbd3d13bfc1` | ["MIT"] | 1 | 2020-04-03T14:42:11.000Z | 2020-04-03T14:42:11.000Z |
| max_forks | utility_functions_flu.py | neherlab/treetime_validation | `c9760194712396ea5f5c33a9215eddbd3d13bfc1` | ["MIT"] | 1 | 2020-03-25T06:58:45.000Z | 2020-03-25T06:58:45.000Z |

**content:**

```python
#!/usr/bin/env python
"""
This module defines functions to facilitate operations with data specific
to Flu trees and alignments.
"""
import numpy as np
from Bio import AlignIO, Phylo
from Bio.Align import MultipleSeqAlignment
import random
import subprocess
import datetime
import os, copy
import matplotlib.pyplot as plt
from scipy.stats import linregress
from collections import Counter
import StringIO

import treetime
from utility_functions_general import remove_polytomies
from utility_functions_beast import run_beast, create_beast_xml, read_beast_log
import xml.etree.ElementTree as XML
from external_binaries import BEAST_BIN


def date_from_seq_name(name):
    """
    Parse flu sequence name to the date in numeric format (YYYY.F)

    Args:
     - name(str): name of the flu sequence.

    Returns:
     - sequence sampling date if succeeded to parse. None otherwise.
    """

    def str2date_time(instr):
        """
        Convert input string to datetime object.

        Args:
         - instr (str): input string. Accepts one of the formats:
           {MM.DD.YYYY, MM.YYYY, MM/DD/YYYY, MM/YYYY, YYYY}.

        Returns:
         - date (datetime.datetime): parsed date object. If the parsing
           failed, None is returned
        """
        instr = instr.replace('/', '.')
        # import ipdb; ipdb.set_trace()
        try:
            date = datetime.datetime.strptime(instr, "%m.%d.%Y")
        except ValueError:
            date = None
        if date is not None:
            return date

        try:
            date = datetime.datetime.strptime(instr, "%m.%Y")
        except ValueError:
            date = None
        if date is not None:
            return date

        try:
            date = datetime.datetime.strptime(instr, "%Y")
        except ValueError:
            date = None
        return date

    try:
        date = str2date_time(name.split('|')[3].strip())
        return date.year + (date - datetime.datetime(date.year, 1, 1)).days / 365.25
    except:
        return None


def dates_from_flu_tree(tree):
    """
    Iterate over the Flu tree, parse each leaf name and return dates
    for the leaves as dictionary.

    Args:
     - tree(str or Biopython tree): Flu tree

    Returns:
     - dates(dict): dictionary of dates in format {seq_name: numdate}. Only
       the entries which were parsed successfully are included.
    """
    if isinstance(tree, str):
        tree = Phylo.read(tree, 'newick')

    dates = {k.name: date_from_seq_name(k.name) for k in tree.get_terminals()
             if date_from_seq_name(k.name) is not None}
    return dates


def subtree_with_same_root(tree, Nleaves, outfile, optimize=True):
    """
    Sample subtree of the given tree so that the root of the subtree
    is that of the original tree.

    Args:
     - tree(str or Biopython tree): initial tree
     - Nleaves(int): number of leaves in the target subtree
     - outfile(str): path to save the resulting subtree
     - optimize(bool): perform branch length optimization for the subtree?

    Returns:
     - tree(Biopython tree): the subtree
    """
    if isinstance(tree, str):
        treecopy = Phylo.read(tree, 'newick')
    else:
        treecopy = copy.deepcopy(tree)
    remove_polytomies(treecopy)
    assert(len(treecopy.root.clades) == 2)

    tot_terminals = treecopy.count_terminals()

    # sample to the left of the root
    left = treecopy.root.clades[0]
    n_left = left.count_terminals()
    right = treecopy.root.clades[1]
    n_right = right.count_terminals()

    n_left_sampled = np.min((n_left, Nleaves * n_left / (n_left + n_right)))
    n_left_sampled = np.max((n_left_sampled, 5))  # make sure we have at least one
    left_terminals = left.get_terminals()
    left_sample_idx = np.random.choice(np.arange(len(left_terminals)),
                                       size=n_left_sampled, replace=False)
    left_sample = [left_terminals[i] for i in left_sample_idx]

    # sample to the right of the root
    n_right_sampled = np.min((n_right, Nleaves * n_right / (n_left + n_right)))
    n_right_sampled = np.max((n_right_sampled, 5))  # make sure we have at least one
    right_terminals = right.get_terminals()
    right_sample_idx = np.random.choice(np.arange(len(right_terminals)),
                                        size=n_right_sampled, replace=False)
    right_sample = [right_terminals[i] for i in right_sample_idx]

    for leaf in treecopy.get_terminals():
        if leaf not in right_sample and leaf not in left_sample:
            treecopy.prune(leaf)
        else:
            pass
            #print ("leaving leaf {} in the tree".format(leaf.name))

    if optimize:
        import treetime
        dates = dates_from_flu_tree(treecopy)
        aln = './resources/flu_H3N2/H3N2_HA_2011_2013.fasta'
        tt = treetime.TreeAnc(tree=treecopy, aln=aln, gtr='Jukes-Cantor')
        tt.optimize_seq_and_branch_len(prune_short=False)
        Phylo.write(tt.tree, outfile, 'newick')
        return tt.tree
    else:
        Phylo.write(treecopy, outfile, 'newick')
        return treecopy


def subtree_year_vol(tree, N_per_year, outfile):
    """
    Sample subtree of the given tree with equal number of samples per year.

    Note:
     - if there are not enough leaves sampled at a given year, all leaves
       for this year will be included in the subtree.

    Args:
     - tree(str or Biopython object): Initial tree
     - N_per_year(int): number of samples per year.
     - outfile (str): path to save the subtree

    Returns:
     - tree(Biopython tree): the subtree
    """
    if isinstance(tree, str):
        treecopy = Phylo.read(tree, 'newick')
    else:
        treecopy = copy.deepcopy(tree)
    remove_polytomies(treecopy)
    dates = dates_from_flu_tree(treecopy)

    sample = []
    cntr = Counter(map(int, dates.values()))
    years = cntr.keys()
    min_year = np.min(years)

    for year in years:
        all_names = [k for k in dates if int(dates[k]) == year]
        if len(all_names) <= N_per_year or year == min_year:
            sample += all_names
        else:
            sample += list(np.random.choice(all_names, size=N_per_year, replace=False))

    for leaf in treecopy.get_terminals():
        if leaf.name not in sample:
            treecopy.prune(leaf)
        else:
            pass
            #print ("leaving leaf {} in the tree".format(leaf.name))

    Phylo.write(treecopy, outfile, 'newick')
    return treecopy


def create_LSD_dates_file_from_flu_tree(tree, outfile):
    """
    Parse dates from the flu tree and write to the file in the LSD format.

    Args:
     - tree(str or Biopython object): Initial tree
     - outfile(str): path to save the LSD dates file.

    Returns:
     - dates(dict): dates parsed from the tree as dictionary.
    """
    dates = dates_from_flu_tree(tree)
    with open(outfile, 'w') as df:
        df.write(str(len(dates)) + "\n")
        df.write("\n".join([str(k) + "\t" + str(dates[k]) for k in dates]))
    return dates


def make_known_dates_dict(alnfile, dates_known_fraction=1.0):
    """
    Read all the dates of the given flu sequences, and make the dates
    dictionary for only a fraction of them. The sequences in the resulting
    dict are chosen randomly.
    """
    aln = AlignIO.read(alnfile, 'fasta')
    dates = {k.name: date_from_seq_name(k.name) for k in aln}
    # randomly choose the dates so that only the known_ratio number of dates is known
    if dates_known_fraction != 1.0:
        assert(dates_known_fraction > 0 and dates_known_fraction < 1.0)
        knonw_keys = np.random.choice(dates.keys(),
                                      size=int(len(dates) * dates_known_fraction),
                                      replace=False)
        dates = {k: dates[k] for k in knonw_keys}
    return dates


def create_treetime_with_missing_dates(alnfile, treefile, dates_known_fraction=1.0):
    """
    Create TreeTime object with fraction of leaves having no sampling dates.
    The leaves to erase sampling dates are chosen randomly.

    Args:
     - alnfile(str): path to the flu alignment
     - treefile(str): path to the Flu newick tree
     - dates_known_fraction(float): fraction of leaves, which should have
       sampling date information.
    """
    aln = AlignIO.read(alnfile, 'fasta')
    tt = Phylo.read(treefile, 'newick')
    dates = make_known_dates_dict(alnfile, dates_known_fraction)

    myTree = treetime.TreeTime(gtr='Jukes-Cantor', tree=treefile,
                               aln=alnfile, verbose=4, dates=dates, debug=False)
    myTree.optimize_seq_and_branch_len(reuse_branch_len=True, prune_short=True,
                                       max_iter=5, infer_gtr=False)
    return myTree


def create_subtree(tree, n_seqs, out_file, st_type='equal_sampling'):
    """
    Args:
     - tree(filename or Biopython tree): original tree
     - n_seqs: number of leaves in the resulting subtree
     - out_file: output location to store the resulting subtree
     - st_type: type of the subtree generation algorithm. Available types:
       - random: just choose n_leaves randomly
       - equal_sampling: choose equal leaves each year (if possible)
       - preserve_root: sample from right and left subtrees of the tree root.
         The root of the resulting subtree is therefore the same as of the
         original tree
    """
    if isinstance(tree, str):
        tree = Phylo.read(tree, 'newick')
    pass


if __name__ == '__main__':
    pass
```

**avg_line_length:** 32.239892 · **max_line_length:** 157 · **alphanum_fraction:** 0.664995
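A quick check of `date_from_seq_name` from this record, assuming the usual pipe-separated flu name layout with the date in the fourth field (the sequence name below is made up for illustration):

```python
# Hypothetical sequence name; the date sits at index 3 of the '|'-split name.
name = "A/Boston/DD/2012|H3N2|seg4|03/15/2012|NA"
print(date_from_seq_name(name))  # 2012 + 74/365.25, i.e. about 2012.2026
```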
---
**hexsha:** `5b24576277ff90503d0b77ea45447ed2cd207807` · **size:** 3,443 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | add_label.py | Mause/pull_requests | `6c3aa3feb8ec775c184eaa70d09b944ba753125b` | ["MIT"] | null | null | null |
| max_issues | add_label.py | Mause/pull_requests | `6c3aa3feb8ec775c184eaa70d09b944ba753125b` | ["MIT"] | 39 | 2021-02-10T05:59:09.000Z | 2022-03-18T07:21:29.000Z |
| max_forks | add_label.py | Mause/pull_requests | `6c3aa3feb8ec775c184eaa70d09b944ba753125b` | ["MIT"] | null | null | null |

**content:**

```python
from asyncio import get_event_loop
from dataclasses import dataclass, field
from typing import Dict, List, Optional, Union

from aiohttp import ClientSession
from pydantic import BaseModel
from sgqlc.endpoint.base import BaseEndpoint
from sgqlc.operation import Operation
from sgqlc_schemas.github.schema import (
    AddLabelsToLabelableInput,
    AddLabelsToLabelablePayload,
    MergePullRequestInput,
    Mutation,
    Query,
    Repository,
)


def build_merge(ids: List[str]):
    op = Operation(Mutation)
    for i, ident in enumerate(ids):
        op.merge_pull_request(
            input=MergePullRequestInput(pull_request_id=ident), __alias__=f'merge_{i}'
        ).pull_request.title()
    return op


if __name__ == "__main__":
    get_event_loop().run_until_complete(main())
```

**avg_line_length:** 27.544 · **max_line_length:** 88 · **alphanum_fraction:** 0.652338
---
**hexsha:** `5b24e7eb961669dcd20e501b760778d98a071d8b` · **size:** 851 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | DataEngineering/Chapter7/7.6/financialdata/financialdata/scheduler.py | yz830620/FinMindBook | `1ffda3541eb73e6d4cb47798bf9d28b66a49939b` | ["MIT"] | 5 | 2021-12-13T12:03:22.000Z | 2022-03-30T08:51:19.000Z |
| max_issues | DataEngineering/Chapter7/7.6/financialdata/financialdata/scheduler.py | yz830620/FinMindBook | `1ffda3541eb73e6d4cb47798bf9d28b66a49939b` | ["MIT"] | 1 | 2022-01-26T05:42:56.000Z | 2022-03-12T08:24:57.000Z |
| max_forks | DataEngineering/Chapter7/7.6/financialdata/financialdata/scheduler.py | yz830620/FinMindBook | `1ffda3541eb73e6d4cb47798bf9d28b66a49939b` | ["MIT"] | 6 | 2021-12-14T04:32:01.000Z | 2022-03-31T17:15:11.000Z |

**content:**

```python
import time
import datetime
from apscheduler.schedulers.background import BackgroundScheduler
from financialdata.producer import Update
from loguru import logger


if __name__ == "__main__":
    main()
    while True:
        time.sleep(600)
```

**avg_line_length:** 24.314286 · **max_line_length:** 74 · **alphanum_fraction:** 0.679201
---
**hexsha:** `d289828efb378099de1d3d6011a5a3e50df04330` · **size:** 2,692 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | openmc_plasma_source/plotting/plot_tokamak_source.py | mdfaisal98/openmc-plasma-source | `e55d61ce6d641f4d382ce298b6f6335cd46bc507` | ["MIT"] | null | null | null |
| max_issues | openmc_plasma_source/plotting/plot_tokamak_source.py | mdfaisal98/openmc-plasma-source | `e55d61ce6d641f4d382ce298b6f6335cd46bc507` | ["MIT"] | null | null | null |
| max_forks | openmc_plasma_source/plotting/plot_tokamak_source.py | mdfaisal98/openmc-plasma-source | `e55d61ce6d641f4d382ce298b6f6335cd46bc507` | ["MIT"] | null | null | null |

**content:**

```python
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np


def scatter_tokamak_source(source, quantity=None, **kwargs):
    """Create a 2D scatter plot of the tokamak source.
    See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.scatter.html
    for more arguments.

    Args:
        source (ops.TokamakSource): the plasma source
        quantity ("str", optional): value by which the lines should be
            coloured. Defaults to None.

    Raises:
        ValueError: If the quantity is unknown
    """
    quantity_to_attribute = {
        "ion_temperature": source.temperatures,
        "neutron_source_density": source.strengths,
    }
    if quantity in quantity_to_attribute:
        colours = quantity_to_attribute[quantity]
    elif quantity is None:
        colours = None
    else:
        raise ValueError("Unknown quantity")
    plt.gca().set_aspect("equal")
    return plt.scatter(source.RZ[0], source.RZ[1], c=colours, **kwargs)


def plot_tokamak_source_3D(source, quantity=None, angles=[0, 1/2*np.pi],
                           colorbar="viridis", **kwargs):
    """Creates a 3D plot of the tokamak source.
    See https://matplotlib.org/stable/api/_as_gen/matplotlib.pyplot.plot.html#matplotlib.pyplot.plot
    for more arguments.

    Args:
        source (ops.TokamakSource): the plasma source
        quantity ("str", optional): value by which the lines should be
            coloured. Defaults to None.
        angles (list, optional): iterable of two floats defining the coverage.
            Defaults to [0, 1/2*np.pi].
        colorbar (str, optional): colorbar used if quantity is not None.
            Defaults to "viridis".

    Raises:
        ValueError: If the quantity is unknown
    """
    quantity_to_attribute = {
        "ion_temperature": source.temperatures,
        "neutron_source_density": source.strengths,
    }
    if quantity in quantity_to_attribute:
        values = quantity_to_attribute[quantity]
    elif quantity is None:
        values = None
    else:
        raise ValueError("Unknown quantity")

    colorbar = cm.get_cmap(colorbar)
    axes = plt.axes(projection="3d")
    theta = np.linspace(*angles, 100)

    for i in range(source.sample_size):
        if values is not None:
            colour = colorbar(values[i] / max(values))
        else:
            colour = None
        x = source.RZ[0][i] * np.sin(theta)
        y = source.RZ[0][i] * np.cos(theta)
        z = source.RZ[1][i]
        plt.plot(x, y, z, color=colour, **kwargs)

    axes.set_xlim(-source.major_radius, source.major_radius)
    axes.set_ylim(-source.major_radius, source.major_radius)
    axes.set_zlim(-source.major_radius, source.major_radius)
```

**avg_line_length:** 33.65 · **max_line_length:** 103 · **alphanum_fraction:** 0.658247
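The 3D loop in `plot_tokamak_source_3D` sweeps each sampled (R, Z) point through a range of toroidal angles θ, i.e. (restating the code):

```latex
(x,\, y,\, z) = (R\sin\theta,\; R\cos\theta,\; Z), \qquad \theta \in [\theta_0,\, \theta_1]
```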
---
**hexsha:** `d28a47045a9d4366365cea9cca22f372e578a38f` · **size:** 620 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | Exercício feitos pela primeira vez/ex046.py | Claayton/pythonExerciciosLinux | `696cdb16983638418bd0d0d4fe44dc72662b9c97` | ["MIT"] | 1 | 2021-01-23T15:43:34.000Z | 2021-01-23T15:43:34.000Z |
| max_issues | Exercício feitos pela primeira vez/ex046.py | Claayton/pythonExerciciosLinux | `696cdb16983638418bd0d0d4fe44dc72662b9c97` | ["MIT"] | null | null | null |
| max_forks | Exercício feitos pela primeira vez/ex046.py | Claayton/pythonExerciciosLinux | `696cdb16983638418bd0d0d4fe44dc72662b9c97` | ["MIT"] | null | null | null |

**content** (Portuguese comments translated to English; string literals left as in the source):

```python
# Exercise 046
from time import sleep
import emoji
print('\033[32mCONTAGEM REGRESSIVA PARA O ANO NOVO:\033[m')
sleep(1)
for c in range(10, 0 - 1, -1):  # counts the numbers down from 10 to 0
    print(c)
    sleep(1)
print(emoji.emojize("\033[31m:boom::boom::boom:KABUM:boom::boom::boom:", use_aliases=True))
print(emoji.emojize("\033[32m:boom::boom::boom:FOGUETE:boom::boom::boom:", use_aliases=True))
print(emoji.emojize("\033[33m:boom::boom::boom:FOGOS E MAIS FOGOS:boom::boom::boom:", use_aliases=True))
print(emoji.emojize("\033[34m:boom::boom::boom:GUANAGARA VIADO:boom::boom::boom:", use_aliases=True))
print('\033[32mxD')
```

**avg_line_length:** 47.692308 · **max_line_length:** 104 · **alphanum_fraction:** 0.720968
---
**hexsha:** `d28ad97667405531526925b2fe6abf6f466b39ff` · **size:** 10,989 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | bmds/bmds2/logic/rules.py | shapiromatron/bmds | `57562858f3c45e9b9ec23e1c229a8a1de0ea4a70` | ["MIT"] | 2 | 2017-05-01T20:00:26.000Z | 2019-07-09T16:42:25.000Z |
| max_issues | bmds/bmds2/logic/rules.py | shapiromatron/bmds | `57562858f3c45e9b9ec23e1c229a8a1de0ea4a70` | ["MIT"] | 20 | 2016-11-23T21:30:22.000Z | 2022-02-28T15:42:36.000Z |
| max_forks | bmds/bmds2/logic/rules.py | shapiromatron/bmds | `57562858f3c45e9b9ec23e1c229a8a1de0ea4a70` | ["MIT"] | 2 | 2016-06-28T20:32:00.000Z | 2017-02-23T20:30:24.000Z |

**content:**

```python
import abc
import math

from ... import constants


class NumericValueExists(Rule):
    # Test succeeds if value is numeric and not -999
    field_name = None
    field_name_verbose = None


class BmdExists(NumericValueExists):
    default_rule_name = "BMD exists"
    field_name = "BMD"


class BmdlExists(NumericValueExists):
    default_rule_name = "BMDL exists"
    field_name = "BMDL"


class BmduExists(NumericValueExists):
    default_rule_name = "BMDU exists"
    field_name = "BMDU"


class AicExists(NumericValueExists):
    default_rule_name = "AIC exists"
    field_name = "AIC"


class RoiExists(NumericValueExists):
    default_rule_name = "Residual of interest exists"
    field_name = "residual_of_interest"
    field_name_verbose = "Residual of Interest"


class ShouldBeGreaterThan(Rule):
    # Test fails if value is less-than threshold.
    field_name = ""
    field_name_verbose = ""


class GlobalFit(ShouldBeGreaterThan):
    default_rule_name = "GGOF"
    field_name = "p_value4"
    field_name_verbose = "Goodness of fit p-value"


class ShouldBeLessThan(Rule, abc.ABC):
    # Test fails if value is greater-than threshold.
    msg = ""  # w/ arguments for value and threshold


class BmdBmdlRatio(ShouldBeLessThan):
    default_rule_name = "BMD to BMDL ratio"
    field_name_verbose = "BMD/BMDL ratio"


class RoiFit(ShouldBeLessThan):
    default_rule_name = "Residual of interest"
    field_name_verbose = "Residual of interest"


class HighBmd(ShouldBeLessThan):
    default_rule_name = "High BMD"
    field_name_verbose = "BMD/high dose ratio"


class HighBmdl(ShouldBeLessThan):
    default_rule_name = "High BMDL"
    field_name_verbose = "BMDL/high dose ratio"


class LowBmd(ShouldBeLessThan):
    default_rule_name = "Low BMD"
    field_name_verbose = "minimum dose/BMD ratio"


class LowBmdl(ShouldBeLessThan):
    default_rule_name = "Low BMDL"
    field_name_verbose = "minimum dose/BMDL ratio"


class ControlResidual(ShouldBeLessThan):
    default_rule_name = "Control residual"
    field_name_verbose = "Residual at lowest dose"


class ControlStdevResiduals(ShouldBeLessThan):
    default_rule_name = "Control stdev"
    field_name_verbose = "Ratio of modeled to actual stdev. at control"


class CorrectVarianceModel(Rule):
    # Check variance model (continuous datasets-only)
    default_rule_name = "Variance type"


class VarianceModelFit(Rule):
    default_rule_name = "Variance fit"


class NoDegreesOfFreedom(Rule):
    """
    Check to ensure at least one degree of freedom exist to prevent
    recommendation of an overfit model.
    """

    default_rule_name = "Degrees of freedom"


class Warnings(Rule):
    # Test fails if any warnings exist.
    default_rule_name = "Warnings"
```

**avg_line_length:** 31.760116 · **max_line_length:** 101 · **alphanum_fraction:** 0.628629
---
**hexsha:** `d28b5d6c386f989e7b581b7ea7ba92a93a7470b3` · **size:** 1,959 · **ext:** py · **lang:** Python

|  | repo path | repo name | head hexsha | licenses | count | event min | event max |
|---|---|---|---|---|---|---|---|
| max_stars | nets/static/conv_rnn_convT.py | MaximilienLC/nevo | `c701a1202bc18d89a622472918733bf78ba5e304` | ["Apache-2.0"] | null | null | null |
| max_issues | nets/static/conv_rnn_convT.py | MaximilienLC/nevo | `c701a1202bc18d89a622472918733bf78ba5e304` | ["Apache-2.0"] | null | null | null |
| max_forks | nets/static/conv_rnn_convT.py | MaximilienLC/nevo | `c701a1202bc18d89a622472918733bf78ba5e304` | ["Apache-2.0"] | 1 | 2022-03-31T20:44:09.000Z | 2022-03-31T20:44:09.000Z |

**content:**

```python
# Copyright 2022 Maximilien Le Clei.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch
import torch.nn as nn

from nets.static.base import StaticNetBase
```

**avg_line_length:** 26.472973 · **max_line_length:** 74 · **alphanum_fraction:** 0.598264
d28b646d833333371908e74411b14fa7d1f681ca
3,306
py
Python
ors2bryton.py
andbue/ors2bryton
7a843cbf2e4d1fc4ca85497cb23919431d8d3843
[ "Unlicense" ]
null
null
null
ors2bryton.py
andbue/ors2bryton
7a843cbf2e4d1fc4ca85497cb23919431d8d3843
[ "Unlicense" ]
1
2021-02-02T13:11:23.000Z
2021-09-10T16:38:16.000Z
ors2bryton.py
andbue/ors2bryton
7a843cbf2e4d1fc4ca85497cb23919431d8d3843
[ "Unlicense" ]
null
null
null
from sys import argv from os.path import splitext from lxml import etree from struct import pack if __name__ == "__main__": main()
31.485714
151
0.468845
d28b98aeee69dc1cdd515a34f7751e391f42ef74
5,022
py
Python
src/main/python/smart/smartplots3_run.py
cday97/beam
7e1ab50eecaefafd04daab360f8b12bc7cab559b
[ "BSD-3-Clause-LBNL" ]
123
2017-04-06T20:17:19.000Z
2022-03-02T13:42:15.000Z
src/main/python/smart/smartplots3_run.py
cday97/beam
7e1ab50eecaefafd04daab360f8b12bc7cab559b
[ "BSD-3-Clause-LBNL" ]
2,676
2017-04-26T20:27:27.000Z
2022-03-31T16:39:53.000Z
src/main/python/smart/smartplots3_run.py
cday97/beam
7e1ab50eecaefafd04daab360f8b12bc7cab559b
[ "BSD-3-Clause-LBNL" ]
60
2017-04-06T20:14:32.000Z
2022-03-30T20:10:53.000Z
import pandas as pd

import smartplots3_setup

scenarios_lables = {
    "Base_CL_CT": "Base0",
    "Base_STL_STT_BAU": "Base2",
    "Base_STL_STT_VTO": "Base3",
    "Base_LTL_LTT_BAU": "Base5",
    "Base_LTL_LTT_VTO": "Base6",
    "A_STL_STT_BAU": "A2",
    "A_STL_STT_VTO": "A3",
    "B_LTL_LTT_BAU": "B5",
    "B_LTL_LTT_VTO": "B6",
    "C_LTL_LTT_BAU": "C5",
    "C_LTL_LTT_VTO": "C6"
}

output_folder = "/home/ubuntu/git/jupyter/data/28thOct2019"

# Base_CL_CT
# A_STL_STT_BAU

settings=[]
settings.append(createSettingRow(2010,1,15,scenarios_lables["Base_CL_CT"], ""))
settings.append(createSettingRow(2025,6,15,scenarios_lables["A_STL_STT_BAU"], ""))
settings.append(createSettingRow(2025,7,15,scenarios_lables["A_STL_STT_VTO"], ""))
settings.append(createSettingRow(2040,8,15,scenarios_lables["B_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,9,15,scenarios_lables["B_LTL_LTT_VTO"], ""))
settings.append(createSettingRow(2040,10,15,scenarios_lables["C_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,11,15,scenarios_lables["C_LTL_LTT_VTO"], ""))

plt_setup_smart3 = createSetup('7scenarios', (7.75/0.315) * 27.0 / 21.3, 27.0/21.3, (8, 4.5), settings)

#smartplots3_setup.pltRealizedModeSplitByTrips(plt_setup_smart3, output_folder)
#smartplots3_setup.pltModeSplitInPMTPerCapita(plt_setup_smart3, output_folder)
#smartplots3_setup.pltAveragePersonSpeed_allModes(plt_setup_smart3, output_folder)
#smartplots3_setup.pltAveragePersonSpeed_car(plt_setup_smart3, output_folder)
#smartplots3_setup.pltModeSplitInVMT(plt_setup_smart3, output_folder)
#smartplots3_setup.pltRHEmptyPooled(plt_setup_smart3, output_folder)
#smartplots3_setup.pltRHWaitTime(plt_setup_smart3, output_folder)
#smartplots3_setup.pltLdvTechnologySplitInVMT(plt_setup_smart3, output_folder)

settings=[]
settings.append(createSettingRow(2010,1,15,scenarios_lables["Base_CL_CT"], ""))
settings.append(createSettingRow(2025,2,15,scenarios_lables["Base_STL_STT_BAU"], ""))
settings.append(createSettingRow(2025,3,15,scenarios_lables["Base_STL_STT_VTO"], ""))
settings.append(createSettingRow(2040,4,15,scenarios_lables["Base_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,5,15,scenarios_lables["Base_LTL_LTT_VTO"], ""))
settings.append(createSettingRow(2025,6,15,scenarios_lables["A_STL_STT_BAU"], ""))
settings.append(createSettingRow(2025,7,15,scenarios_lables["A_STL_STT_VTO"], ""))
settings.append(createSettingRow(2040,8,15,scenarios_lables["B_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,9,15,scenarios_lables["B_LTL_LTT_VTO"], ""))
settings.append(createSettingRow(2040,10,15,scenarios_lables["C_LTL_LTT_BAU"], ""))
settings.append(createSettingRow(2040,11,15,scenarios_lables["C_LTL_LTT_VTO"], ""))

plt_setup_smart3_base = createSetup('11scenarios', (7.75/0.315) * 27.0 / 21.3, 27.0/21.3, (10, 4.5), settings)

smartplots3_setup.pltEnergyPerCapita(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltRealizedModeSplitByTrips(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltModeSplitInPMTPerCapita(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltAveragePersonSpeed_allModes(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltAveragePersonSpeed_car(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltModeSplitInVMT(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltRHEmptyPooled(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltRHWaitTime(plt_setup_smart3_base, output_folder)
smartplots3_setup.pltLdvTechnologySplitInVMT(plt_setup_smart3_base, output_folder)

#smartplots3_setup.pltMEP(plt_setup_smart3, output_folder, [15071,21151,22872,29014,27541,36325,45267])
smartplots3_setup.tableSummary(plt_setup_smart3_base, output_folder)
50.727273
110
0.788331
d28c4ad642d7e25e12003d4150c60dd4429d8299
50
py
Python
genrl/deep/agents/sac/__init__.py
ajaysub110/JigglypuffRL
083fd26d05b7eac018e6db7d32c4be4587461766
[ "MIT" ]
null
null
null
genrl/deep/agents/sac/__init__.py
ajaysub110/JigglypuffRL
083fd26d05b7eac018e6db7d32c4be4587461766
[ "MIT" ]
null
null
null
genrl/deep/agents/sac/__init__.py
ajaysub110/JigglypuffRL
083fd26d05b7eac018e6db7d32c4be4587461766
[ "MIT" ]
null
null
null
from genrl.deep.agents.sac.sac import SAC # noqa
25
49
0.76
d28c64bd9262b8b74070c47f2ceb3b8061a39ebe
238
py
Python
contrib/libs/cxxsupp/libsan/generate_symbolizer.py
HeyLey/catboost
f472aed90604ebe727537d9d4a37147985e10ec2
[ "Apache-2.0" ]
6,989
2017-07-18T06:23:18.000Z
2022-03-31T15:58:36.000Z
contrib/libs/cxxsupp/libsan/generate_symbolizer.py
HeyLey/catboost
f472aed90604ebe727537d9d4a37147985e10ec2
[ "Apache-2.0" ]
1,978
2017-07-18T09:17:58.000Z
2022-03-31T14:28:43.000Z
contrib/libs/cxxsupp/libsan/generate_symbolizer.py
HeyLey/catboost
f472aed90604ebe727537d9d4a37147985e10ec2
[ "Apache-2.0" ]
1,228
2017-07-18T09:03:13.000Z
2022-03-29T05:57:40.000Z
import os import sys if __name__ == '__main__': main()
18.307692
98
0.621849
d28c678a957ea394e636e4d4799124a81070a2a0
775
py
Python
scripts/scheduler/scheduler.py
OCHA-DAP/hdx-scraper-unosat-flood-portal
80b0bcd404993e4bd1dae442f794c9f86b6d5328
[ "MIT" ]
1
2016-07-22T13:32:54.000Z
2016-07-22T13:32:54.000Z
scripts/scheduler/scheduler.py
OCHA-DAP/hdx-scraper-unosat-flood-portal
80b0bcd404993e4bd1dae442f794c9f86b6d5328
[ "MIT" ]
21
2015-07-08T21:30:32.000Z
2015-08-27T17:52:24.000Z
scripts/scheduler/scheduler.py
OCHA-DAP/hdxscraper-unosat-flood-portal
80b0bcd404993e4bd1dae442f794c9f86b6d5328
[ "MIT" ]
null
null
null
#!/usr/bin/python # -*- coding: utf-8 -*- import os import sys import time import schedule dir = os.path.split(os.path.split(os.path.realpath(__file__))[0])[0] sys.path.append(dir) from utilities.prompt_format import item from unosat_flood_portal_collect import collect as Collect def Wrapper(patch=False): '''Wrapper for main program.''' # # Collect data. # Collect.Main(patch=True) # # Setting-up schedule. # schedule.every(1).day.do(Wrapper) def Main(verbose=True): '''Wrapper to run all the scheduled tasks.''' if verbose: print '%s Running scheduler.' % item('prompt_bullet') try: while True: schedule.run_pending() time.sleep(1) except Exception as e: print e return False if __name__ == '__main__': Main()
16.145833
68
0.68129
d28c6e3b8a94c187af7ae1ba6acb241b56167d9b
1,916
py
Python
grAdapt/sampling/initializer/Vertices.py
mkduong-ai/grAdapt
94c2659b0f6ff9a2984a9dc58e3c83213313bf90
[ "Apache-2.0" ]
25
2020-11-13T05:57:01.000Z
2021-06-18T11:16:03.000Z
grAdapt/sampling/initializer/Vertices.py
mkduong-ai/grAdapt
94c2659b0f6ff9a2984a9dc58e3c83213313bf90
[ "Apache-2.0" ]
null
null
null
grAdapt/sampling/initializer/Vertices.py
mkduong-ai/grAdapt
94c2659b0f6ff9a2984a9dc58e3c83213313bf90
[ "Apache-2.0" ]
null
null
null
# python # import warnings # Third party imports import numpy as np # grAdapt from .base import Initial from grAdapt.utils.sampling import sample_corner_bounds
34.214286
97
0.581942
d28e9a15ec55f39d2fbe7a6ba1ac7924e04991a1
6,456
py
Python
thirdweb/modules/base.py
princetonwong/python-sdk
f35181d97620e29d055498fca75f3702f3bb2449
[ "Apache-2.0" ]
1
2022-02-18T16:59:12.000Z
2022-02-18T16:59:12.000Z
thirdweb/modules/base.py
princetonwong/python-sdk
f35181d97620e29d055498fca75f3702f3bb2449
[ "Apache-2.0" ]
null
null
null
thirdweb/modules/base.py
princetonwong/python-sdk
f35181d97620e29d055498fca75f3702f3bb2449
[ "Apache-2.0" ]
null
null
null
"""Base Module.""" from abc import ABC, abstractmethod from typing import Callable, Dict, List, Optional, Union, cast from eth_account.account import LocalAccount from thirdweb_web3 import Web3 from thirdweb_web3.types import TxReceipt from zero_ex.contract_wrappers import TxParams import json from ..abi.coin import Coin from ..abi.erc165 import ERC165 from ..abi.market import Market from ..abi.nft import SignatureMint721 as NFT from ..abi.nft_collection import NFTCollection as NFTBundle from ..abi.pack import Pack from ..constants.erc_interfaces import InterfaceIdErc721, InterfaceIdErc1155 from ..errors import NoSignerException import io from ..options import SdkOptions from ..storage import IpfsStorage from ..types.role import Role ModuleTypes = Union[NFT, Market, Pack, NFTBundle, Coin]
33.278351
95
0.623296
d291c41a3b15e20796ea46ca106a1298d83274c2
17,356
py
Python
data_util.py
shiyu-wangbyte/leadopt
ef289ab349a19ba1f8aa581638ef7e8e3810cb41
[ "Apache-2.0" ]
null
null
null
data_util.py
shiyu-wangbyte/leadopt
ef289ab349a19ba1f8aa581638ef7e8e3810cb41
[ "Apache-2.0" ]
null
null
null
data_util.py
shiyu-wangbyte/leadopt
ef289ab349a19ba1f8aa581638ef7e8e3810cb41
[ "Apache-2.0" ]
null
null
null
# Copyright 2021 Jacob Durrant

# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at

# http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


"""
Contains utility code for reading packed data files.
"""

import os

import torch
from torch.utils.data import DataLoader, Dataset
import numpy as np
import h5py
import tqdm


# Atom typing
#
# Atom typing is the process of figuring out which layer each atom should be
# written to. For ease of testing, the packed data file contains a lot of
# potentially useful atomic information which can be distilled during the
# data loading process.
#
# Atom typing is implemented by map functions of the type:
#   (atom descriptor) -> (layer index)
#
# If the layer index is -1, the atom is ignored.


REC_TYPER = {
    # 1 channel, no hydrogen
    'single': CondAtomTyper([
        lambda num, aro, hdon, hacc, pcharge: num not in [0,1]
    ]),
    # 1 channel, including hydrogen
    'single_h': CondAtomTyper([
        lambda num, aro, hdon, hacc, pcharge: num != 0
    ]),
    # (C,N,O,S,*)
    'simple': CondAtomTyper([
        lambda num, aro, hdon, hacc, pcharge: num == 6,
        lambda num, aro, hdon, hacc, pcharge: num == 7,
        lambda num, aro, hdon, hacc, pcharge: num == 8,
        lambda num, aro, hdon, hacc, pcharge: num == 16,
        lambda num, aro, hdon, hacc, pcharge: num not in [0,1,6,7,8,16],
    ]),
    # (H,C,N,O,S,*)
    'simple_h': CondAtomTyper([
        lambda num, aro, hdon, hacc, pcharge: num == 1,
        lambda num, aro, hdon, hacc, pcharge: num == 6,
        lambda num, aro, hdon, hacc, pcharge: num == 7,
        lambda num, aro, hdon, hacc, pcharge: num == 8,
        lambda num, aro, hdon, hacc, pcharge: num == 16,
        lambda num, aro, hdon, hacc, pcharge: num not in [0,1,6,7,8,16],
    ]),
    # (aro, hdon, hacc, positive, negative, occ)
    'meta': CondAtomTyper([
        lambda num, aro, hdon, hacc, pcharge: bool(aro), # aromatic
        lambda num, aro, hdon, hacc, pcharge: bool(hdon), # hydrogen donor
        lambda num, aro, hdon, hacc, pcharge: bool(hacc), # hydrogen acceptor
        lambda num, aro, hdon, hacc, pcharge: pcharge >= 128, # partial positive
        lambda num, aro, hdon, hacc, pcharge: pcharge < 128, # partial negative
        lambda num, aro, hdon, hacc, pcharge: num != 0, # occupancy
    ]),
    # (aro, hdon, hacc, positive, negative, occ)
    'meta_mix': CondAtomTyper([
        lambda num, aro, hdon, hacc, pcharge: bool(aro), # aromatic
        lambda num, aro, hdon, hacc, pcharge: bool(hdon), # hydrogen donor
        lambda num, aro, hdon, hacc, pcharge: bool(hacc), # hydrogen acceptor
        lambda num, aro, hdon, hacc, pcharge: pcharge >= 128, # partial positive
        lambda num, aro, hdon, hacc, pcharge: pcharge < 128, # partial negative
        lambda num, aro, hdon, hacc, pcharge: num != 0, # occupancy
        lambda num, aro, hdon, hacc, pcharge: num == 1, # hydrogen
        lambda num, aro, hdon, hacc, pcharge: num == 6, # carbon
        lambda num, aro, hdon, hacc, pcharge: num == 7, # nitrogen
        lambda num, aro, hdon, hacc, pcharge: num == 8, # oxygen
        lambda num, aro, hdon, hacc, pcharge: num == 16, # sulfur
    ])
}

LIG_TYPER = {
    # 1 channel, no hydrogen
    'single': CondAtomTyper([
        lambda num: num not in [0,1]
    ]),
    # 1 channel, including hydrogen
    'single_h': CondAtomTyper([
        lambda num: num != 0
    ]),
    'simple': CondAtomTyper([
        lambda num: num == 6, # carbon
        lambda num: num == 7, # nitrogen
        lambda num: num == 8, # oxygen
        lambda num: num not in [0,1,6,7,8] # extra
    ]),
    'simple_h': CondAtomTyper([
        lambda num: num == 1, # hydrogen
        lambda num: num == 6, # carbon
        lambda num: num == 7, # nitrogen
        lambda num: num == 8, # oxygen
        lambda num: num not in [0,1,6,7,8] # extra
    ])
}
33.441233
93
0.589191
d291cc8632d543ebd26c04ae26559da840755d11
4,181
py
Python
add_socket_response_event.py
Kur0den/kur0bot
d36722617bb4094bdf636779b20a799f9bd3b419
[ "MIT" ]
1
2021-09-09T11:17:17.000Z
2021-09-09T11:17:17.000Z
add_socket_response_event.py
Kur0den/kur0bot
d36722617bb4094bdf636779b20a799f9bd3b419
[ "MIT" ]
1
2021-09-18T15:46:59.000Z
2021-09-18T15:46:59.000Z
add_socket_response_event.py
Kur0den/kur0bot
d36722617bb4094bdf636779b20a799f9bd3b419
[ "MIT" ]
1
2021-09-09T02:34:17.000Z
2021-09-09T02:34:17.000Z
from discord.gateway import DiscordWebSocket, utils, _log, KeepAliveHandler, ReconnectWebSocket DiscordWebSocket.received_message = received_message
32.664063
100
0.566611
d2936347651280722332cf187a2ad771feb61ab8
2,207
py
Python
Image_detection_codes/Keras_training/test2.py
pasadyash/CitizenServiceApp
01a0389d70624f04f6df25c1eb842b3bbce652da
[ "MIT" ]
null
null
null
Image_detection_codes/Keras_training/test2.py
pasadyash/CitizenServiceApp
01a0389d70624f04f6df25c1eb842b3bbce652da
[ "MIT" ]
null
null
null
Image_detection_codes/Keras_training/test2.py
pasadyash/CitizenServiceApp
01a0389d70624f04f6df25c1eb842b3bbce652da
[ "MIT" ]
null
null
null
import numpy as np np.random.seed(123) # for reproducibility from keras.models import Sequential from keras.layers import Dense, Dropout, Activation, Flatten from keras.layers import Convolution2D, MaxPooling2D from keras.utils import np_utils from dataset_pothole import pothole from keras.models import model_from_json # 4. Load pre-shuffled MNIST data into train and test sets (X_train, y_train), (X_test, y_test) = pothole.load_data() print(X_train.shape) print() print (y_train.shape) print() # 5. Preprocess input data X_train = X_train.reshape(X_train.shape[0], 200, 200, 1) X_test = X_test.reshape(X_test.shape[0], 200, 200, 1) X_train = X_train.astype('float32') X_test = X_test.astype('float32') X_train /= 3380 X_test /= 3380 # 6. Preprocess class labels Y_train = np_utils.to_categorical(y_train, 4) Y_test = np_utils.to_categorical(y_test, 4) # 7. Define model architecture nb_classes = 4 # number of epochs to train # number of convolutional filters to use nb_filters = 32 # size of pooling area for max pooling nb_pool = 2 # convolution kernel size nb_conv = 3 model = Sequential() model.add(Convolution2D(nb_filters, nb_conv, nb_conv, border_mode='valid', input_shape=(200, 200, 1))) convout1 = Activation('relu') model.add(convout1) model.add(Convolution2D(nb_filters, nb_conv, nb_conv)) convout2 = Activation('relu') model.add(convout2) model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool))) model.add(Dropout(0.5)) model.add(Flatten()) model.add(Dense(128)) model.add(Activation('relu')) model.add(Dropout(0.5)) model.add(Dense(nb_classes)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer='adadelta') # 9. Fit model on training data model.fit(X_train, Y_train, batch_size=32, nb_epoch=2, verbose=1) # 10. Evaluate model on test data score = model.evaluate(X_test, Y_test, verbose=0) # serialize model to JSON model_json = model.to_json() with open("model.json", "w") as json_file: json_file.write(model_json) # serialize weights to HDF5 model.save_weights("model.h5") print("Saved model to disk") print('Test loss: ', score[0]) print('Test accuracy: ', score[1])
27.5875
68
0.7372
d2945eb56ca24287c1bd0834d603839aee1fedac
2,094
py
Python
platform/web/api/device/models.py
JMSHDev/regent.dev
e4cedf04dd241ad00012735b543ee3447a8da8a2
[ "Apache-2.0" ]
1
2021-12-23T14:06:08.000Z
2021-12-23T14:06:08.000Z
platform/web/api/device/models.py
JMSHDev/regent.dev
e4cedf04dd241ad00012735b543ee3447a8da8a2
[ "Apache-2.0" ]
null
null
null
platform/web/api/device/models.py
JMSHDev/regent.dev
e4cedf04dd241ad00012735b543ee3447a8da8a2
[ "Apache-2.0" ]
1
2021-06-28T22:17:28.000Z
2021-06-28T22:17:28.000Z
import hashlib import random import string import logging from django.db import models LOG = logging.getLogger(__name__)
34.327869
108
0.722063
d294cefa293f8d84c96bacb7467d9cfe88246372
147
py
Python
armageddon/__init__.py
acse-ns1321/asteroid-impact-simulator
986c12ff1276e5d0547a4f760e1d2cb90fe4ba11
[ "MIT" ]
null
null
null
armageddon/__init__.py
acse-ns1321/asteroid-impact-simulator
986c12ff1276e5d0547a4f760e1d2cb90fe4ba11
[ "MIT" ]
null
null
null
armageddon/__init__.py
acse-ns1321/asteroid-impact-simulator
986c12ff1276e5d0547a4f760e1d2cb90fe4ba11
[ "MIT" ]
null
null
null
# flake8:NOQA """Python asteroid airburst calculator""" from .solver import * from .damage import * from .locator import * from .mapping import *
18.375
41
0.734694
d294d257d8cdf140b519b1d91dd4b68639347768
8,235
py
Python
proxy_server/backend_services.py
lmanzurv/django_proxy_server
20304829ef1ddcbb281e1373d308e5fa826fcd39
[ "Apache-2.0" ]
11
2015-07-18T02:23:43.000Z
2021-11-15T11:43:21.000Z
proxy_server/backend_services.py
lmanzurv/django_proxy_server
20304829ef1ddcbb281e1373d308e5fa826fcd39
[ "Apache-2.0" ]
null
null
null
proxy_server/backend_services.py
lmanzurv/django_proxy_server
20304829ef1ddcbb281e1373d308e5fa826fcd39
[ "Apache-2.0" ]
5
2015-02-24T15:37:36.000Z
2021-10-10T16:42:22.000Z
from django.contrib.auth import SESSION_KEY from django.core.cache import cache from django.conf import settings from django.http import HttpResponse, HttpResponseServerError from proxy_server.response import AJAX_REQUEST import httplib, json, proxy_server
37.094595
131
0.600364
d294ed611a40faaaff54b7db50b237d6a8c768e7
1,645
py
Python
py/trawl_analyzer/TrawlSensorsDB_model.py
nwfsc-fram/pyFieldSoftware
477ba162b66ede2263693cda8c5a51d27eaa3b89
[ "MIT" ]
null
null
null
py/trawl_analyzer/TrawlSensorsDB_model.py
nwfsc-fram/pyFieldSoftware
477ba162b66ede2263693cda8c5a51d27eaa3b89
[ "MIT" ]
176
2019-11-22T17:44:55.000Z
2021-10-20T23:40:03.000Z
py/trawl_analyzer/TrawlSensorsDB_model.py
nwfsc-fram/pyFieldSoftware
477ba162b66ede2263693cda8c5a51d27eaa3b89
[ "MIT" ]
1
2021-05-07T01:06:32.000Z
2021-05-07T01:06:32.000Z
# from peewee import * from playhouse.apsw_ext import TextField, IntegerField, PrimaryKeyField from py.trawl_analyzer.Settings import SensorsModel as BaseModel # database = SqliteDatabase('data\clean_sensors.db', **{})
38.255814
83
0.764134
d29572229651c45d1ad6870cb96992f7e8dc3c59
9,754
py
Python
src/statemachine.py
CEOAI-ABM/SIR-Modelling
02ab89d64040b09ddce820a1ecbbc0cfc9b13f29
[ "MIT" ]
1
2021-06-13T11:50:08.000Z
2021-06-13T11:50:08.000Z
src/statemachine.py
CEOAI-ABM/SIR-Modelling
02ab89d64040b09ddce820a1ecbbc0cfc9b13f29
[ "MIT" ]
null
null
null
src/statemachine.py
CEOAI-ABM/SIR-Modelling
02ab89d64040b09ddce820a1ecbbc0cfc9b13f29
[ "MIT" ]
null
null
null
import transitions from functools import partial # from transitions import transitions.Machine # TODO: whenever there is a state chage store the following # (DAY,function_called) -> Stored for every person for agent status, state and Testing state
28.190751
159
0.705454
d295e921737512140cabce35cb8da35469a21633
304
py
Python
hard-gists/5898352/snippet.py
jjhenkel/dockerizeme
eaa4fe5366f6b9adf74399eab01c712cacaeb279
[ "Apache-2.0" ]
21
2019-07-08T08:26:45.000Z
2022-01-24T23:53:25.000Z
hard-gists/5898352/snippet.py
jjhenkel/dockerizeme
eaa4fe5366f6b9adf74399eab01c712cacaeb279
[ "Apache-2.0" ]
5
2019-06-15T14:47:47.000Z
2022-02-26T05:02:56.000Z
hard-gists/5898352/snippet.py
jjhenkel/dockerizeme
eaa4fe5366f6b9adf74399eab01c712cacaeb279
[ "Apache-2.0" ]
17
2019-05-16T03:50:34.000Z
2021-01-14T14:35:12.000Z
import os
import scipy.io.wavfile as wav

# install lame
# install bleeding edge scipy (needs new cython)

fname = 'XC135672-Red-winged\ Blackbird1301.mp3'
oname = 'temp.wav'

cmd = 'lame --decode {0} {1}'.format( fname,oname )
os.system(cmd)

data = wav.read(oname)
# your code goes here
print(len(data[1]))
25.333333
51
0.720395
d29646348f53744d285a4ab6a2096da4edb810a8
2,612
py
Python
examples/home-assistant/custom_components/evacalor/config_flow.py
fredericvl/pyevacalor
37a3d96f867efffdec4457f11119977e6e887b8a
[ "Apache-2.0" ]
2
2020-10-25T15:42:03.000Z
2021-01-06T10:25:58.000Z
examples/home-assistant/custom_components/evacalor/config_flow.py
fredericvl/pyevacalor
37a3d96f867efffdec4457f11119977e6e887b8a
[ "Apache-2.0" ]
2
2021-01-06T09:24:58.000Z
2021-02-13T21:12:02.000Z
examples/home-assistant/custom_components/evacalor/config_flow.py
fredericvl/pyevacalor
37a3d96f867efffdec4457f11119977e6e887b8a
[ "Apache-2.0" ]
null
null
null
"""Config flow for Eva Calor.""" from collections import OrderedDict import logging import uuid from pyevacalor import ( # pylint: disable=redefined-builtin ConnectionError, Error as EvaCalorError, UnauthorizedError, evacalor, ) import voluptuous as vol from homeassistant import config_entries from homeassistant.const import CONF_EMAIL, CONF_PASSWORD from .const import CONF_UUID, DOMAIN _LOGGER = logging.getLogger(__name__) def conf_entries(hass): """Return the email tuples for the domain.""" return set( entry.data[CONF_EMAIL] for entry in hass.config_entries.async_entries(DOMAIN) )
31.095238
87
0.616003
d2965c42b4aa6f52d9c6e78125bcdb00950f4d9f
6,608
py
Python
library_samples/Python3/ocs_sample_library_preview/Dataview/Dataview.py
osi-awoodall/OSI-Samples-OCS
1995ccda20e4fe2ae66f3b67afbc1127d638a6fc
[ "Apache-2.0" ]
null
null
null
library_samples/Python3/ocs_sample_library_preview/Dataview/Dataview.py
osi-awoodall/OSI-Samples-OCS
1995ccda20e4fe2ae66f3b67afbc1127d638a6fc
[ "Apache-2.0" ]
null
null
null
library_samples/Python3/ocs_sample_library_preview/Dataview/Dataview.py
osi-awoodall/OSI-Samples-OCS
1995ccda20e4fe2ae66f3b67afbc1127d638a6fc
[ "Apache-2.0" ]
null
null
null
# Dataview.py # import json from .DataviewQuery import DataviewQuery from .DataviewMapping import DataviewMapping from .DataviewIndexConfig import DataviewIndexConfig from .DataviewGroupRule import DataviewGroupRule
25.513514
87
0.575969
d296cec19b3a1e77f406394741a977e6895ca59f
392
py
Python
PYTHON_Code/TestGUI.py
ROBO-BEV/BARISTO
0e87d79966efc111cc38c1a1cf22e2d8ee18c350
[ "CC-BY-3.0", "MIT" ]
8
2018-03-12T04:52:28.000Z
2021-05-19T19:37:01.000Z
PYTHON_Code/TestGUI.py
ROBO-BEV/BARISTO
0e87d79966efc111cc38c1a1cf22e2d8ee18c350
[ "CC-BY-3.0", "MIT" ]
null
null
null
PYTHON_Code/TestGUI.py
ROBO-BEV/BARISTO
0e87d79966efc111cc38c1a1cf22e2d8ee18c350
[ "CC-BY-3.0", "MIT" ]
1
2018-01-30T09:43:36.000Z
2018-01-30T09:43:36.000Z
from tkinter import * window0 = Tk() window0.geometry('960x540') #tk.iconbitmap(default='ROBO_BEV_LOGO.ico') window0.title("BARISTO") photo = PhotoImage(file="Page1.png") widget = Label(window0, image=photo) widget.photo = photo widget = Label(window0, text="10", fg="white", font=("Source Sans Pro",50)) #widget = Label(window0, text="9", fg="white") widget.pack() window0.mainloop()
19.6
75
0.709184
d297adc463629ff967a82e11d0f42bb013364af4
2,354
py
Python
handlers/play.py
AftahBagas/AlphaMusik
c8c3804a26ad393b6f666fecd4d3464727ce2544
[ "MIT" ]
null
null
null
handlers/play.py
AftahBagas/AlphaMusik
c8c3804a26ad393b6f666fecd4d3464727ce2544
[ "MIT" ]
null
null
null
handlers/play.py
AftahBagas/AlphaMusik
c8c3804a26ad393b6f666fecd4d3464727ce2544
[ "MIT" ]
1
2021-06-22T08:08:43.000Z
2021-06-22T08:08:43.000Z
from os import path from telethon import Client from telethon.types import Message, Voice from callsmusic import callsmusic, queues import converter from downloaders import youtube from config import BOT_NAME as bn, DURATION_LIMIT from helpers.filters import command, other_filters from helpers.decorators import errors from helpers.errors import DurationLimitError from helpers.gets import get_url, get_file_name from telethon.types import InlineKeyboardButton, InlineKeyboardMarkup
34.115942
116
0.658454
d2992c7176a1b65595e782d6603b030801317e72
2,662
py
Python
Sindri/Properties.py
mrcsbrn/TCC_software
17a5335aed17d4740c3bbd0ef828b0fc5dcea1da
[ "MIT" ]
11
2019-10-17T02:01:51.000Z
2022-03-17T17:39:34.000Z
Sindri/Properties.py
mrcsbrn/TCC_software
17a5335aed17d4740c3bbd0ef828b0fc5dcea1da
[ "MIT" ]
2
2019-07-25T22:16:16.000Z
2020-03-28T01:59:59.000Z
Sindri/Properties.py
mrcsbrn/TCC_software
17a5335aed17d4740c3bbd0ef828b0fc5dcea1da
[ "MIT" ]
5
2019-07-15T18:19:36.000Z
2021-12-24T08:06:24.000Z
from __future__ import annotations from constants import DBL_EPSILON
24.422018
87
0.531555
d29a434df89a3b05d94919b3e887c98d5f6aef26
8,240
py
Python
algorithms/randcommuns.py
eXascaleInfolab/clubmark
5c329a5308a39d53f77db790a31d621245a7c693
[ "Apache-2.0" ]
14
2018-11-20T08:32:30.000Z
2022-03-14T02:46:35.000Z
algorithms/randcommuns.py
eXascaleInfolab/clubmark
5c329a5308a39d53f77db790a31d621245a7c693
[ "Apache-2.0" ]
null
null
null
algorithms/randcommuns.py
eXascaleInfolab/clubmark
5c329a5308a39d53f77db790a31d621245a7c693
[ "Apache-2.0" ]
1
2019-05-22T08:39:00.000Z
2019-05-22T08:39:00.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
:Brief: Produces rand disjoint communities (clusters) for the given network with sizes similar in the ground truth.
:Description: Takes number of the resulting communities and their sizes from the specified groundtruth (actually any sample of the community structure, the real ground truth is not required) and fills stubs of the clusters with randomly selected nodes from the input network with all their neighbors.
	Note: Produced result is a random disjoint partitioning, so if the 'ground truth' had overlapping clusters, then the number of nodes in the last cluster will be less than in the sample.

:Authors: Artem Lutov <luart@ya.ru>
:Organizations: eXascale lab <http://exascale.info/>, ScienceWise <http://sciencewise.info/>, Lumais <http://www.lumais.com/>
:Date: 2015-07
"""

from __future__ import print_function, division  # Required for stderr output, must be the first import
import sys
import os  # Pathes processing
#import igraph as ig
import random as rand
try:
	# ATTENTION: Python3 newer treats imports as realtive and results in error here unlike Python2
	from utils.parser_nsl import asymnet, loadNsl  #pylint: disable=E0611,E0401
except ImportError:
	# Note: this case should be the second because explicit relative imports cause various errors
	# under Python2 and Python3, which complicates thier handling
	from .utils.parser_nsl import asymnet, loadNsl  #pylint: disable=E0611,E0401

# Default number of the resulting clusterings (partitions, i.e files that contain disjoint clusters)
_RESNUM = 1


def parseParams(args):
	"""Parse user-specified parameters

	returns  - parsed input arguments, Params()
	"""
	assert isinstance(args, (tuple, list)) and args, 'Input arguments must be specified'
	prm = Params()
	for arg in args:
		# Validate input format
		preflen = 3
		if arg[0] != '-' or len(arg) <= preflen:
			raise ValueError('Unexpected argument: ' + arg)
		if arg[1] == 'g':
			prm.groundtruth = arg[preflen:]
			prm.outext = os.path.splitext(prm.groundtruth)[1]
		elif arg[1] == 'i':
			pos = arg.find('=', 2)
			if pos == -1 or arg[2] not in 'ud=' or len(arg) == pos + 1:
				raise ValueError('Unexpected argument: ' + arg)
			pos += 1
			prm.network = arg[pos:]
			prm.outname, netext = os.path.splitext(os.path.split(prm.network)[1])
			prm.dirnet = asymnet(netext.lower(), arg[2] == 'd')
			if not prm.outname:
				raise ValueError('Invalid network name (is a directory): ' + prm.network)
		elif arg[1] == 'n':
			prm.outnum = int(arg[preflen:])
			assert prm.outnum >= 1, 'outnum must be a natural number'
		elif arg[1] == 'r':
			prm.randseed = arg[preflen:]
		elif arg[1] == 'o':
			prm.outdir = arg[preflen:]
		else:
			raise ValueError('Unexpected argument: ' + arg)

	if not (prm.groundtruth and prm.network):
		raise ValueError('Input network and groundtruth file names must be specified')
	if not prm.outdir:
		prm.outdir = os.path.split(prm.network)[0]
		if not prm.outdir:
			prm.outdir = '.'
	if not prm.randseed:
		try:
			prm.randseed = ''.join(str(ord(c)) for c in os.urandom(8))
		except NotImplementedError:
			prm.randseed = str(rand.random())
		prm.outpseed = True
	return prm


def randcommuns(*args):
	"""Generate random clusterings for the specified network"""
	prm = parseParams(args)
	print('Starting randcommuns clustering:'
		'\n\tgroundtruth: {}'
		'\n\t{} network: {}'
		'\n\t{} cls of {} in {} with randseed: {}'
		.format(prm.groundtruth, 'directed' if prm.dirnet else 'undirected', prm.network
			, prm.outnum, prm.outname + prm.outext, prm.outdir, prm.randseed))
	# Load Data from simple real-world networks
	graph = loadNsl(prm.network, prm.dirnet)  # ig.Graph.Read_Ncol(network, directed=dirnet)  # , weights=False
	# Load statistics from the ground thruth
	groundstat = []
	with open(prm.groundtruth, 'r') as fground:
		for line in fground:
			# Skip empty lines and comments (possible header)
			if not line or line[0] == '#':
				continue
			groundstat.append(len(line.split()))
	# Create outpdir if required
	if prm.outdir and not os.path.exists(prm.outdir):
		os.makedirs(prm.outdir)
	# Geneate rand clsuterings
	rand.seed(prm.randseed)
	while prm.outnum > 0:
		prm.outnum -= 1
		# Active (remained) nodes indices of the input network
		actnodes = set(graph.vs.indices)  #pylint: disable=E1101
		clusters = []  # Forming clusters
		# Reference size of the ground truth clusters (they migh have overlaps unlike the current partitioning)
		for clmarg in groundstat:
			nodes = []  # Content of the current cluster
			# Check whether all nodes of the initial network are mapped
			if not actnodes:
				break
			# Select subsequent rand node
			ind = rand.sample(actnodes, 1)[0]
			actnodes.remove(ind)
			nodes.append(ind)
			inode = 0  # Index of the node in the current cluster
			# Select neighbors of the selected nodes to fill the clusters
			while len(nodes) < clmarg and actnodes:
				for nd in graph.vs[nodes[inode]].neighbors():  #pylint: disable=E1136
					if nd.index not in actnodes:
						continue
					actnodes.remove(nd.index)
					nodes.append(nd.index)
					if len(nodes) >= clmarg or not actnodes:
						break
				inode += 1
				if inode >= len(nodes) and len(nodes) < clmarg and actnodes:
					ind = rand.sample(actnodes, 1)[0]
					actnodes.remove(ind)
					nodes.append(ind)
			# Use original labels of the nodes
			clusters.append(graph.vs[ind]['name'] for ind in nodes)  #pylint: disable=E1136
		# Output resulting clusters
		with open('/'.join((prm.outdir, ''.join((prm.outname, '_', str(prm.outnum), prm.outext)))), 'w') as fout:
			for cl in clusters:
				# Note: print() unlike fout.write() appends the newline
				print(' '.join(cl), file=fout)
	# Output randseed used for the generated clusterings
	# Output to the dir above if possible to not mix cluster levels with rand seed
	if prm.outpseed:
		with open('/'.join((prm.outdir, (os.path.splitext(prm.outname)[0] + '.seed'))), 'w') as fout:
			# Note: print() unlike fout.write() appends the newline
			print(prm.randseed, file=fout)
	print('Random clusterings are successfully generated')


if __name__ == '__main__':
	if len(sys.argv) > 2:
		randcommuns(*sys.argv[1:])
	else:
		print('\n'.join(('Produces random disjoint partitioning (clusters are formed with rand nodes and their neighbors)'
			' for the input network specified in the NSL format (generalizaiton of NCOL, SNAP, etc.)\n',
			'Usage: {app} -g=<ground_truth> -i[{{u, d}}]=<input_network> [-n=<res_num>] [-r=<rand_seed>] [-o=<outp_dir>]',
			'',
			' -g=<ground_truth>  - ground truth clustering as a template for sizes of the resulting communities',
			' -i[X]=<input_network>  - file of the input network in the format: <src_id> <dst_id> [<weight>]',
			'   Xu  - undirected input network (<src_id> <dst_id> implies also <dst_id> <src_id>). Default',
			'   Xd  - directed input network (both <src_id> <dst_id> and <dst_id> <src_id> are specified)',
			'   NOTE: (un)directed flag is considered only for the networks with non-NSL file extension',
			' -n=<res_num>  - number of the resulting clusterings to generate. Default: {resnum}',
			' -r=<rand_seed>  - random seed, string. Default: value from the system rand source (otherwise current time)',
			' -o=<output_communities>  - . Default: ./<input_network>/'
			)).format(app=sys.argv[0], resnum=_RESNUM))
40.392157
116
0.701942
d29d169f662bf82cfbfb0172089e264d38e0b3c3
17,578
py
Python
utils/save_atten.py
xiaomengyc/SPG
0006659c5be4c3451f8c9a188f1e91e9ff682fa9
[ "MIT" ]
152
2018-07-25T01:55:33.000Z
2022-02-02T15:16:09.000Z
utils/save_atten.py
xiaomengyc/SPG
0006659c5be4c3451f8c9a188f1e91e9ff682fa9
[ "MIT" ]
15
2018-09-13T06:35:16.000Z
2021-08-05T06:23:16.000Z
utils/save_atten.py
xiaomengyc/SPG
0006659c5be4c3451f8c9a188f1e91e9ff682fa9
[ "MIT" ]
27
2018-07-26T03:47:55.000Z
2021-04-05T08:06:41.000Z
import numpy as np import cv2 import os import torch import os import time from torchvision import models, transforms from torch.utils.data import DataLoader from torch.optim import SGD from torch.autograd import Variable idx2catename = {'voc20': ['aeroplane','bicycle','bird','boat','bottle','bus','car','cat','chair','cow','diningtable','dog','horse', 'motorbike','person','pottedplant','sheep','sofa','train','tvmonitor'], 'coco80': ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']}
42.458937
131
0.529867
d29d26d475e134ec64d93b0a0c67aac73b58249e
453
py
Python
integration/config/service_names.py
hawflau/serverless-application-model
d2cf4b7e23d26cdf677c564d53bb58e6a5b6cac2
[ "Apache-2.0" ]
null
null
null
integration/config/service_names.py
hawflau/serverless-application-model
d2cf4b7e23d26cdf677c564d53bb58e6a5b6cac2
[ "Apache-2.0" ]
1
2020-03-03T01:46:46.000Z
2020-03-03T01:46:46.000Z
integration/config/service_names.py
hawflau/serverless-application-model
d2cf4b7e23d26cdf677c564d53bb58e6a5b6cac2
[ "Apache-2.0" ]
null
null
null
COGNITO = "Cognito" SERVERLESS_REPO = "ServerlessRepo" MODE = "Mode" XRAY = "XRay" LAYERS = "Layers" HTTP_API = "HttpApi" IOT = "IoT" CODE_DEPLOY = "CodeDeploy" ARM = "ARM" GATEWAY_RESPONSES = "GatewayResponses" MSK = "MSK" KMS = "KMS" CWE_CWS_DLQ = "CweCwsDlq" CODE_SIGN = "CodeSign" MQ = "MQ" USAGE_PLANS = "UsagePlans" SCHEDULE_EVENT = "ScheduleEvent" DYNAMO_DB = "DynamoDB" KINESIS = "Kinesis" SNS = "SNS" SQS = "SQS" CUSTOM_DOMAIN = "CustomDomain"
19.695652
38
0.708609
d29e1b642a0cdbe5b86c0d36bda20ce0cce1d92a
2,373
py
Python
tools/onnx_utilis/export_vfe_weight.py
neolixcn/OpenPCDet
32bae37db13711a4fb35ad2980068470bb6cee1c
[ "Apache-2.0" ]
null
null
null
tools/onnx_utilis/export_vfe_weight.py
neolixcn/OpenPCDet
32bae37db13711a4fb35ad2980068470bb6cee1c
[ "Apache-2.0" ]
null
null
null
tools/onnx_utilis/export_vfe_weight.py
neolixcn/OpenPCDet
32bae37db13711a4fb35ad2980068470bb6cee1c
[ "Apache-2.0" ]
null
null
null
import onnx import onnxruntime import torch import onnx.numpy_helper # added by huxi, load rpn config from pcdet.pointpillar_quantize_config import load_rpn_config_json # ======================================== config_dict = load_rpn_config_json.get_config() onnx_model_file = config_dict["vfe_onnx_file"] onnx_model = onnx.load(onnx_model_file) onnx.checker.check_model(onnx_model) #check model #[tensor_mat_weight] = [t for t in onnx_model.graph.initializer if t.name == "linear.weight"] [tensor_mat_weight] = [t for t in onnx_model.graph.initializer if t.name == "14"] [tensor_bn_gamma] = [t for t in onnx_model.graph.initializer if t.name == "norm.weight"] [tensor_bn_beta] = [t for t in onnx_model.graph.initializer if t.name == "norm.bias"] [tensor_bn_mean] = [t for t in onnx_model.graph.initializer if t.name == "norm.running_mean"] [tensor_bn_var] = [t for t in onnx_model.graph.initializer if t.name == "norm.running_var"] mat_w = onnx.numpy_helper.to_array(tensor_mat_weight) mat_w = mat_w.transpose() mat_w_list = list(mat_w.flatten()) bn_gamma_w = onnx.numpy_helper.to_array(tensor_bn_gamma) bn_gamma_w_list = list(bn_gamma_w.flatten()) bn_beta_w = onnx.numpy_helper.to_array(tensor_bn_beta) bn_beta_w_list = list(bn_beta_w.flatten()) bn_mean_w = onnx.numpy_helper.to_array(tensor_bn_mean) bn_mean_w_list = list(bn_mean_w.flatten()) bn_var_w = onnx.numpy_helper.to_array(tensor_bn_var) bn_var_w_list = list(bn_var_w.flatten()) result_line = "" exported_vfe_weight_file = config_dict["vfe_exported_weight_file"] with open(exported_vfe_weight_file, 'w') as f: for idx,val in enumerate(mat_w_list): result_line += str(val) result_line += " " result_line += "\n" for idx,val in enumerate(bn_gamma_w_list): result_line += str(val) result_line += " " result_line += "\n" for idx,val in enumerate(bn_beta_w_list): result_line += str(val) result_line += " " result_line += "\n" for idx,val in enumerate(bn_mean_w_list): result_line += str(val) result_line += " " result_line += "\n" for idx,val in enumerate(bn_var_w_list): result_line += str(val) result_line += " " f.write(result_line)
28.25
93
0.702908
d29e58f5104bd6d4a19025c66f8dbd6cd3fc3f1a
1,825
py
Python
color_extractor/cluster.py
hcoura/color-extractor
a69fc4a9a8b7c90d292f954d289c84a38323eda6
[ "MIT" ]
276
2016-07-25T10:00:06.000Z
2022-03-10T16:56:26.000Z
color_extractor/cluster.py
hcoura/color-extractor
a69fc4a9a8b7c90d292f954d289c84a38323eda6
[ "MIT" ]
13
2017-05-25T12:45:30.000Z
2022-03-11T23:16:30.000Z
color_extractor/cluster.py
hcoura/color-extractor
a69fc4a9a8b7c90d292f954d289c84a38323eda6
[ "MIT" ]
74
2016-12-14T07:31:18.000Z
2022-03-12T18:36:57.000Z
from sklearn.cluster import KMeans from .exceptions import KMeansException from .task import Task
26.838235
78
0.572603
d29e853085f1e22d6f5c45806ff223b5999daf1d
315
py
Python
notebooks/datasets.py
jweill-aws/jupyterlab-data-explorer
3db8eed9562f35d2b0e44370cf22f32ac9ffbc4d
[ "BSD-3-Clause" ]
173
2019-01-04T05:18:08.000Z
2022-03-28T11:15:30.000Z
notebooks/datasets.py
jweill-aws/jupyterlab-data-explorer
3db8eed9562f35d2b0e44370cf22f32ac9ffbc4d
[ "BSD-3-Clause" ]
115
2019-01-04T01:09:41.000Z
2022-03-24T01:07:00.000Z
notebooks/datasets.py
jweill-aws/jupyterlab-data-explorer
3db8eed9562f35d2b0e44370cf22f32ac9ffbc4d
[ "BSD-3-Clause" ]
34
2019-06-12T16:46:53.000Z
2022-02-01T08:41:40.000Z
# # @license BSD-3-Clause # # Copyright (c) 2019 Project Jupyter Contributors. # Distributed under the terms of the 3-Clause BSD License. import IPython.display import pandas
21
67
0.730159
d29f3df5f35ab4781444eaf48243bf8b792bb433
1,154
py
Python
django_india/conf.py
k-mullapudi/django-india
662a5fb363ac4360b573f5864df65619f2794dc8
[ "MIT" ]
null
null
null
django_india/conf.py
k-mullapudi/django-india
662a5fb363ac4360b573f5864df65619f2794dc8
[ "MIT" ]
null
null
null
django_india/conf.py
k-mullapudi/django-india
662a5fb363ac4360b573f5864df65619f2794dc8
[ "MIT" ]
null
null
null
import django.conf url_bases = { 'geonames': { 'dump': 'http://download.geonames.org/export/dump/', 'zip': 'http://download.geonames.org/export/zip/', }, } india_country_code = 'IN' files = { 'state': { 'filename': '', 'urls': [ url_bases['geonames']['dump'] + '{filename}', ], 'fields': [ ] }, 'district': { 'filename': '', 'urls': [ url_bases['geonames']['dump'] + '{filename}', ], 'fields': [ ] }, 'city': { 'filename': '', 'urls': [ url_bases['geonames']['dump'] + '{filename}', ], 'fields': [ ] } } LANGUAGE_DATA = { }
19.233333
73
0.47747
d29f77fa5fac3eb65fe044b9f6c664cd6a9d69a3
1,588
py
Python
src/dao/evaluation_dao.py
Asconius/trading-bot
df544f058d12c5378a0f8c110e28d49d983e0393
[ "Apache-2.0" ]
2
2021-06-04T11:27:02.000Z
2021-12-19T03:24:51.000Z
src/dao/evaluation_dao.py
Asconius/trading-bot
df544f058d12c5378a0f8c110e28d49d983e0393
[ "Apache-2.0" ]
22
2020-08-24T05:16:11.000Z
2021-12-13T20:51:25.000Z
src/dao/evaluation_dao.py
Asconius/trading-bot
df544f058d12c5378a0f8c110e28d49d983e0393
[ "Apache-2.0" ]
null
null
null
from decimal import Decimal from typing import List from src.dao.dao import DAO from src.dto.attempt_dto import AttemptDTO from src.entity.evaluation_entity import EvaluationEntity from src.utils.utils import Utils
40.717949
116
0.706549
d29fef12d764089bdcfe8679c802e9724d8f9325
1,031
py
Python
src/lib/others/info_gathering/finder/finding_comment.py
nahuelhm17/vault_scanner
574da226db5d274794d751d9d7959cd785bc9990
[ "MIT" ]
230
2019-01-10T07:43:01.000Z
2022-03-25T03:16:07.000Z
src/lib/others/info_gathering/finder/finding_comment.py
nahuelhm17/vault_scanner
574da226db5d274794d751d9d7959cd785bc9990
[ "MIT" ]
65
2018-11-18T12:48:27.000Z
2019-01-05T22:40:07.000Z
src/lib/others/info_gathering/finder/finding_comment.py
nahuelhm17/vault_scanner
574da226db5d274794d751d9d7959cd785bc9990
[ "MIT" ]
64
2019-01-16T11:56:18.000Z
2022-01-12T17:28:37.000Z
#! /usr/bin/python import requests import re from bs4 import BeautifulSoup import colors
27.131579
69
0.596508
d2a2c147c06d327188733c71e9a83b70f75131b1
27
py
Python
micro-benchmark-key-errs/snippets/dicts/type_coercion/main.py
WenJinfeng/PyCG
b45e8e04fe697d8301cf27222a8f37646d69f168
[ "Apache-2.0" ]
121
2020-12-16T20:31:37.000Z
2022-03-21T20:32:43.000Z
micro-benchmark-key-errs/snippets/dicts/type_coercion/main.py
WenJinfeng/PyCG
b45e8e04fe697d8301cf27222a8f37646d69f168
[ "Apache-2.0" ]
24
2021-03-13T00:04:00.000Z
2022-03-21T17:28:11.000Z
micro-benchmark-key-errs/snippets/dicts/type_coercion/main.py
WenJinfeng/PyCG
b45e8e04fe697d8301cf27222a8f37646d69f168
[ "Apache-2.0" ]
19
2021-03-23T10:58:47.000Z
2022-03-24T19:46:50.000Z
d = {"1": "a"} d[1] d["1"]
6.75
14
0.259259
d2a35e41a7de7ed1c211d10b17e2843c3afc87ce
2,753
py
Python
scripts/link_assignment.py
metagenomics/antibio
ac79c64417c749ed40263fc97d22498097f2e9b9
[ "MIT" ]
4
2015-11-03T22:00:33.000Z
2017-10-21T06:57:35.000Z
scripts/link_assignment.py
metagenomics/antibio
ac79c64417c749ed40263fc97d22498097f2e9b9
[ "MIT" ]
49
2015-09-28T11:32:38.000Z
2016-04-11T14:05:00.000Z
scripts/link_assignment.py
metagenomics/antibio
ac79c64417c749ed40263fc97d22498097f2e9b9
[ "MIT" ]
2
2018-08-27T15:15:45.000Z
2020-03-31T01:50:48.000Z
#!/usr/bin/python # This program revises the existing overview file. # If a keyword is found in an Abstract of an accession of a gene, the url of the abstract is added to the overview file # The revised overview.txt is created in the same directory of the old one and named overview_new.txt """ Usage: link_assignment.py -o <overview> -pub <pubhits> -h --help Please enter the files overview.txt and the pubhits. """ from docopt import docopt from sys import argv import csv import os import util def build_overview_link(pubhits_dict, gene_id, links): """ builds the pubhits link out of the gene id and the pubhits dict :param pubhits_dict: pubhits dictionary :param gene_id: gene id :param links: existsing links :return: links """ pubhits_acc = pubhits_dict[gene_id][util.PUBHITS_ACC_INDEX] pubhits_link = pubhits_dict[gene_id][util.PUBHITS_LINK_INDEX] if links.strip() == util.NO_LINK: new_links = [pubhits_acc + ":" + pubhits_link] else: new_links = [links, pubhits_acc + ":" + pubhits_link] overview_link = ','.join(new_links) if not overview_link or overview_link == util.TODO: overview_link = util.NO_KEYWORDS return overview_link def set_link_in_row(old_row, pubhits_dict): """ set link in existing overview row (dictionary) :param old_row: overview row :param pubhits_dict: pubhits dictionary :return: revised overview row """ gene_id = old_row[util.GENE_ID] if (gene_id in pubhits_dict): old_row[util.LINKS] = build_overview_link(pubhits_dict, gene_id, old_row[util.LINKS]) return old_row if __name__ == '__main__': main()
35.753247
119
0.694878
d2a6ca53031a949367ecbf3f9d3bfdb61563f697
5,421
py
Python
app/views.py
LauretteMongina/Instagram-clone
617135bcebcf6b73f2de7af73a66c177718d338c
[ "MIT" ]
null
null
null
app/views.py
LauretteMongina/Instagram-clone
617135bcebcf6b73f2de7af73a66c177718d338c
[ "MIT" ]
null
null
null
app/views.py
LauretteMongina/Instagram-clone
617135bcebcf6b73f2de7af73a66c177718d338c
[ "MIT" ]
null
null
null
from django.shortcuts import render,redirect,get_object_or_404 from django.contrib.auth.decorators import login_required from .models import * import cloudinary import cloudinary.uploader import cloudinary.api from django.http import HttpResponseRedirect, JsonResponse from .forms import RegistrationForm, UpdateUserForm, UpdateUserProfileForm, ImageForm, CommentForm from django.contrib.auth import login, authenticate from .models import Image, Comment, Profile from django.contrib.auth.models import User from django.template.loader import render_to_string from django.views.generic import RedirectView from .email import send_welcome_email # Create your views here.
34.310127
98
0.642317
d2a7333fba6a0b271b7f3ddd6746591c934cb750
1,557
py
Python
at_export_config.py
Fmstrat/FreeCAD-ArchTextures
e3af6198ea5e07848602a3b8ba01ebab2335d6b1
[ "MIT" ]
21
2018-11-16T05:56:31.000Z
2021-11-09T13:21:53.000Z
at_export_config.py
Fmstrat/FreeCAD-ArchTextures
e3af6198ea5e07848602a3b8ba01ebab2335d6b1
[ "MIT" ]
39
2018-10-02T18:16:18.000Z
2022-02-11T13:45:50.000Z
at_export_config.py
Fmstrat/FreeCAD-ArchTextures
e3af6198ea5e07848602a3b8ba01ebab2335d6b1
[ "MIT" ]
10
2019-07-15T16:34:51.000Z
2022-01-25T23:57:03.000Z
import FreeCAD, FreeCADGui from arch_texture_utils.resource_utils import iconPath import arch_texture_utils.qtutils as qtutils from arch_texture_utils.selection_utils import findSelectedTextureConfig if __name__ == "__main__": command = ExportTextureConfigCommand(); if command.IsActive(): command.Activated() else: qtutils.showInfo("No open Document", "There is no open document") else: import archtexture_toolbars archtexture_toolbars.toolbarManager.registerCommand(ExportTextureConfigCommand())
33.12766
122
0.689788
d2a75f44feb7064f817bce0160b3db28ad77852c
597
py
Python
barcode/charsets/ean.py
Azd325/python-barcode
b41b1d5d479fb0ad3290a0a6235a8d3203d34ee9
[ "MIT" ]
null
null
null
barcode/charsets/ean.py
Azd325/python-barcode
b41b1d5d479fb0ad3290a0a6235a8d3203d34ee9
[ "MIT" ]
null
null
null
barcode/charsets/ean.py
Azd325/python-barcode
b41b1d5d479fb0ad3290a0a6235a8d3203d34ee9
[ "MIT" ]
null
null
null
EDGE = '101' MIDDLE = '01010' CODES = { 'A': ( '0001101', '0011001', '0010011', '0111101', '0100011', '0110001', '0101111', '0111011', '0110111', '0001011' ), 'B': ( '0100111', '0110011', '0011011', '0100001', '0011101', '0111001', '0000101', '0010001', '0001001', '0010111' ), 'C': ( '1110010', '1100110', '1101100', '1000010', '1011100', '1001110', '1010000', '1000100', '1001000', '1110100' ), } LEFT_PATTERN = ( 'AAAAAA', 'AABABB', 'AABBAB', 'AABBBA', 'ABAABB', 'ABBAAB', 'ABBBAA', 'ABABAB', 'ABABBA', 'ABBABA' )
28.428571
73
0.515913
d2a835bc55a30790d6234339c5e466df15a50aed
2,787
py
Python
Sushant_Boosting/code.py
sushant-bahekar/ga-learner-dsmp-repo
1087bec60382c2b3156f26cb87629a3b931fc41f
[ "MIT" ]
null
null
null
Sushant_Boosting/code.py
sushant-bahekar/ga-learner-dsmp-repo
1087bec60382c2b3156f26cb87629a3b931fc41f
[ "MIT" ]
null
null
null
Sushant_Boosting/code.py
sushant-bahekar/ga-learner-dsmp-repo
1087bec60382c2b3156f26cb87629a3b931fc41f
[ "MIT" ]
null
null
null
# -------------- import pandas as pd from sklearn.model_selection import train_test_split #path - Path of file # Code starts here df = pd.read_csv(path) df.head(5) X = df.drop(['customerID','Churn'],1) y = df['Churn'] X_train,X_test,y_train,y_test = train_test_split(X, y, test_size = 0.3, random_state = 0) # -------------- import numpy as np from sklearn.preprocessing import LabelEncoder # Code starts here #Replacing spaces with 'NaN' in train dataset X_train['TotalCharges'].replace(' ',np.NaN, inplace=True) #Replacing spaces with 'NaN' in test dataset X_test['TotalCharges'].replace(' ',np.NaN, inplace=True) #Converting the type of column from X_train to float X_train['TotalCharges'] = X_train['TotalCharges'].astype(float) #Converting the type of column from X_test to float X_test['TotalCharges'] = X_test['TotalCharges'].astype(float) #Filling missing values X_train['TotalCharges'].fillna(X_train['TotalCharges'].mean(),inplace=True) X_test['TotalCharges'].fillna(X_train['TotalCharges'].mean(), inplace=True) #Check value counts print(X_train.isnull().sum()) cat_cols = X_train.select_dtypes(include='O').columns.tolist() #Label encoding train data for x in cat_cols: le = LabelEncoder() X_train[x] = le.fit_transform(X_train[x]) #Label encoding test data for x in cat_cols: le = LabelEncoder() X_test[x] = le.fit_transform(X_test[x]) #Encoding train data target y_train = y_train.replace({'No':0, 'Yes':1}) #Encoding test data target y_test = y_test.replace({'No':0, 'Yes':1}) # -------------- from sklearn.ensemble import AdaBoostClassifier from sklearn.metrics import accuracy_score,classification_report,confusion_matrix # Code starts here print(X_train, X_test, y_train, y_test) ada_model = AdaBoostClassifier(random_state = 0) ada_model.fit(X_train, y_train) y_pred = ada_model.predict(X_test) ada_score = accuracy_score(y_test, y_pred) ada_score ada_cm = confusion_matrix(y_test, y_pred) ada_cm # -------------- from xgboost import XGBClassifier from sklearn.model_selection import GridSearchCV #Parameter list parameters={'learning_rate':[0.1,0.15,0.2,0.25,0.3], 'max_depth':range(1,3)} # Code starts here xgb_model = XGBClassifier(random_state=0) xgb_model.fit(X_train, y_train) y_pred = xgb_model.predict(X_test) xgb_score = accuracy_score(y_test, y_pred) xgb_cm = confusion_matrix(y_test, y_pred) xgb_cr = classification_report(y_test, y_pred) clf_model = GridSearchCV(estimator=xgb_model,param_grid=parameters) clf_model.fit(X_train, y_train) y_pred = clf_model.predict(X_test) clf_score = accuracy_score(y_test, y_pred) clf_cm = confusion_matrix(y_test, y_pred) clf_cr = classification_report(y_test, y_pred) print(xgb_score, clf_score) print(xgb_cm, clf_cm) print(xgb_cr, xgb_cr)
24.663717
89
0.742375
d2a9213337ceeb22964f6608d3d20eb1d939ae74
16,566
py
Python
slsgd.py
xcgoner/ecml2019-slsgd
e4856b2015d4c7c39e28743dab2222ef8e0131fa
[ "MIT" ]
3
2019-09-10T15:46:04.000Z
2020-09-21T17:53:10.000Z
slsgd.py
xcgoner/ecml2019-slsgd
e4856b2015d4c7c39e28743dab2222ef8e0131fa
[ "MIT" ]
null
null
null
slsgd.py
xcgoner/ecml2019-slsgd
e4856b2015d4c7c39e28743dab2222ef8e0131fa
[ "MIT" ]
null
null
null
import argparse, time, logging, os, math, random os.environ["MXNET_USE_OPERATOR_TUNING"] = "0" import numpy as np from scipy import stats import mxnet as mx from mxnet import gluon, nd from mxnet import autograd as ag from mxnet.gluon import nn from mxnet.gluon.data.vision import transforms from gluoncv.model_zoo import get_model from gluoncv.utils import makedirs, LRScheduler from os import listdir import os.path import argparse import pickle from mpi4py import MPI mpi_comm = MPI.COMM_WORLD mpi_size = mpi_comm.Get_size() mpi_rank = mpi_comm.Get_rank() # print('rank: %d' % (mpi_rank), flush=True) parser = argparse.ArgumentParser() parser.add_argument("-d", "--dir", type=str, help="dir of the data", required=True) parser.add_argument("--valdir", type=str, help="dir of the val data", required=True) parser.add_argument("--batchsize", type=int, help="batchsize", default=8) parser.add_argument("--epochs", type=int, help="epochs", default=100) parser.add_argument("--interval", type=int, help="log interval", default=10) parser.add_argument("--nsplit", type=int, help="number of split", default=40) parser.add_argument("--lr", type=float, help="learning rate", default=0.001) parser.add_argument("--alpha", type=float, help="moving average", default=1.0) parser.add_argument("--alpha-decay", type=float, help="decay factor of alpha", default=0.5) parser.add_argument("--alpha-decay-epoch", type=str, help="epoch of alpha decay", default='800') parser.add_argument("--log", type=str, help="dir of the log file", default='train_cifar100.log') parser.add_argument("--classes", type=int, help="number of classes", default=20) parser.add_argument("--iterations", type=int, help="number of local epochs", default=50) parser.add_argument("--aggregation", type=str, help="aggregation method", default='mean') parser.add_argument("--nbyz", type=int, help="number of Byzantine workers", default=0) parser.add_argument("--trim", type=int, help="number of trimmed workers on one side", default=0) # parser.add_argument("--lr-decay", type=float, help="lr decay rate", default=0.1) # parser.add_argument("--lr-decay-epoch", type=str, help="lr decay epoch", default='400') parser.add_argument("--iid", type=int, help="IID setting", default=0) parser.add_argument("--model", type=str, help="model", default='mobilenetv2_1.0') parser.add_argument("--save", type=int, help="save", default=0) parser.add_argument("--start-epoch", type=int, help="epoch start from", default=-1) parser.add_argument("--seed", type=int, help="random seed", default=733) args = parser.parse_args() # print(args, flush=True) filehandler = logging.FileHandler(args.log) streamhandler = logging.StreamHandler() if mpi_rank == 0: logger = logging.getLogger('') logger.setLevel(logging.INFO) logger.addHandler(filehandler) logger.addHandler(streamhandler) mx.random.seed(args.seed + mpi_rank) random.seed(args.seed + mpi_rank) np.random.seed(args.seed + mpi_rank) data_dir = os.path.join(args.dir, 'dataset_split_{}'.format(args.nsplit)) train_dir = os.path.join(data_dir, 'train') # val_dir = os.path.join(data_dir, 'val') val_train_dir = os.path.join(args.valdir, 'train') val_val_dir = os.path.join(args.valdir, 'val') training_files = [] for filename in sorted(listdir(train_dir)): absolute_filename = os.path.join(train_dir, filename) training_files.append(absolute_filename) context = mx.cpu() classes = args.classes train_data_list = [] for training_file in training_files: [train_X, train_Y] = get_train_batch(training_file) train_dataset = 
mx.gluon.data.dataset.ArrayDataset(train_X, train_Y) train_data = gluon.data.DataLoader(train_dataset, batch_size=args.batchsize, shuffle=True, last_batch='rollover', num_workers=1) train_data_list.append(train_data) [val_train_X, val_train_Y] = get_val_train_batch(val_train_dir) val_train_dataset = mx.gluon.data.dataset.ArrayDataset(val_train_X, val_train_Y) val_train_data = gluon.data.DataLoader(val_train_dataset, batch_size=1000, shuffle=False, last_batch='keep', num_workers=1) [val_val_X, val_val_Y] = get_val_val_batch(val_val_dir) val_val_dataset = mx.gluon.data.dataset.ArrayDataset(val_val_X, val_val_Y) val_val_data = gluon.data.DataLoader(val_val_dataset, batch_size=1000, shuffle=False, last_batch='keep', num_workers=1) model_name = args.model if model_name == 'default': net = gluon.nn.Sequential() with net.name_scope(): # First convolutional layer net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu')) net.add(gluon.nn.BatchNorm()) net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu')) net.add(gluon.nn.BatchNorm()) net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2)) net.add(gluon.nn.Dropout(rate=0.25)) # Second convolutional layer # net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2)) # Third convolutional layer net.add(gluon.nn.Conv2D(channels=128, kernel_size=3, padding=(1,1), activation='relu')) net.add(gluon.nn.BatchNorm()) net.add(gluon.nn.Conv2D(channels=128, kernel_size=3, padding=(1,1), activation='relu')) net.add(gluon.nn.BatchNorm()) net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2)) net.add(gluon.nn.Dropout(rate=0.25)) # net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu')) # net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu')) # net.add(gluon.nn.Conv2D(channels=64, kernel_size=3, padding=(1,1), activation='relu')) # net.add(gluon.nn.MaxPool2D(pool_size=2, strides=2)) # Flatten and apply fullly connected layers net.add(gluon.nn.Flatten()) # net.add(gluon.nn.Dense(512, activation="relu")) # net.add(gluon.nn.Dense(512, activation="relu")) net.add(gluon.nn.Dense(512, activation="relu")) net.add(gluon.nn.Dropout(rate=0.25)) net.add(gluon.nn.Dense(classes)) else: model_kwargs = {'ctx': context, 'pretrained': False, 'classes': classes} net = get_model(model_name, **model_kwargs) if model_name.startswith('cifar') or model_name == 'default': net.initialize(mx.init.Xavier(), ctx=context) else: net.initialize(mx.init.MSRAPrelu(), ctx=context) # # no weight decay # for k, v in net.collect_params('.*beta|.*gamma|.*bias').items(): # v.wd_mult = 0.0 optimizer = 'sgd' lr = args.lr # optimizer_params = {'momentum': 0.9, 'learning_rate': lr, 'wd': 0.0001} optimizer_params = {'momentum': 0.0, 'learning_rate': lr, 'wd': 0.0} # lr_decay_epoch = [int(i) for i in args.lr_decay_epoch.split(',')] alpha_decay_epoch = [int(i) for i in args.alpha_decay_epoch.split(',')] trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params) loss_func = gluon.loss.SoftmaxCrossEntropyLoss() train_metric = mx.metric.Accuracy() acc_top1 = mx.metric.Accuracy() acc_top5 = mx.metric.TopKAccuracy(5) train_cross_entropy = mx.metric.CrossEntropy() # warmup # print('warm up', flush=True) trainer.set_learning_rate(0.01) # train_data = random.choice(train_data_list) train_data = train_data_list[90] for local_epoch in range(5): for i, (data, label) in enumerate(train_data): with ag.record(): outputs = net(data) loss = loss_func(outputs, label) loss.backward() 
trainer.step(args.batchsize) if args.start_epoch > 0: break if args.start_epoch > 0: break # # force initialization # train_data = random.choice(train_data_list) # for i, (data, label) in enumerate(train_data): # outputs = net(data) if mpi_rank == 0: params_prev = [param.data().copy() for param in net.collect_params().values()] else: params_prev = None nd.waitall() # broadcast params_prev = mpi_comm.bcast(params_prev, root=0) for param, param_prev in zip(net.collect_params().values(), params_prev): param.set_data(param_prev) if mpi_rank == 0: worker_list = list(range(mpi_size)) training_file_index_list = [i for i in range(len(training_files))] alpha = args.alpha randperm_choice_list = [] randperm_list = [i for i in range(args.nsplit)] for i in range(int(math.ceil(args.epochs * mpi_size / args.nsplit))): random.shuffle(randperm_list) randperm_choice_list = randperm_choice_list + randperm_list if args.start_epoch > 0: [dirname, postfix] = os.path.splitext(args.log) filename = dirname + ("_%04d.params" % (args.start_epoch)) net.load_parameters(filename, ctx=context) acc_top1.reset() acc_top5.reset() train_cross_entropy.reset() for i, (data, label) in enumerate(val_val_data): outputs = net(data) acc_top1.update(label, outputs) acc_top5.update(label, outputs) for i, (data, label) in enumerate(val_train_data): outputs = net(data) train_cross_entropy.update(label, nd.softmax(outputs)) _, top1 = acc_top1.get() _, top5 = acc_top5.get() _, crossentropy = train_cross_entropy.get() top1_list = mpi_comm.gather(top1, root=0) top5_list = mpi_comm.gather(top5, root=0) crossentropy_list = mpi_comm.gather(crossentropy, root=0) if mpi_rank == 0: top1_list = np.array(top1_list) top5_list = np.array(top5_list) crossentropy_list = np.array(crossentropy_list) logger.info('[Epoch %d] validation: acc-top1=%f acc-top5=%f, loss=%f, lr=%f, alpha=%f'%(args.start_epoch, top1_list.mean(), top5_list.mean(), crossentropy_list.mean(), trainer.learning_rate, alpha)) nd.waitall() time_0 = time.time() for epoch in range(args.start_epoch+1, args.epochs): # train_metric.reset() # if epoch in lr_decay_epoch: # lr = lr * args.lr_decay if epoch in alpha_decay_epoch: alpha = alpha * args.alpha_decay tic = time.time() if args.iid == 0: if mpi_rank == 0: training_file_index_sublist = randperm_choice_list[(mpi_size * epoch):(mpi_size * epoch + mpi_size)] # logger.info(training_file_index_sublist) else: training_file_index_sublist = None training_file_index = mpi_comm.scatter(training_file_index_sublist, root=0) train_data = train_data_list[training_file_index] trainer = gluon.Trainer(net.collect_params(), optimizer, optimizer_params) trainer.set_learning_rate(lr) if alpha < 1: for param, param_prev in zip(net.collect_params().values(), params_prev): if param.grad_req != 'null': param_prev[:] = param.data() * (1-alpha) # select byz workers if args.nbyz > 0: if mpi_rank == 0: random.shuffle(worker_list) byz_worker_list = worker_list[0:args.nbyz] else: byz_worker_list = None byz_worker_list = mpi_comm.bcast(byz_worker_list, root=0) else: byz_worker_list = [] if mpi_rank in byz_worker_list: # byz worker [byz_train_X, byz_train_Y] = get_train_batch_byz(random.choice(training_files)) byz_train_dataset = mx.gluon.data.dataset.ArrayDataset(byz_train_X, byz_train_Y) byz_train_data = gluon.data.DataLoader(byz_train_dataset, batch_size=args.batchsize, shuffle=True, last_batch='rollover', num_workers=1) net.initialize(mx.init.MSRAPrelu(), ctx=context, force_reinit=True) for local_epoch in range(args.iterations): for i, (data, label) in 
enumerate(byz_train_data): with ag.record(): outputs = net(data) loss = loss_func(outputs, label) loss.backward() trainer.step(args.batchsize) else: # train # local epoch for local_epoch in range(args.iterations): if args.iid == 1: train_data = random.choice(train_data_list) for i, (data, label) in enumerate(train_data): with ag.record(): outputs = net(data) loss = loss_func(outputs, label) loss.backward() trainer.step(args.batchsize) # aggregation nd.waitall() params_np = [param.data().copy().asnumpy() for param in net.collect_params().values()] params_np_list = mpi_comm.gather(params_np, root=0) if mpi_rank == 0: n_params = len(params_np) if args.aggregation == "trim" or args.trim > 0: params_np = [ ( stats.trim_mean( np.stack( [params[j] for params in params_np_list], axis=0), args.trim/mpi_size, axis=0 ) ) for j in range(n_params) ] else: params_np = [ ( np.mean( np.stack( [params[j] for params in params_np_list], axis=0), axis=0 ) ) for j in range(n_params) ] else: params_np = None params_np = mpi_comm.bcast(params_np, root=0) params_nd = [ nd.array(param_np) for param_np in params_np ] for param, param_nd in zip(net.collect_params().values(), params_nd): param.set_data(param_nd) if alpha < 1: # moving average for param, param_prev in zip(net.collect_params().values(), params_prev): if param.grad_req != 'null': weight = param.data() weight[:] = weight * alpha + param_prev # test nd.waitall() toc = time.time() if ( epoch % args.interval == 0 or epoch == args.epochs-1 ) : acc_top1.reset() acc_top5.reset() train_cross_entropy.reset() for i, (data, label) in enumerate(val_val_data): outputs = net(data) acc_top1.update(label, outputs) acc_top5.update(label, outputs) for i, (data, label) in enumerate(val_train_data): outputs = net(data) train_cross_entropy.update(label, nd.softmax(outputs)) _, top1 = acc_top1.get() _, top5 = acc_top5.get() _, crossentropy = train_cross_entropy.get() top1_list = mpi_comm.gather(top1, root=0) top5_list = mpi_comm.gather(top5, root=0) crossentropy_list = mpi_comm.gather(crossentropy, root=0) if mpi_rank == 0: top1_list = np.array(top1_list) top5_list = np.array(top5_list) crossentropy_list = np.array(crossentropy_list) logger.info('[Epoch %d] validation: acc-top1=%f acc-top5=%f, loss=%f, lr=%f, alpha=%f, time=%f, elapsed=%f'%(epoch, top1_list.mean(), top5_list.mean(), crossentropy_list.mean(), trainer.learning_rate, alpha, toc-tic, time.time()-time_0)) # logger.info('[Epoch %d] validation: acc-top1=%f acc-top5=%f'%(epoch, top1, top5)) if args.save == 1: [dirname, postfix] = os.path.splitext(args.log) filename = dirname + ("_%04d.params" % (epoch)) net.save_parameters(filename) nd.waitall()
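# ---- Editorial sketch (not part of the original record) --------------------
# The data-loading helpers called above (get_train_batch, get_val_train_batch,
# get_val_val_batch, get_train_batch_byz) are elided from this record. A
# minimal version consistent with their call sites -- assuming each split is
# stored as a pickled (X, Y) pair, which is a guess about the on-disk layout --
# might look like this (in the real file it would precede first use):
def get_train_batch_sketch(filename):
    with open(filename, 'rb') as f:   # pickle and nd are imported at the top of the file
        X, Y = pickle.load(f)
    return [nd.array(X), nd.array(Y)]
# get_train_batch_byz would additionally corrupt Y (e.g. flip labels) to
# simulate a Byzantine worker feeding the trimmed-mean aggregation above.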
40.306569
253
0.650247
d2a9e60639815c6fa23b7d5054d4eac994971146
59,644
py
Python
predictor.py
MIC-DKFZ/DetectionAndRegression
40f3cb92ec6447767bd85b62a015b0d50e32ad26
[ "Apache-2.0" ]
40
2019-09-24T08:11:35.000Z
2022-02-23T13:49:01.000Z
predictor.py
MIC-DKFZ/MedicalDetectionRegression
40f3cb92ec6447767bd85b62a015b0d50e32ad26
[ "Apache-2.0" ]
13
2019-11-04T10:52:40.000Z
2022-03-11T23:57:14.000Z
predictor.py
MIC-DKFZ/MedicalDetectionRegression
40f3cb92ec6447767bd85b62a015b0d50e32ad26
[ "Apache-2.0" ]
22
2019-08-28T15:32:25.000Z
2022-02-18T11:27:30.000Z
#!/usr/bin/env python # Copyright 2019 Division of Medical Image Computing, German Cancer Research Center (DKFZ). # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import os from multiprocessing import Pool import pickle import time import numpy as np import torch from scipy.stats import norm from collections import OrderedDict import plotting as plg import utils.model_utils as mutils import utils.exp_utils as utils def apply_wbc_to_patient(inputs): """ wrapper around prediction box consolidation: weighted box clustering (wbc). processes a single patient. loops over batch elements in patient results (1 in 3D, slices in 2D) and foreground classes, aggregates and stores results in new list. :return. patient_results_list: list over batch elements. each element is a list over boxes, where each box is one dictionary: [[box_0, ...], [box_n,...]]. batch elements are slices for 2D predictions, and a dummy batch dimension of 1 for 3D predictions. :return. pid: string. patient id. """ regress_flag, in_patient_results_list, pid, class_dict, clustering_iou, n_ens = inputs out_patient_results_list = [[] for _ in range(len(in_patient_results_list))] for bix, b in enumerate(in_patient_results_list): for cl in list(class_dict.keys()): boxes = [(ix, box) for ix, box in enumerate(b) if (box['box_type'] == 'det' and box['box_pred_class_id'] == cl)] box_coords = np.array([b[1]['box_coords'] for b in boxes]) box_scores = np.array([b[1]['box_score'] for b in boxes]) box_center_factor = np.array([b[1]['box_patch_center_factor'] for b in boxes]) box_n_overlaps = np.array([b[1]['box_n_overlaps'] for b in boxes]) try: box_patch_id = np.array([b[1]['patch_id'] for b in boxes]) except KeyError: #backward compatibility for already saved pred results ... omg box_patch_id = np.array([b[1]['ens_ix'] for b in boxes]) box_regressions = np.array([b[1]['regression'] for b in boxes]) if regress_flag else None box_rg_bins = np.array([b[1]['rg_bin'] if 'rg_bin' in b[1].keys() else float('NaN') for b in boxes]) box_rg_uncs = np.array([b[1]['rg_uncertainty'] if 'rg_uncertainty' in b[1].keys() else float('NaN') for b in boxes]) if 0 not in box_scores.shape: keep_scores, keep_coords, keep_n_missing, keep_regressions, keep_rg_bins, keep_rg_uncs = \ weighted_box_clustering(box_coords, box_scores, box_center_factor, box_n_overlaps, box_rg_bins, box_rg_uncs, box_regressions, box_patch_id, clustering_iou, n_ens) for boxix in range(len(keep_scores)): clustered_box = {'box_type': 'det', 'box_coords': keep_coords[boxix], 'box_score': keep_scores[boxix], 'cluster_n_missing': keep_n_missing[boxix], 'box_pred_class_id': cl} if regress_flag: clustered_box.update({'regression': keep_regressions[boxix], 'rg_uncertainty': keep_rg_uncs[boxix], 'rg_bin': keep_rg_bins[boxix]}) out_patient_results_list[bix].append(clustered_box) # add gt boxes back to new output list. 
out_patient_results_list[bix].extend([box for box in b if box['box_type'] == 'gt']) return [out_patient_results_list, pid] def weighted_box_clustering(box_coords, scores, box_pc_facts, box_n_ovs, box_rg_bins, box_rg_uncs, box_regress, box_patch_id, thresh, n_ens): """Consolidates overlapping predictions resulting from patch overlaps, test data augmentations and temporal ensembling. clusters predictions together with iou > thresh (like in NMS). Output score and coordinate for one cluster are the average weighted by individual patch center factors (how trustworthy is this candidate measured by how centered its position within the patch is) and the size of the corresponding box. The number of expected predictions at a position is n_data_aug * n_temp_ens * n_overlaps_at_position (1 prediction per unique patch). Missing predictions at a cluster position are defined as the number of unique patches in the cluster, which did not contribute any predict any boxes. :param dets: (n_dets, (y1, x1, y2, x2, (z1), (z2), scores, box_pc_facts, box_n_ovs). :param box_coords: y1, x1, y2, x2, (z1), (z2). :param scores: confidence scores. :param box_pc_facts: patch-center factors from position on patch tiles. :param box_n_ovs: number of patch overlaps at box position. :param box_rg_bins: regression bin predictions. :param box_rg_uncs: (n_dets,) regression uncertainties (from model mrcnn_aleatoric). :param box_regress: (n_dets, n_regression_features). :param box_patch_id: ensemble index. :param thresh: threshold for iou_matching. :param n_ens: number of models, that are ensembled. (-> number of expected predictions per position). :return: keep_scores: (n_keep) new scores of boxes to be kept. :return: keep_coords: (n_keep, (y1, x1, y2, x2, (z1), (z2)) new coordinates of boxes to be kept. """ dim = 2 if box_coords.shape[1] == 4 else 3 y1 = box_coords[:,0] x1 = box_coords[:,1] y2 = box_coords[:,2] x2 = box_coords[:,3] areas = (y2 - y1 + 1) * (x2 - x1 + 1) if dim == 3: z1 = box_coords[:, 4] z2 = box_coords[:, 5] areas *= (z2 - z1 + 1) # order is the sorted index. maps order to index o[1] = 24 (rank1, ix 24) order = scores.argsort()[::-1] keep_scores = [] keep_coords = [] keep_n_missing = [] keep_regress = [] keep_rg_bins = [] keep_rg_uncs = [] while order.size > 0: i = order[0] # highest scoring element yy1 = np.maximum(y1[i], y1[order]) xx1 = np.maximum(x1[i], x1[order]) yy2 = np.minimum(y2[i], y2[order]) xx2 = np.minimum(x2[i], x2[order]) w = np.maximum(0, xx2 - xx1 + 1) h = np.maximum(0, yy2 - yy1 + 1) inter = w * h if dim == 3: zz1 = np.maximum(z1[i], z1[order]) zz2 = np.minimum(z2[i], z2[order]) d = np.maximum(0, zz2 - zz1 + 1) inter *= d # overlap between currently highest scoring box and all boxes. ovr = inter / (areas[i] + areas[order] - inter) ovr_fl = inter.astype('float64') / (areas[i] + areas[order] - inter.astype('float64')) assert np.all(ovr==ovr_fl), "ovr {}\n ovr_float {}".format(ovr, ovr_fl) # get all the predictions that match the current box to build one cluster. matches = np.nonzero(ovr > thresh)[0] match_n_ovs = box_n_ovs[order[matches]] match_pc_facts = box_pc_facts[order[matches]] match_patch_id = box_patch_id[order[matches]] match_ov_facts = ovr[matches] match_areas = areas[order[matches]] match_scores = scores[order[matches]] # weight all scores in cluster by patch factors, and size. 
match_score_weights = match_ov_facts * match_areas * match_pc_facts match_scores *= match_score_weights # for the weighted average, scores have to be divided by the number of total expected preds at the position # of the current cluster. 1 Prediction per patch is expected. therefore, the number of ensembled models is # multiplied by the mean overlaps of patches at this position (boxes of the cluster might partly be # in areas of different overlaps). n_expected_preds = n_ens * np.mean(match_n_ovs) # the number of missing predictions is obtained as the number of patches, # which did not contribute any prediction to the current cluster. n_missing_preds = np.max((0, n_expected_preds - np.unique(match_patch_id).shape[0])) # missing preds are given the mean weighting # (expected prediction is the mean over all predictions in cluster). denom = np.sum(match_score_weights) + n_missing_preds * np.mean(match_score_weights) # compute weighted average score for the cluster avg_score = np.sum(match_scores) / denom # compute weighted average of coordinates for the cluster. now only take existing # predictions into account. avg_coords = [np.sum(y1[order[matches]] * match_scores) / np.sum(match_scores), np.sum(x1[order[matches]] * match_scores) / np.sum(match_scores), np.sum(y2[order[matches]] * match_scores) / np.sum(match_scores), np.sum(x2[order[matches]] * match_scores) / np.sum(match_scores)] if dim == 3: avg_coords.append(np.sum(z1[order[matches]] * match_scores) / np.sum(match_scores)) avg_coords.append(np.sum(z2[order[matches]] * match_scores) / np.sum(match_scores)) if box_regress is not None: # compute wt. avg. of regression vectors (component-wise average) avg_regress = np.sum(box_regress[order[matches]] * match_scores[:, np.newaxis], axis=0) / np.sum( match_scores) avg_rg_bins = np.round(np.sum(box_rg_bins[order[matches]] * match_scores) / np.sum(match_scores)) avg_rg_uncs = np.sum(box_rg_uncs[order[matches]] * match_scores) / np.sum(match_scores) else: avg_regress = np.array(float('NaN')) avg_rg_bins = np.array(float('NaN')) avg_rg_uncs = np.array(float('NaN')) # some clusters might have very low scores due to high amounts of missing predictions. # filter out the with a conservative threshold, to speed up evaluation. if avg_score > 0.01: keep_scores.append(avg_score) keep_coords.append(avg_coords) keep_n_missing.append((n_missing_preds / n_expected_preds * 100)) # relative keep_regress.append(avg_regress) keep_rg_uncs.append(avg_rg_uncs) keep_rg_bins.append(avg_rg_bins) # get index of all elements that were not matched and discard all others. inds = np.nonzero(ovr <= thresh)[0] inds_where = np.where(ovr<=thresh)[0] assert np.all(inds == inds_where), "inds_nonzero {} \ninds_where {}".format(inds, inds_where) order = order[inds] return keep_scores, keep_coords, keep_n_missing, keep_regress, keep_rg_bins, keep_rg_uncs def nms_2to3D(dets, thresh): """ Merges 2D boxes to 3D cubes. For this purpose, boxes of all slices are regarded as lying in one slice. An adaptation of Non-maximum suppression is applied where clusters are found (like in NMS) with the extra constraint that suppressed boxes have to have 'connected' z coordinates w.r.t the core slice (cluster center, highest scoring box, the prevailing box). 'connected' z-coordinates are determined as the z-coordinates with predictions until the first coordinate for which no prediction is found. example: a cluster of predictions was found overlap > iou thresh in xy (like NMS). The z-coordinate of the highest scoring box is 50. 
Other predictions have 23, 46, 48, 49, 51, 52, 53, 56, 57. Only the coordinates connected with 50 are clustered to one cube: 48, 49, 51, 52, 53. (46 not because nothing was found in 47, so 47 is a 'hole', which interrupts the connection). Only the boxes corresponding to these coordinates are suppressed. All others are kept for building of further clusters. This algorithm works better with a certain min_confidence of predictions, because low confidence (e.g. noisy/cluttery) predictions can break the relatively strong assumption of defining cubes' z-boundaries at the first 'hole' in the cluster. :param dets: (n_detections, (y1, x1, y2, x2, scores, slice_id) :param thresh: iou matchin threshold (like in NMS). :return: keep: (n_keep,) 1D tensor of indices to be kept. :return: keep_z: (n_keep, [z1, z2]) z-coordinates to be added to boxes, which are kept in order to form cubes. """ y1 = dets[:, 0] x1 = dets[:, 1] y2 = dets[:, 2] x2 = dets[:, 3] assert np.all(y1 <= y2) and np.all(x1 <= x2), """"the definition of the coordinates is crucially important here: where maximum is taken needs to be the lower coordinate""" scores = dets[:, -2] slice_id = dets[:, -1] areas = (x2 - x1 + 1) * (y2 - y1 + 1) order = scores.argsort()[::-1] keep = [] keep_z = [] while order.size > 0: # order is the sorted index. maps order to index: order[1] = 24 means (rank1, ix 24) i = order[0] # highest scoring element yy1 = np.maximum(y1[i], y1[order]) # highest scoring element still in >order<, is compared to itself: okay? xx1 = np.maximum(x1[i], x1[order]) yy2 = np.minimum(y2[i], y2[order]) xx2 = np.minimum(x2[i], x2[order]) h = np.maximum(0.0, yy2 - yy1 + 1) w = np.maximum(0.0, xx2 - xx1 + 1) inter = h * w iou = inter / (areas[i] + areas[order] - inter) matches = np.argwhere( iou > thresh) # get all the elements that match the current box and have a lower score slice_ids = slice_id[order[matches]] core_slice = slice_id[int(i)] upper_holes = [ii for ii in np.arange(core_slice, np.max(slice_ids)) if ii not in slice_ids] lower_holes = [ii for ii in np.arange(np.min(slice_ids), core_slice) if ii not in slice_ids] max_valid_slice_id = np.min(upper_holes) if len(upper_holes) > 0 else np.max(slice_ids) min_valid_slice_id = np.max(lower_holes) if len(lower_holes) > 0 else np.min(slice_ids) z_matches = matches[(slice_ids <= max_valid_slice_id) & (slice_ids >= min_valid_slice_id)] # expand by one z voxel since box content is surrounded w/o overlap, i.e., z-content computed as z2-z1 z1 = np.min(slice_id[order[z_matches]]) - 1 z2 = np.max(slice_id[order[z_matches]]) + 1 keep.append(i) keep_z.append([z1, z2]) order = np.delete(order, z_matches, axis=0) return keep, keep_z def apply_2d_3d_merging_to_patient(inputs): """ wrapper around 2Dto3D merging operation. Processes a single patient. Takes 2D patient results (slices in batch dimension) and returns 3D patient results (dummy batch dimension of 1). Applies an adaption of Non-Maximum Surpression (Detailed methodology is described in nms_2to3D). :return. results_dict_boxes: list over batch elements (1 in 3D). each element is a list over boxes, where each box is one dictionary: [[box_0, ...], [box_n,...]]. :return. pid: string. patient id. """ in_patient_results_list, pid, class_dict, merge_3D_iou = inputs out_patient_results_list = [] for cl in list(class_dict.keys()): det_boxes, slice_ids = [], [] # collect box predictions over batch dimension (slices) and store slice info as slice_ids. 
for batch_ix, batch in enumerate(in_patient_results_list): batch_element_det_boxes = [(ix, box) for ix, box in enumerate(batch) if (box['box_type'] == 'det' and box['box_pred_class_id'] == cl)] det_boxes += batch_element_det_boxes slice_ids += [batch_ix] * len(batch_element_det_boxes) box_coords = np.array([batch[1]['box_coords'] for batch in det_boxes]) box_scores = np.array([batch[1]['box_score'] for batch in det_boxes]) slice_ids = np.array(slice_ids) if 0 not in box_scores.shape: keep_ix, keep_z = nms_2to3D( np.concatenate((box_coords, box_scores[:, None], slice_ids[:, None]), axis=1), merge_3D_iou) else: keep_ix, keep_z = [], [] # store kept predictions in new results list and add corresponding z-dimension info to coordinates. for kix, kz in zip(keep_ix, keep_z): keep_box = det_boxes[kix][1] keep_box['box_coords'] = list(keep_box['box_coords']) + kz out_patient_results_list.append(keep_box) gt_boxes = [box for b in in_patient_results_list for box in b if box['box_type'] == 'gt'] if len(gt_boxes) > 0: assert np.all([len(box["box_coords"]) == 6 for box in gt_boxes]), "expanded preds to 3D but GT is 2D." out_patient_results_list += gt_boxes return [[out_patient_results_list], pid] # additional list wrapping is extra batch dim.
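# ---- Editorial usage sketch (not part of the original file) ----------------
# nms_2to3D merges per-slice 2D detections into 3D cubes. Two strongly
# overlapping boxes on adjacent slices collapse into one kept box whose
# z-extent is expanded by one voxel on each side (hypothetical values):
if __name__ == "__main__":
    demo_dets = np.array([[10., 10., 20., 20., 0.9, 5.],   # y1, x1, y2, x2, score, slice_id
                          [11., 11., 21., 21., 0.8, 6.]])
    demo_keep, demo_keep_z = nms_2to3D(demo_dets, thresh=0.5)
    print(demo_keep, demo_keep_z)   # -> [0] [[4.0, 7.0]]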
59.229394
192
0.598803
d2aa2e4deaca6a1a85b89b1e9c89d89fa5c4d8f5
424
py
Python
archive/jonesboro/__init__.py
jayktee/scrapers-us-municipal
ff52a331e91cb590a3eda7db6c688d75b77acacb
[ "MIT" ]
67
2015-04-28T19:28:18.000Z
2022-01-31T03:27:17.000Z
archive/jonesboro/__init__.py
jayktee/scrapers-us-municipal
ff52a331e91cb590a3eda7db6c688d75b77acacb
[ "MIT" ]
202
2015-01-15T18:43:12.000Z
2021-11-23T15:09:10.000Z
archive/jonesboro/__init__.py
jayktee/scrapers-us-municipal
ff52a331e91cb590a3eda7db6c688d75b77acacb
[ "MIT" ]
54
2015-01-27T03:15:45.000Z
2021-09-10T19:35:32.000Z
from pupa.scrape import Jurisdiction from legistar.ext.pupa import LegistarPeopleScraper
28.266667
87
0.735849
d2aa498a5dc13b5e44bb5a53742aa0908d8d79da
2,766
py
Python
src/config.py
La-tale/MessyTable
42ae08294f1a576d2477a4b4c12b2aec047c2ba9
[ "MIT" ]
32
2020-07-13T04:30:00.000Z
2022-03-17T12:04:32.000Z
src/config.py
La-tale/MessyTable
42ae08294f1a576d2477a4b4c12b2aec047c2ba9
[ "MIT" ]
12
2020-08-31T02:58:37.000Z
2022-03-26T04:05:27.000Z
src/config.py
La-tale/MessyTable
42ae08294f1a576d2477a4b4c12b2aec047c2ba9
[ "MIT" ]
8
2020-07-27T05:20:33.000Z
2022-02-04T06:58:37.000Z
import yaml import os def parse_config(args): """ prepare configs """ file_dir = os.path.dirname(os.path.realpath('__file__')) messytable_dir = os.path.realpath(os.path.join(file_dir, '..')) config_pathname = os.path.join(messytable_dir,'models',args.config_dir,'train.yaml') config = yaml.load(open(config_pathname, 'r')) config['messytable_dir'] = messytable_dir config['config_dir'] = os.path.join(messytable_dir,'models',args.config_dir) config['data_dir'] = os.path.join(messytable_dir, 'data') if 'data_dir' not in config else config['data_dir'] # NOTE: either indicate data_dir or put the data in messytable/data config['img_dir'] = os.path.join(config['data_dir'],'images') config['train_label_pathname'] = os.path.join(config['data_dir'],'labels',config['train_json']) config['num_workers'] = config['num_workers'] if 'num_workers' in config else 16 config['milestones'] = config['milestones'] if 'milestones' in config else [60, 80] config['split_samples_in_func'] = config['split_samples_in_func'] if 'split_samples_in_func' in config else True config['loss_func'] = config['loss_func'] if 'loss_func' in config else 'ERROR_LOSS_FUNC' config['triplet_margin'] = config['triplet_margin'] if 'triplet_margin' in config else 0.3 config['data_augmentation'] = config['data_augmentation'] if 'data_augmentation' in config else False config['cropped_img_size'] = (config['cropped_height'],config['cropped_width']) config['original_img_size'] = (config['img_height'],config['img_width']) config['scene_ratio'] = config['scene_ratio'] if 'scene_ratio' in config else 1.0 config['cam_selected_num'] = config['cam_selected_num'] if 'cam_selected_num' in config else 8 config['triplet_sampling_ratio'] = config['triplet_sampling_ratio'] if 'triplet_sampling_ratio' in config else [0.5,0.3,0.2] config['image_pairs_per_batch'] = config['image_pairs_per_batch'] if 'image_pairs_per_batch' in config else 24 config['triplet_batch_size'] = config['triplet_batch_size'] if 'triplet_batch_size' in config else config['batch_size'] config['learning_rate'] = float(config['learning_rate']) config['zoomout_crop_num'] = 'single_crop' if len(config['zoomout_ratio']) == 1 else 'multi_crops' # make cam_pairs test_cam_pairs = [] for i in range(1,9): for j in range(i+1,10): test_cam_pairs.append((str(i),str(j))) reversed_cam_pairs = [] for cam_pair in test_cam_pairs: reversed_cam_pairs.append((cam_pair[1],cam_pair[0])) config['test_cam_pairs'] = test_cam_pairs config['train_cam_pairs'] = test_cam_pairs + reversed_cam_pairs config['cam_list'] = [str(i) for i in range(1,10)] return config
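# ---- Editorial usage sketch (hypothetical directory name) ------------------
# parse_config expects models/<config_dir>/train.yaml relative to the
# repository root, then fills in defaults for any key the YAML omits:
#
#   import argparse
#   args = argparse.Namespace(config_dir='my_experiment')
#   config = parse_config(args)
#   config['triplet_margin']   # -> 0.3 unless train.yaml overrides it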
56.44898
181
0.713304
d2ab49c4b3562bad12874570d0c5751dda4cf3e6
1,194
py
Python
tests/settings.py
josemarimanio/django-adminlte2-templates
d39ab5eaec674c4725015fe43fc93e74dce78a6e
[ "MIT" ]
10
2020-03-21T10:50:11.000Z
2022-03-04T08:36:43.000Z
tests/settings.py
josemarimanio/django-adminlte2-templates
d39ab5eaec674c4725015fe43fc93e74dce78a6e
[ "MIT" ]
6
2020-06-06T08:48:29.000Z
2021-06-10T18:49:35.000Z
tests/settings.py
josemarimanio/django-adminlte2-templates
d39ab5eaec674c4725015fe43fc93e74dce78a6e
[ "MIT" ]
1
2021-09-14T02:00:43.000Z
2021-09-14T02:00:43.000Z
import os BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) SECRET_KEY = '!t_(11ght0&nmb&$tf4to=gdg&u$!hsm3@)c6dzp=zdc*c9zci' # nosec INSTALLED_APPS = [ 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'adminlte2_templates', 'tests', ] MIDDLEWARE = [ 'django.contrib.sessions.middleware.SessionMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', ] ROOT_URLCONF = 'tests.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'APP_DIRS': True, 'DIRS': [os.path.join(BASE_DIR, 'tests/templates')], 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'adminlte2_templates.context_processors.template', ], }, }, ] DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } PASSWORD_HASHERS = [ 'django.contrib.auth.hashers.MD5PasswordHasher', ]
23.88
74
0.629816
d2ae04ea58cc84694d33370988510f0b8bdcadb9
2,658
py
Python
two-variables-function-fitting/fxy_gen.py
ettoremessina/fitting-with-mlp-using-tensorflow
50303c7161521f690c37b80a72a281129052365b
[ "MIT" ]
9
2020-03-21T08:45:28.000Z
2021-11-30T02:49:41.000Z
two-variables-function-fitting/fxy_gen.py
ettoremessina/fitting-with-mlp-using-tensorflow
50303c7161521f690c37b80a72a281129052365b
[ "MIT" ]
null
null
null
two-variables-function-fitting/fxy_gen.py
ettoremessina/fitting-with-mlp-using-tensorflow
50303c7161521f690c37b80a72a281129052365b
[ "MIT" ]
3
2020-04-08T15:35:03.000Z
2022-03-22T02:19:02.000Z
import argparse import numpy as np import csv if __name__ == "__main__": parser = argparse.ArgumentParser(description='fxy_gen.py generates a synthetic dataset file calling a two-variables real function on a rectangle') parser.add_argument('--dsout', type=str, dest='ds_output_filename', required=True, help='dataset output file (csv format)') parser.add_argument('--fxy', type=str, dest='func_xy_body', required=True, help='f(x, y) body (lamba format)') parser.add_argument('--rxbegin', type=float, dest='range_xbegin', required=False, default=-5.0, help='begin x range (default:-5.0)') parser.add_argument('--rxend', type=float, dest='range_xend', required=False, default=+5.0, help='end x range (default:+5.0)') parser.add_argument('--rybegin', type=float, dest='range_ybegin', required=False, default=-5.0, help='begin y range (default:-5.0)') parser.add_argument('--ryend', type=float, dest='range_yend', required=False, default=+5.0, help='end y range (default:+5.0)') parser.add_argument('--rstep', type=float, dest='range_step', required=False, default=0.01, help='step range (default: 0.01)') args = parser.parse_args() print("#### Started {} {} ####".format(__file__, args)); x_values = np.arange(args.range_xbegin, args.range_xend, args.range_step, dtype=float) y_values = np.arange(args.range_ybegin, args.range_yend, args.range_step, dtype=float) func_xy = eval('lambda x, y: ' + args.func_xy_body) csv_ds_output_file = open(args.ds_output_filename, 'w') with csv_ds_output_file: writer = csv.writer(csv_ds_output_file, delimiter=',') for i in range(0, x_values.size): for j in range(0, y_values.size): writer.writerow([x_values[i], y_values[j], func_xy(x_values[i], y_values[j])]) print("#### Terminated {} ####".format(__file__));
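# ---- Editorial usage note (hypothetical file name) --------------------------
# The --fxy body is eval'd as ``lambda x, y: <body>`` with ``np`` in scope at
# module level, so numpy functions are available, e.g.:
#
#   python fxy_gen.py --dsout ds.csv --fxy "np.sin(np.sqrt(x**2 + y**2))" --rstep 0.5
#
# which writes one (x, y, f(x, y)) row per grid point to ds.csv.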
37.971429
150
0.482318
d2aee573a11ac0e4ec731ba7feda47d776f90ea2
995
py
Python
custom/icds_reports/dashboard_utils.py
tstalka/commcare-hq
902412b0f97ba0daac173fe284f3adc4c01bcd76
[ "BSD-3-Clause" ]
null
null
null
custom/icds_reports/dashboard_utils.py
tstalka/commcare-hq
902412b0f97ba0daac173fe284f3adc4c01bcd76
[ "BSD-3-Clause" ]
null
null
null
custom/icds_reports/dashboard_utils.py
tstalka/commcare-hq
902412b0f97ba0daac173fe284f3adc4c01bcd76
[ "BSD-3-Clause" ]
null
null
null
from corehq.apps.locations.util import location_hierarchy_config from custom.icds_reports.utils import icds_pre_release_features
34.310345
78
0.729648
d2af35f5ecd1284185b97cd7fd48a1dabdbf319d
1,714
py
Python
data_input.py
zpcore/OnePass
fc102fae172c617535d4661bfa99a0302cbe09db
[ "MIT" ]
null
null
null
data_input.py
zpcore/OnePass
fc102fae172c617535d4661bfa99a0302cbe09db
[ "MIT" ]
null
null
null
data_input.py
zpcore/OnePass
fc102fae172c617535d4661bfa99a0302cbe09db
[ "MIT" ]
null
null
null
import json import string, sys from random import * # tok = Token() # tok.get_input() # print(json.dumps(tok, cls=MyEncoder))
32.339623
101
0.656943
d2af5783fc08617f08a4edb9dc33a39579f11d65
1,401
py
Python
examples/python/test_dict.py
SmartEconomyWorkshop/workshop
5961dcc8832f60b3a0407cb9a8361ba5485ac280
[ "MIT" ]
79
2017-10-22T03:35:06.000Z
2021-12-02T10:28:06.000Z
examples/python/test_dict.py
SmartEconomyWorkshop/workshop
5961dcc8832f60b3a0407cb9a8361ba5485ac280
[ "MIT" ]
122
2017-10-19T12:34:08.000Z
2020-08-20T12:38:17.000Z
examples/python/test_dict.py
SmartEconomyWorkshop/workshop
5961dcc8832f60b3a0407cb9a8361ba5485ac280
[ "MIT" ]
76
2017-10-19T05:09:55.000Z
2020-12-08T12:03:59.000Z
from boa_test.tests.boa_test import BoaTest from boa.compiler import Compiler from neo.Settings import settings from neo.Prompt.Commands.BuildNRun import TestBuild
36.868421
108
0.666667
d2b08bd5689396a0415385c35a4d92cedae61e22
520
py
Python
deployment_classifier/setup.py
m-santh/VayuAnukulani
d1b881ac6268c24761dc0ef6db296d7e5ee1a22e
[ "MIT" ]
1
2021-04-19T17:04:03.000Z
2021-04-19T17:04:03.000Z
deployment_classifier/setup.py
m-santh/VayuAnukulani
d1b881ac6268c24761dc0ef6db296d7e5ee1a22e
[ "MIT" ]
18
2020-01-28T22:36:26.000Z
2020-07-28T17:01:35.000Z
deployment_classifier/setup.py
m-santh/VayuAnukulani
d1b881ac6268c24761dc0ef6db296d7e5ee1a22e
[ "MIT" ]
3
2019-04-01T10:33:20.000Z
2020-10-23T23:29:09.000Z
from setuptools import find_packages from setuptools import setup REQUIRED_PACKAGES = ['tensorflow==1.8.0','pandas==0.23.1','setuptools==38.7.0','numpy==1.14.1','Keras==2.1.4','scikit_learn==0.19.1','h5py'] setup( name='classifier', version='0.1', install_requires=REQUIRED_PACKAGES, packages=find_packages(), include_package_data=True, description='My training application package.', author='Divyam Madaan', author_email='divyam3897@gmail.com', license='MIT', zip_safe=False )
28.888889
140
0.701923
d2b08ef7b1d20d9d85caa8e8727b92065aef39a2
1,023
py
Python
day5.py
zsmoore/Advent-Of-Code-2017
895a7fbaa8b8b82a338dac967bccbf97b2092b20
[ "MIT" ]
null
null
null
day5.py
zsmoore/Advent-Of-Code-2017
895a7fbaa8b8b82a338dac967bccbf97b2092b20
[ "MIT" ]
null
null
null
day5.py
zsmoore/Advent-Of-Code-2017
895a7fbaa8b8b82a338dac967bccbf97b2092b20
[ "MIT" ]
null
null
null
import sys import copy if __name__ == "__main__": main()
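# ---- Editorial sketch (not part of the original record) --------------------
# ``main`` is elided here. Advent of Code 2017 day 5 is the jump-offset maze;
# a version consistent with the imports above (input source assumed to be
# stdin, which is a guess; in the real file main would precede the guard):
def main_sketch():
    offsets = [int(line) for line in sys.stdin if line.strip()]
    part2 = copy.copy(offsets)
    # Part 1: read the offset, bump it by one, then jump.
    steps, i = 0, 0
    while 0 <= i < len(offsets):
        jump = offsets[i]
        offsets[i] += 1
        i += jump
        steps += 1
    print(steps)
    # Part 2: offsets of three or more are decremented instead.
    steps, i = 0, 0
    while 0 <= i < len(part2):
        jump = part2[i]
        part2[i] += -1 if jump >= 3 else 1
        i += jump
        steps += 1
    print(steps)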
22.23913
60
0.567937
d2b26b4fc46e989fc34f786c463f49d76b84289c
4,949
py
Python
pycudasirecon/_recon_params.py
tlambert03/pycudasirecon
17ca242b1cfed14216d97df480ca2c7f3471d770
[ "MIT" ]
2
2021-06-09T15:35:50.000Z
2021-06-10T05:33:11.000Z
pycudasirecon/_recon_params.py
tlambert03/pycudasirecon
17ca242b1cfed14216d97df480ca2c7f3471d770
[ "MIT" ]
null
null
null
pycudasirecon/_recon_params.py
tlambert03/pycudasirecon
17ca242b1cfed14216d97df480ca2c7f3471d770
[ "MIT" ]
null
null
null
import os from contextlib import contextmanager from tempfile import NamedTemporaryFile from typing import Optional, Sequence from pydantic import BaseModel, Field, FilePath
40.235772
88
0.658921
d2b2f379a4dedf2bd69de6e708c00763f4c5952f
4,098
py
Python
tesseract_converters/tesseract_to_sa_converter.py
superannotateai/annotateonline-input-converters
753211f48676d06718bb2d32501ba1df3ace9121
[ "Apache-2.0" ]
10
2020-04-30T08:36:08.000Z
2021-02-27T21:46:45.000Z
tesseract_converters/tesseract_to_sa_converter.py
superannotateai/input_converters
753211f48676d06718bb2d32501ba1df3ace9121
[ "Apache-2.0" ]
5
2020-03-27T07:16:36.000Z
2020-07-06T04:45:47.000Z
tesseract_converters/tesseract_to_sa_converter.py
superannotateai/annotateonline-input-converters
753211f48676d06718bb2d32501ba1df3ace9121
[ "Apache-2.0" ]
2
2020-06-26T20:02:10.000Z
2020-06-30T20:56:04.000Z
import os import json import argparse if __name__ == '__main__': main()
32.784
80
0.476086
d2b3079900df546aeac436f737e69c681f72b12c
24,525
py
Python
fhirclient/r4models/contract_tests.py
cspears-mitre/CapStatement
2390566ed75d420e0615e3a0aacb77e8c030fdcc
[ "Apache-2.0" ]
1
2021-12-24T11:14:38.000Z
2021-12-24T11:14:38.000Z
fhirclient/r4models/contract_tests.py
cspears-mitre/CapStatement
2390566ed75d420e0615e3a0aacb77e8c030fdcc
[ "Apache-2.0" ]
null
null
null
fhirclient/r4models/contract_tests.py
cspears-mitre/CapStatement
2390566ed75d420e0615e3a0aacb77e8c030fdcc
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Generated from FHIR 3.6.0-bd605d07 on 2018-12-20. # 2018, SMART Health IT. import os import io import unittest import json from . import contract from .fhirdate import FHIRDate
69.279661
152
0.685219
d2b34796cb7b21344e2370533fa5aa6227ece2be
9,978
py
Python
evaluation/evaluation.py
Ennosigaeon/xautoml
6e49ee8b2ffb6d19dcfd9cbe8b3397416c9b5ded
[ "BSD-3-Clause" ]
4
2022-02-27T08:54:08.000Z
2022-03-30T21:19:29.000Z
evaluation/evaluation.py
Ennosigaeon/xautoml
6e49ee8b2ffb6d19dcfd9cbe8b3397416c9b5ded
[ "BSD-3-Clause" ]
1
2022-02-28T09:41:00.000Z
2022-03-02T07:44:17.000Z
evaluation/evaluation.py
Ennosigaeon/xautoml
6e49ee8b2ffb6d19dcfd9cbe8b3397416c9b5ded
[ "BSD-3-Clause" ]
2
2022-03-01T00:38:09.000Z
2022-03-21T09:38:49.000Z
import math import matplotlib.pyplot as plt import numpy as np import pandas as pd import seaborn as sns from scipy.stats import ttest_ind from sklearn.preprocessing import LabelEncoder questionnaire, requirements, tasks = load_data() print_visual_design(index(questionnaire, slice(27, 32))) print_previous_knowledge(index(questionnaire, slice(6, 11))) calculate_sus(index(questionnaire, slice(32, 42))) plot_priority_distribution(requirements) calculate_task_success(tasks) calculate_trust_result(index(questionnaire, slice(14, 20)), index(questionnaire, slice(20, 26))) print('Correlation ML expertise and understanding of ML model') print(questionnaire.iloc[:, [6, 15]].corr())
35.763441
173
0.538785
d2b462f25f6094199e7adc2a1e6de5c3e66fd2f5
4,941
py
Python
matplotlib/tutorials_python/colors/colors.py
gottaegbert/penter
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
[ "MIT" ]
13
2020-01-04T07:37:38.000Z
2021-08-31T05:19:58.000Z
matplotlib/tutorials_python/colors/colors.py
gottaegbert/penter
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
[ "MIT" ]
3
2020-06-05T22:42:53.000Z
2020-08-24T07:18:54.000Z
matplotlib/tutorials_python/colors/colors.py
gottaegbert/penter
8cbb6be3c4bf67c7c69fa70e597bfbc3be4f0a2d
[ "MIT" ]
9
2020-10-19T04:53:06.000Z
2021-08-31T05:20:01.000Z
""" ***************** Specifying Colors ***************** Matplotlib recognizes the following formats to specify a color: * an RGB or RGBA (red, green, blue, alpha) tuple of float values in closed interval ``[0, 1]`` (e.g., ``(0.1, 0.2, 0.5)`` or ``(0.1, 0.2, 0.5, 0.3)``); * a hex RGB or RGBA string (e.g., ``'#0f0f0f'`` or ``'#0f0f0f80'``; case-insensitive); * a shorthand hex RGB or RGBA string, equivalent to the hex RGB or RGBA string obtained by duplicating each character, (e.g., ``'#abc'``, equivalent to ``'#aabbcc'``, or ``'#abcd'``, equivalent to ``'#aabbccdd'``; case-insensitive); * a string representation of a float value in ``[0, 1]`` inclusive for gray level (e.g., ``'0.5'``); * one of ``{'b', 'g', 'r', 'c', 'm', 'y', 'k', 'w'}``, they are the single character short-hand notations for blue, green, red, cyan, magenta, yellow, black, and white. * a X11/CSS4 color name (case-insensitive); * a name from the `xkcd color survey`_, prefixed with ``'xkcd:'`` (e.g., ``'xkcd:sky blue'``; case insensitive); * one of the Tableau Colors from the 'T10' categorical palette (the default color cycle): ``{'tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan'}`` (case-insensitive); * a "CN" color spec, i.e. ``'C'`` followed by a number, which is an index into the default property cycle (``matplotlib.rcParams['axes.prop_cycle']``); the indexing is intended to occur at rendering time, and defaults to black if the cycle does not include color. .. _xkcd color survey: https://xkcd.com/color/rgb/ "Red", "Green", and "Blue" are the intensities of those colors, the combination of which span the colorspace. How "Alpha" behaves depends on the ``zorder`` of the Artist. Higher ``zorder`` Artists are drawn on top of lower Artists, and "Alpha" determines whether the lower artist is covered by the higher. If the old RGB of a pixel is ``RGBold`` and the RGB of the pixel of the Artist being added is ``RGBnew`` with Alpha ``alpha``, then the RGB of the pixel is updated to: ``RGB = RGBOld * (1 - Alpha) + RGBnew * Alpha``. Alpha of 1 means the old color is completely covered by the new Artist, Alpha of 0 means that pixel of the Artist is transparent. For more information on colors in matplotlib see * the :doc:`/gallery/color/color_demo` example; * the `matplotlib.colors` API; * the :doc:`/gallery/color/named_colors` example. "CN" color selection -------------------- "CN" colors are converted to RGBA as soon as the artist is created. For example, """ import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl th = np.linspace(0, 2*np.pi, 128) demo('default') demo('seaborn') ############################################################################### # will use the first color for the title and then plot using the second # and third colors of each style's ``mpl.rcParams['axes.prop_cycle']``. # # # .. _xkcd-colors: # # xkcd v X11/CSS4 # --------------- # # The xkcd colors are derived from a user survey conducted by the # webcomic xkcd. `Details of the survey are available on the xkcd blog # <https://blog.xkcd.com/2010/05/03/color-survey-results/>`__. # # Out of 148 colors in the CSS color list, there are 95 name collisions # between the X11/CSS4 names and the xkcd names, all but 3 of which have # different hex values. For example ``'blue'`` maps to ``'#0000FF'`` # where as ``'xkcd:blue'`` maps to ``'#0343DF'``. Due to these name # collisions all of the xkcd colors have ``'xkcd:'`` prefixed. 
As noted in # the blog post, while it might be interesting to re-define the X11/CSS4 names # based on such a survey, we do not do so unilaterally. # # The name collisions are shown in the table below; the color names # where the hex values agree are shown in bold. import matplotlib._color_data as mcd import matplotlib.patches as mpatch overlap = {name for name in mcd.CSS4_COLORS if "xkcd:" + name in mcd.XKCD_COLORS} fig = plt.figure(figsize=[4.8, 16]) ax = fig.add_axes([0, 0, 1, 1]) for j, n in enumerate(sorted(overlap, reverse=True)): weight = None cn = mcd.CSS4_COLORS[n] xkcd = mcd.XKCD_COLORS["xkcd:" + n].upper() if cn == xkcd: weight = 'bold' r1 = mpatch.Rectangle((0, j), 1, 1, color=cn) r2 = mpatch.Rectangle((1, j), 1, 1, color=xkcd) txt = ax.text(2, j+.5, ' ' + n, va='center', fontsize=10, weight=weight) ax.add_patch(r1) ax.add_patch(r2) ax.axhline(j, color='k') ax.text(.5, j + 1.5, 'X11', ha='center', va='center') ax.text(1.5, j + 1.5, 'xkcd', ha='center', va='center') ax.set_xlim(0, 3) ax.set_ylim(0, j + 2) ax.axis('off')
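# ---- Editorial reconstruction (not part of this record) --------------------
# The ``demo`` function called near the top of the file is elided here.
# Rebuilt from the surrounding tutorial text (the title uses the first cycle
# color, the two curves the second and third):
def demo(sty):
    mpl.style.use(sty)
    fig, ax = plt.subplots(figsize=(3, 3))
    ax.set_title('style: {!r}'.format(sty), color='C0')
    ax.plot(th, np.cos(th), 'C1')
    ax.plot(th, np.sin(th), 'C2')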
36.330882
79
0.646225
d2b6b250831a7174cf7989d9fc42a91268a025cd
1,313
py
Python
12-listComprehensions.py
pgiardiniere/notes-WhirlwindTourOfPython
10f483ea4452f0a45f2103886992fd77c2f3ac7c
[ "CC0-1.0" ]
null
null
null
12-listComprehensions.py
pgiardiniere/notes-WhirlwindTourOfPython
10f483ea4452f0a45f2103886992fd77c2f3ac7c
[ "CC0-1.0" ]
null
null
null
12-listComprehensions.py
pgiardiniere/notes-WhirlwindTourOfPython
10f483ea4452f0a45f2103886992fd77c2f3ac7c
[ "CC0-1.0" ]
null
null
null
# List Comprehensions

#########################
### Basic List Comprehensions
#########################
# allow us to circumvent constructing lists with for loops

l = []                          # The Old Way
for n in range(12):
    l.append(n**2)

[n ** 2 for n in range(12)]     # Comprehension way

# General Syntax:
# [ `expr` for `var` in `iterable` ]

### Multiple iteration --- use tuples!
[(i, j) for i in range(2) for j in range(3)]

### Conditionals on the Iterator
[i for i in range(20) if i % 3 > 0]     # S = {i | 0 <= i < 20, 3!|i, i ∈ Z}

l = []  # equivalent old-school construction:
for val in range(20):
    if val % 3:
        l.append(val)

### Conditionals on the Value
# C code :: single-line conditional operator ?
    # int absval = (val < 0) ? -val : val;
# Python code :: single-line conditional operator if-else
val = -10
val if val >= 0 else -val

# if 3 !| val -> val in list.
# if 2 | val  -> -val.
[val if val % 2 else -val for val in range(20) if val % 3]

#########################
### Other comprehensions
#########################
{ n**2 for n in range(12) }         # Set comprehension
{ n:n**2 for n in range(12) }       # Dict comprehension

{ a % 3 for a in range(1000) }      # a = {0, 1, 2}

# GENERATOR EXPRESSION ---- see next chapter for deets
( n**2 for n in range(12) )
26.795918
61
0.545316
d2b6cbdba4cdbf4de3ed032d08f889932f594f92
1,515
py
Python
src/chemical_roles/export/cli.py
bgyori/chemical-roles
31a917e911075e3be7eea509e143d3ff48e942cc
[ "MIT" ]
5
2021-02-05T01:27:53.000Z
2021-07-12T15:47:08.000Z
src/chemical_roles/export/cli.py
bgyori/chemical-roles
31a917e911075e3be7eea509e143d3ff48e942cc
[ "MIT" ]
8
2019-10-10T13:02:18.000Z
2020-05-11T18:41:56.000Z
src/chemical_roles/export/cli.py
bgyori/chemical-roles
31a917e911075e3be7eea509e143d3ff48e942cc
[ "MIT" ]
5
2020-06-07T13:11:34.000Z
2021-07-12T14:24:01.000Z
# -*- coding: utf-8 -*- """CLI for Chemical Roles exporters.""" import os import click from ..constants import DATA directory_option = click.option('--directory', default=DATA) if __name__ == '__main__': export()
21.041667
106
0.684488
d2b7475246a09fa72d42e65c0defb8588ba3890e
4,681
py
Python
gdsfactory/geometry/write_drc.py
jorgepadilla19/gdsfactory
68e1c18257a75d4418279851baea417c8899a165
[ "MIT" ]
42
2020-05-25T09:33:45.000Z
2022-03-29T03:41:19.000Z
gdsfactory/geometry/write_drc.py
jorgepadilla19/gdsfactory
68e1c18257a75d4418279851baea417c8899a165
[ "MIT" ]
133
2020-05-28T18:29:04.000Z
2022-03-31T22:21:42.000Z
gdsfactory/geometry/write_drc.py
jorgepadilla19/gdsfactory
68e1c18257a75d4418279851baea417c8899a165
[ "MIT" ]
17
2020-06-30T07:07:50.000Z
2022-03-17T15:45:27.000Z
"""Write DRC rule decks in klayout. TODO: - add min area - define derived layers (composed rules) """ import pathlib from dataclasses import asdict, is_dataclass from typing import List, Optional try: from typing import Literal except ImportError: from typing_extensions import Literal from gdsfactory.config import logger from gdsfactory.install import get_klayout_path from gdsfactory.types import Dict, Layer, PathType layer_name_to_min_width: Dict[str, float] RuleType = Literal[ "width", "space", "enclosing", ] def rule_width(value: float, layer: str, angle_limit: float = 90) -> str: """Min feature size""" category = "width" error = f"{layer} {category} {value}um" return ( f"{layer}.{category}({value}, angle_limit({angle_limit}))" f".output('{error}', '{error}')" ) def rule_space(value: float, layer: str, angle_limit: float = 90) -> str: """Min Space between shapes of layer""" category = "space" error = f"{layer} {category} {value}um" return ( f"{layer}.{category}({value}, angle_limit({angle_limit}))" f".output('{error}', '{error}')" ) def rule_separation(value: float, layer1: str, layer2: str): """Min space between different layers""" error = f"min {layer1} {layer2} separation {value}um" return f"{layer1}.separation({layer2}, {value})" f".output('{error}', '{error}')" def rule_enclosing( value: float, layer1: str, layer2: str, angle_limit: float = 90 ) -> str: """Layer1 must be enclosed by layer2 by value. checks if layer1 encloses (is bigger than) layer2 by value """ error = f"{layer1} enclosing {layer2} by {value}um" return ( f"{layer1}.enclosing({layer2}, angle_limit({angle_limit}), {value})" f".output('{error}', '{error}')" ) def write_layer_definition(layer_map: Dict[str, Layer]) -> str: """Returns layer_map definition script for klayout Args: layer_map: can be dict or dataclass """ layer_map = asdict(layer_map) if is_dataclass(layer_map) else layer_map return [ f"{key} = input({value[0]}, {value[1]})" for key, value in layer_map.items() ] def write_drc_deck(rules: List[str], layer_map: Dict[str, Layer]) -> str: """Returns drc_rule_deck for klayou Args: rules: list of rules layer_map: layer definitions can be dict or dataclass """ script = [] script += write_layer_definition(layer_map=layer_map) script += ["\n"] script += rules return "\n".join(script) def write_drc_deck_macro( name="generic", filepath: Optional[PathType] = None, shortcut: str = "Ctrl+Shift+D", **kwargs, ) -> str: """Write script for klayout rule deck Args: name: drc rule deck name filepath: Optional macro path (defaults to .klayout/drc/name.lydrc) Keyword Args: rules: list of rules layer_map: layer definitions can be dict or dataclass Keyword Args: rules: list of rules layer_map: layer definitions can be dict or dataclass """ script = f"""<?xml version="1.0" encoding="utf-8"?> <klayout-macro> <description>{name} DRC</description> <version/> <category>drc</category> <prolog/> <epilog/> <doc/> <autorun>false</autorun> <autorun-early>false</autorun-early> <shortcut>{shortcut}</shortcut> <show-in-menu>true</show-in-menu> <group-name>drc_scripts</group-name> <menu-path>tools_menu.drc.end</menu-path> <interpreter>dsl</interpreter> <dsl-interpreter-name>drc-dsl-xml</dsl-interpreter-name> <text># {name} DRC # Read about DRC scripts in the User Manual under "Design Rule Check (DRC)" # Based on SOEN pdk https://github.com/usnistgov/SOEN-PDK/tree/master/tech/OLMAC # http://klayout.de/doc/manual/drc_basic.html report("generic DRC") tiles(100) tile_borders(2) threads(3) """ script += write_drc_deck(**kwargs) script += """ 
</text> </klayout-macro> """ filepath = filepath or get_klayout_path() / "drc" / f"{name}.lydrc" filepath = pathlib.Path(filepath) filepath.write_text(script) logger.info(f"Wrote DRC deck to {filepath}") return script if __name__ == "__main__": import gdsfactory as gf rules = [ rule_width(layer="WG", value=0.2), rule_space(layer="WG", value=0.2), rule_width(layer="M1", value=1), rule_width(layer="M2", value=2), rule_space(layer="M2", value=2), rule_separation(layer1="HEATER", layer2="M1", value=1.0), rule_enclosing(layer1="M1", layer2="VIAC", value=0.2), ] drc_rule_deck = write_drc_deck_macro(rules=rules, layer_map=gf.LAYER) print(drc_rule_deck)
26.902299
85
0.654134
d2b75bb3697ff16713aa871c5e493e77fa916f5c
1,620
py
Python
virtus/core/migrations/0004_auto_20180417_1625.py
eltonjncorreia/gerenciar-dados-virtus
b8e1b8caa152b18221046f6841761d805b232268
[ "MIT" ]
null
null
null
virtus/core/migrations/0004_auto_20180417_1625.py
eltonjncorreia/gerenciar-dados-virtus
b8e1b8caa152b18221046f6841761d805b232268
[ "MIT" ]
null
null
null
virtus/core/migrations/0004_auto_20180417_1625.py
eltonjncorreia/gerenciar-dados-virtus
b8e1b8caa152b18221046f6841761d805b232268
[ "MIT" ]
null
null
null
# Generated by Django 2.0.4 on 2018-04-17 19:25 from django.db import migrations, models
38.571429
114
0.569753
d2b7ebb7c7ccc1338b94c19d7637e3ceac872b46
2,173
py
Python
image_demo.py
a888999a/yolov3fusion1
3659898aee34a351e95ea545236b8bc682901498
[ "MIT" ]
7
2020-09-23T10:37:17.000Z
2021-12-26T00:23:02.000Z
image_demo.py
a888999a/yolov3fusion1
3659898aee34a351e95ea545236b8bc682901498
[ "MIT" ]
null
null
null
image_demo.py
a888999a/yolov3fusion1
3659898aee34a351e95ea545236b8bc682901498
[ "MIT" ]
null
null
null
#! /usr/bin/env python # coding=utf-8 #================================================================ # Copyright (C) 2019 * Ltd. All rights reserved. # # Editor : VIM # File name : image_demo.py # Author : YunYang1994 # Created date: 2019-01-20 16:06:06 # Description : # #================================================================ import cv2 import numpy as np import core.utils as utils import tensorflow as tf from PIL import Image return_elements = ["input/input_rgb:0","input/input_lwir:0", "pred_sbbox/concat_2:0", "pred_mbbox/concat_2:0", "pred_lbbox/concat_2:0"] pb_file = "./yolov3_coco.pb" image_path_rgb = r"C:\Users\gary\Desktop\b09\test\JPEGImages\rgb\set06_V000_I00019.jpg" image_path_lwir = r"C:\Users\gary\Desktop\b09\test\JPEGImages\lwir\set06_V000_I00019.jpg" num_classes = 1 input_size = 416 graph = tf.Graph() original_rgb = cv2.imread(image_path_rgb) original_lwir = cv2.imread(image_path_lwir) original_image_rgb = cv2.cvtColor(original_rgb, cv2.COLOR_BGR2RGB) original_image_lwir = cv2.cvtColor(original_lwir, cv2.COLOR_BGR2RGB) original_image_size = original_image_rgb.shape[:2] image_rgb,image_lwir = utils.image_preporcess(np.copy(original_image_rgb),np.copy(original_image_lwir), [input_size, input_size]) image_rgb = image_rgb[np.newaxis, ...] image_lwir = image_lwir[np.newaxis, ...] return_tensors = utils.read_pb_return_tensors(graph, pb_file, return_elements) with tf.Session(graph=graph) as sess: pred_sbbox, pred_mbbox, pred_lbbox = sess.run( [return_tensors[2], return_tensors[3], return_tensors[4]], feed_dict={ return_tensors[0]: image_rgb,return_tensors[1]: image_lwir}) pred_bbox = np.concatenate([np.reshape(pred_sbbox, (-1, 5 + num_classes)), np.reshape(pred_mbbox, (-1, 5 + num_classes)), np.reshape(pred_lbbox, (-1, 5 + num_classes))], axis=0) bboxes = utils.postprocess_boxes(pred_bbox, original_image_size, input_size, 0.3) bboxes = utils.nms(bboxes, 0.45, method='nms') image = utils.draw_bbox(original_image_rgb, bboxes) image = Image.fromarray(image) image.show()
35.048387
135
0.673263
d2b930c9508039d505766f1d70318392c9baf277
7,090
py
Python
Sensor/main.py
mahsahadian/EdgeBenchmarkTool
cafddb2eb66732da0bff8f26107788e3c93fbe2f
[ "MIT" ]
null
null
null
Sensor/main.py
mahsahadian/EdgeBenchmarkTool
cafddb2eb66732da0bff8f26107788e3c93fbe2f
[ "MIT" ]
null
null
null
Sensor/main.py
mahsahadian/EdgeBenchmarkTool
cafddb2eb66732da0bff8f26107788e3c93fbe2f
[ "MIT" ]
2
2022-01-31T01:55:56.000Z
2022-02-01T01:43:20.000Z
import cv2 from datetime import * import time import logging import base64 import sys import os import shutil import paho.mqtt.client as mqtt from influxdb import InfluxDBClient import datetime import sys import re from typing import NamedTuple import json from dotenv import load_dotenv load_dotenv("sensor-variables.env") log = logging.getLogger() log.setLevel('DEBUG') handler = logging.StreamHandler() handler.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s")) log.addHandler(handler) logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s') logger = logging.getLogger(__name__) print('Hello 1') def on_connect(client, userdata, flags, rc): """ The callback for when the client receives a CONNACK response from the server.""" print('Connected with result code ' + str(rc)) client.subscribe('topic') # The callback for when a PUBLISH message is received from the server. camera_id = os.getenv('CAMERA_ID') # sys.argv[1] # 123 destination_cluster_ip = os.getenv('DESTINATION_CLUSTER_IP') #sys.argv[2] # '132.207.170.59' JPGQuality = os.getenv('JPGQUALITY')#int(sys.argv[3] ) # 20 transmitdelay = os.getenv('TRANSMITDELAY') # int(sys.argv[4]) # 10 check_looping = 0 INFLUXDB_DATABASE = os.getenv('INFLUXDB_DATABASE_NAME') influx_client = InfluxDBClient(os.getenv('INFLUXDB_DATABASE_IP'), os.getenv('INFLUXDB_DATABASE_PORT'), database=INFLUXDB_DATABASE) _init_influxdb_database() #while True: camera = Camera(camera_id, destination_cluster_ip, JPGQuality, transmitdelay, './imagesout') camera.processVideoStream()
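# ---- Editorial sketch (not part of the original record) --------------------
# ``_init_influxdb_database`` and the ``Camera`` class used above are elided
# here. Camera's internals are unknown, but the database initializer is a
# standard influxdb-python pattern (create once, then select); in the real
# file it would be defined above the call site:
def _init_influxdb_database_sketch():
    databases = influx_client.get_list_database()
    if INFLUXDB_DATABASE not in [db['name'] for db in databases]:
        influx_client.create_database(INFLUXDB_DATABASE)
    influx_client.switch_database(INFLUXDB_DATABASE)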
33.130841
146
0.629478
d2b975627d7b7c61820ad7bec967dad5b7b1e8aa
4,511
py
Python
oxide/plugins/other/StartupItems.py
john-clark/rust-oxide-umod
56feca04f96d8a43a1b56e080fc81d526f7471c3
[ "MIT" ]
13
2019-05-13T08:03:50.000Z
2022-02-06T16:44:35.000Z
oxide/plugins/other/StartupItems.py
john-clark/rust-oxide-umod
56feca04f96d8a43a1b56e080fc81d526f7471c3
[ "MIT" ]
null
null
null
oxide/plugins/other/StartupItems.py
john-clark/rust-oxide-umod
56feca04f96d8a43a1b56e080fc81d526f7471c3
[ "MIT" ]
8
2019-12-12T15:48:03.000Z
2021-12-24T17:04:45.000Z
# Note:
# I add an underscore at the beginning of the variable name, for example "_variable", to prevent
# conflicts with built-in variables from Oxide.

# Used to manage the player's inventory.
import ItemManager
# Used to get player's information.
import BasePlayer

# The plug-in name should be the same as the class name and file name.
51.261364
153
0.570162
d2bbabe21477b77848cbfcaba239a66c8fe04262
1,043
py
Python
error_handler.py
jrg1381/sm_asr_console
47c4090075deaaa7f58e9a092423a58bc7b0a30f
[ "MIT" ]
2
2019-08-07T11:08:06.000Z
2021-01-20T11:28:37.000Z
error_handler.py
jrg1381/sm_asr_console
47c4090075deaaa7f58e9a092423a58bc7b0a30f
[ "MIT" ]
null
null
null
error_handler.py
jrg1381/sm_asr_console
47c4090075deaaa7f58e9a092423a58bc7b0a30f
[ "MIT" ]
null
null
null
# encoding: utf-8 """ Parameterized decorator for catching errors and displaying them in an error popup """ from enum import Enum import npyscreen # PythonDecorators/decorator_function_with_arguments.py def error_handler(title, dialog_type=DialogType.CONFIRM): """ Decorator for functions to catch their exceptions and display them in an error popup :param title The title of the error pop-up :param dialog_type A DialogType enum """ return wrap
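# ---- Editorial reconstruction (assumption, not the author's code) ----------
# The DialogType enum and the decorator internals are elided from this record.
# A minimal version consistent with the visible signature, mapping CONFIRM to
# npyscreen.notify_confirm and WAIT to npyscreen.notify_wait:
class DialogTypeSketch(Enum):
    CONFIRM = 0
    WAIT = 1


def error_handler_sketch(title, dialog_type=DialogTypeSketch.CONFIRM):
    def wrap(function):
        def wrapped(*args, **kwargs):
            try:
                return function(*args, **kwargs)
            except Exception as exc:
                # Show the error in a popup instead of crashing the TUI.
                if dialog_type is DialogTypeSketch.WAIT:
                    npyscreen.notify_wait(str(exc), title=title)
                else:
                    npyscreen.notify_confirm(str(exc), title=title)
        return wrapped
    return wrap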
29.8
89
0.681687
d2bbf8bdae1a8922b42a68b17b2aafcf8fd38f67
13,043
py
Python
parlai/tasks/taskmaster2/agents.py
min942773/parlai_wandb
1d9ba1a0df2199d0247cee8c4929a2598ac7e41a
[ "MIT" ]
2
2017-09-20T21:49:51.000Z
2018-08-12T06:58:10.000Z
parlai/tasks/taskmaster2/agents.py
min942773/parlai_wandb
1d9ba1a0df2199d0247cee8c4929a2598ac7e41a
[ "MIT" ]
7
2021-01-12T01:07:03.000Z
2022-03-12T00:50:45.000Z
parlai/tasks/taskmaster2/agents.py
min942773/parlai_wandb
1d9ba1a0df2199d0247cee8c4929a2598ac7e41a
[ "MIT" ]
1
2021-01-07T11:45:03.000Z
2021-01-07T11:45:03.000Z
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Taskmaster-2 implementation for ParlAI. No official train/valid/test splits are available as of 2020-05-18, so we make our own splits. """ import os import pandas as pd import hashlib from collections import Counter from parlai.core.opt import Opt from parlai.core.teachers import DialogTeacher from parlai.core.metrics import AverageMetric, F1Metric, BleuMetric from parlai.utils.misc import warn_once import json import parlai.utils.logging as logging from typing import Optional, Tuple from parlai.core.message import Message from parlai.utils.io import PathManager import parlai.tasks.taskmaster2.build as build_ DOMAINS = [ 'flights', 'food-ordering', 'hotels', 'movies', 'restaurant-search', 'sports', 'music', ] ONTO_TOKEN = "Onto:" CALL_TOKEN = "Call:" RESP_TOKEN = "Result:"
35.734247
86
0.521966
d2bc823500d7e835a13076bd5554f0f404893ff4
243
py
Python
jmeter_api/timers/__init__.py
dashawn888/jmeter_api
1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd
[ "Apache-2.0" ]
11
2020-03-22T13:30:21.000Z
2021-12-25T06:23:44.000Z
jmeter_api/timers/__init__.py
dashawn888/jmeter_api
1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd
[ "Apache-2.0" ]
2
2020-03-23T00:06:42.000Z
2021-02-24T21:41:40.000Z
jmeter_api/timers/__init__.py
dashawn888/jmeter_api
1ab5b02f3a7c8ad1b84fc50db4fe1fc2fa7c91bd
[ "Apache-2.0" ]
3
2020-11-09T14:14:25.000Z
2021-05-27T02:54:38.000Z
from jmeter_api.timers.constant_throughput_timer.elements import ConstantThroughputTimer, BasedOn from jmeter_api.timers.constant_timer.elements import ConstantTimer from jmeter_api.timers.uniform_random_timer.elements import UniformRandTimer
60.75
97
0.90535
d2bd972bab298994d41d91b8c6a75e48470ccec5
2,520
py
Python
tensorfn/distributed/launch.py
rosinality/tensorfn
cd410c5e6f6906d223f740501e711b9cfae260e4
[ "Apache-2.0" ]
13
2021-04-08T03:09:42.000Z
2022-03-18T08:27:17.000Z
tensorfn/distributed/launch.py
rosinality/tensorfn
cd410c5e6f6906d223f740501e711b9cfae260e4
[ "Apache-2.0" ]
2
2020-08-16T20:25:34.000Z
2021-07-13T00:35:52.000Z
tensorfn/distributed/launch.py
rosinality/tensorfn
cd410c5e6f6906d223f740501e711b9cfae260e4
[ "Apache-2.0" ]
null
null
null
import os import torch from torch import distributed as dist from torch import multiprocessing as mp from tensorfn import distributed as dist_fn
27.096774
101
0.636508
d2bffe6b8d76be452fc84a9fa325b868d681f43c
4,097
py
Python
VideoStitchingSubsystem/StereoCameraAPIs/MonoLensStream.py
AriaPahlavan/see-through-adas-core
7cc530243d324aecd9db538883bb77ee2d519661
[ "Apache-2.0" ]
null
null
null
VideoStitchingSubsystem/StereoCameraAPIs/MonoLensStream.py
AriaPahlavan/see-through-adas-core
7cc530243d324aecd9db538883bb77ee2d519661
[ "Apache-2.0" ]
null
null
null
VideoStitchingSubsystem/StereoCameraAPIs/MonoLensStream.py
AriaPahlavan/see-through-adas-core
7cc530243d324aecd9db538883bb77ee2d519661
[ "Apache-2.0" ]
null
null
null
from enum import Enum from threading import Thread import cv2 import time
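Only the imports survive in this excerpt, but cv2 plus threading suggests the standard threaded video-capture pattern, where a worker thread keeps grabbing frames so read() never blocks. A minimal sketch under that assumption — not the original MonoLensStream implementation:

import cv2
from threading import Thread

class ThreadedStreamSketch:
    def __init__(self, src=0):
        self.stream = cv2.VideoCapture(src)
        self.grabbed, self.frame = self.stream.read()
        self.stopped = False

    def start(self):
        Thread(target=self._update, daemon=True).start()
        return self

    def _update(self):
        # Keep grabbing frames until stop() is called.
        while not self.stopped:
            self.grabbed, self.frame = self.stream.read()

    def read(self):
        return self.frame

    def stop(self):
        self.stopped = True
        self.stream.release()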
28.255172
107
0.573102
d2c143baf7ea1e8434d64873e45800bbd43dfe04
444
py
Python
sdk/python/approzium/mysql/connector/pooling.py
UpGado/approzium
306b40f16a1ba0dfbe3a312e1c40881e98518137
[ "Apache-2.0" ]
59
2020-07-14T17:18:09.000Z
2022-02-24T07:39:22.000Z
sdk/python/approzium/mysql/connector/pooling.py
UpGado/approzium
306b40f16a1ba0dfbe3a312e1c40881e98518137
[ "Apache-2.0" ]
66
2020-07-09T19:11:55.000Z
2022-03-15T11:42:55.000Z
sdk/python/approzium/mysql/connector/pooling.py
UpGado/approzium
306b40f16a1ba0dfbe3a312e1c40881e98518137
[ "Apache-2.0" ]
9
2020-07-09T19:20:45.000Z
2022-02-24T07:39:26.000Z
from mysql.connector.pooling import MySQLConnectionPool from ._connect import _parse_kwargs, _patch_MySQLConnection
31.714286
61
0.75
d2c30d506f338f0ad2e0b0a0c5af2f47676aea3a
267
py
Python
setup.py
Faust-Wang/vswarm
d18ce643218c18ef1e762f40562104b2a0926ad7
[ "MIT" ]
21
2021-03-03T10:51:46.000Z
2022-03-28T11:00:35.000Z
setup.py
Faust-Wang/vswarm
d18ce643218c18ef1e762f40562104b2a0926ad7
[ "MIT" ]
2
2021-07-21T07:57:16.000Z
2022-03-17T12:41:51.000Z
setup.py
hvourtsis/vswarm
d18ce643218c18ef1e762f40562104b2a0926ad7
[ "MIT" ]
8
2021-02-27T14:29:55.000Z
2022-01-05T19:40:38.000Z
# Do not manually invoke this setup.py, use catkin instead! from setuptools import setup from catkin_pkg.python_setup import generate_distutils_setup setup_args = generate_distutils_setup( packages=['vswarm'], package_dir={'': 'src'} ) setup(**setup_args)
22.25
60
0.764045
d2c38a755a40c6e19281f0cc94b831f228ba7f94
250
py
Python
实例学习Numpy与Matplotlib/创建 numpy.array.py
shao1chuan/pythonbook
cd9877d04e1e11422d38cc051e368d3d9ce2ab45
[ "MulanPSL-1.0" ]
95
2020-10-11T04:45:46.000Z
2022-02-25T01:50:40.000Z
实例学习Numpy与Matplotlib/创建 numpy.array.py
shao1chuan/pythonbook
cd9877d04e1e11422d38cc051e368d3d9ce2ab45
[ "MulanPSL-1.0" ]
null
null
null
实例学习Numpy与Matplotlib/创建 numpy.array.py
shao1chuan/pythonbook
cd9877d04e1e11422d38cc051e368d3d9ce2ab45
[ "MulanPSL-1.0" ]
30
2020-11-05T09:01:00.000Z
2022-03-08T05:58:55.000Z
import numpy as np

nparr = np.array([i for i in range(10)])    # array from a list comprehension: 0..9
a = np.zeros(10)                            # ten zeros (float64 by default)
f = np.zeros(10, dtype=float)               # same, with the dtype spelled out
n = np.full((3, 5), 44)                     # 3x5 matrix filled with 44
r = np.random.randint(0, 100, size=(3, 5))  # 3x5 random integers in [0, 100)
r2 = np.random.random((3, 5))               # 3x5 random floats in [0, 1)
x = np.linspace(0, 100, 50)                 # 50 evenly spaced points from 0 to 100
print(nparr, a, f, n, r, r2, x)
22.727273
40
0.64
d2c38e45f035250f5b56f9b05cf87de9978e93b9
4,790
py
Python
examples/DecryptLoginExamples/crawlers/weibomonitor/weibomonitor.py
hedou/DecryptLogin
ff86a5d378c8a42d1caebbb7482658a95053f716
[ "Apache-2.0" ]
null
null
null
examples/DecryptLoginExamples/crawlers/weibomonitor/weibomonitor.py
hedou/DecryptLogin
ff86a5d378c8a42d1caebbb7482658a95053f716
[ "Apache-2.0" ]
null
null
null
examples/DecryptLoginExamples/crawlers/weibomonitor/weibomonitor.py
hedou/DecryptLogin
ff86a5d378c8a42d1caebbb7482658a95053f716
[ "Apache-2.0" ]
null
null
null
'''
Function:
    Weibo monitor
Author:
    Charles
WeChat Official Account:
    Charles
'''
import re
import time
from DecryptLogin import login
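The monitor builds on the DecryptLogin package imported above. As I recall the library's entry point (treat the exact call shape and return values as assumptions and check DecryptLogin's docs), a login looks roughly like:

lg = login.Login()
# Hypothetical credentials; returns account info and an authenticated session.
infos_return, session = lg.weibo('username', 'password', 'mobile')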
43.153153
161
0.571816
d2c3e3e6ef11ddd684a0bcebf23085d7e1d9152c
1,191
py
Python
crawlai/items/critter/base_critter.py
apockill/CreepyCrawlAI
2862c03e686801884ffb579a7be29f3c9d0da610
[ "MIT" ]
13
2020-05-04T03:11:26.000Z
2021-12-05T03:57:45.000Z
crawlai/items/critter/base_critter.py
apockill/CreepyCrawlAI
2862c03e686801884ffb579a7be29f3c9d0da610
[ "MIT" ]
null
null
null
crawlai/items/critter/base_critter.py
apockill/CreepyCrawlAI
2862c03e686801884ffb579a7be29f3c9d0da610
[ "MIT" ]
null
null
null
from godot.bindings import ResourceLoader from crawlai.grid_item import GridItem from crawlai.items.food import Food from crawlai.math_utils import clamp from crawlai.turn import Turn from crawlai.position import Position _critter_resource = ResourceLoader.load("res://Game/Critter/Critter.tscn")
24.306122
74
0.715365
d2c4507ff5f2b0e60108a433da49147fd8f6e6c4
3,008
py
Python
exercises/networking_selfpaced/networking-workshop/collections/ansible_collections/community/general/plugins/doc_fragments/nios.py
tr3ck3r/linklight
5060f624c235ecf46cb62cefcc6bddc6bf8ca3e7
[ "MIT" ]
17
2017-06-07T23:15:01.000Z
2021-08-30T14:32:36.000Z
ansible/ansible/plugins/doc_fragments/nios.py
SergeyCherepanov/ansible
875711cd2fd6b783c812241c2ed7a954bf6f670f
[ "MIT" ]
9
2017-06-25T03:31:52.000Z
2021-05-17T23:43:12.000Z
ansible/ansible/plugins/doc_fragments/nios.py
SergeyCherepanov/ansible
875711cd2fd6b783c812241c2ed7a954bf6f670f
[ "MIT" ]
3
2018-05-26T21:31:22.000Z
2019-09-28T17:00:45.000Z
# -*- coding: utf-8 -*- # Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
35.809524
104
0.635306
d2c4dfb8a30f8c36fa075d277e4458a4776a5ca8
25,299
py
Python
torchrec/metrics/rec_metric.py
xing-liu/torchrec
82ffde7a69fdb9c66b79a753d6f03afa5db3f73e
[ "BSD-3-Clause" ]
814
2022-02-23T17:24:14.000Z
2022-03-31T16:52:23.000Z
torchrec/metrics/rec_metric.py
xing-liu/torchrec
82ffde7a69fdb9c66b79a753d6f03afa5db3f73e
[ "BSD-3-Clause" ]
89
2022-02-23T17:29:56.000Z
2022-03-31T23:44:13.000Z
torchrec/metrics/rec_metric.py
xing-liu/torchrec
82ffde7a69fdb9c66b79a753d6f03afa5db3f73e
[ "BSD-3-Clause" ]
68
2022-02-23T17:42:17.000Z
2022-03-28T06:39:55.000Z
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import abc
import math
from collections import defaultdict, deque
from dataclasses import dataclass
from enum import Enum
from typing import (
    Any,
    Callable,
    cast,
    Deque,
    Dict,
    Iterator,
    List,
    Mapping,
    Optional,
    Sequence,
    Tuple,
    Type,
    TypeVar,
    Union,
)

import torch
import torch.distributed as dist
import torch.nn as nn
from torchmetrics import Metric
from torchrec.metrics.metrics_config import RecComputeMode, RecTaskInfo
from torchrec.metrics.metrics_namespace import (
    compose_metric_key,
    MetricNameBase,
    MetricNamespaceBase,
    MetricPrefix,
)


RecModelOutput = Union[torch.Tensor, Dict[str, torch.Tensor]]
DefaultValueT = TypeVar("DefaultValueT")
ComputeIterType = Iterator[
    Tuple[RecTaskInfo, MetricNameBase, torch.Tensor, MetricPrefix]
]

MAX_BUFFER_COUNT = 1000


# The two methods below belong to RecMetricComputation; the rest of that class
# definition is elided in this excerpt.
def compute(self) -> List[MetricComputationReport]:
    if self._my_rank == 0 or self._compute_on_all_ranks:
        return self._compute()
    else:
        return []

def local_compute(self) -> List[MetricComputationReport]:
    return self._compute()


class RecMetric(nn.Module, abc.ABC):
    r"""The main class template to implement a recommendation metric.
    This class contains the recommendation tasks information (RecTaskInfo) and
    the actual computation object (RecMetricComputation). RecMetric processes
    all the information related to RecTaskInfo and models, and passes the
    required signals to the computation object, allowing the implementation of
    RecMetricComputation to focus on the mathematical meaning.

    A new metric that inherits from RecMetric must override the following
    attributes in its own __init__(): `_namespace` and `_metrics_computations`.
    No other methods should be overridden.

    Args:
        world_size (int): the number of trainers.
        my_rank (int): the rank of this trainer.
        batch_size (int): batch size used by this trainer.
        tasks (List[RecTaskInfo]): the information of the model tasks.
        compute_mode (RecComputeMode): the computation mode. See RecComputeMode.
        window_size (int): the window size for the window metric.
        fused_update_limit (int): the maximum number of updates to be fused.
        compute_on_all_ranks (bool): whether to compute metrics on all ranks. This
            is necessary if non-leader ranks want to consume the global metrics
            result.
        process_group (Optional[ProcessGroup]): the process group used for the
            communication. Will use the default process group if not specified.

    Call Args:
        Not supported.

    Returns:
        Not supported.
    Example::

        ne = NEMetric(
            world_size=4,
            my_rank=0,
            batch_size=128,
            tasks=DefaultTaskInfo,
        )
    """

    _computation_class: Type[RecMetricComputation]
    _namespace: MetricNamespaceBase
    _metrics_computations: nn.ModuleList

    _tasks: List[RecTaskInfo]
    _window_size: int
    _tasks_iter: Callable[[str], ComputeIterType]
    _update_buffers: Dict[str, List[RecModelOutput]]
    _default_weights: Dict[Tuple[int, ...], torch.Tensor]

    PREDICTIONS: str = "predictions"
    LABELS: str = "labels"
    WEIGHTS: str = "weights"

    # TODO(stellaya): Refactor the _[fused, unfused]_tasks_iter methods and replace the
    # compute_scope str input with an enum

    def _fuse_update_buffers(self) -> Dict[str, RecModelOutput]:
        ret: Dict[str, RecModelOutput] = {}
        for key, output_list in self._update_buffers.items():
            if len(output_list) > 0:
                ret[key] = fuse(output_list)  # fuse() is a module-level helper not shown in this excerpt
            else:
                assert key == self.WEIGHTS
            output_list.clear()
        return ret

    def _check_fused_update(self, force: bool) -> None:
        if self._fused_update_limit <= 0:
            return
        if len(self._update_buffers[self.PREDICTIONS]) == 0:
            return
        if (
            not force
            and len(self._update_buffers[self.PREDICTIONS]) < self._fused_update_limit
        ):
            return
        fused_arguments = self._fuse_update_buffers()
        self._update(
            predictions=fused_arguments[self.PREDICTIONS],
            labels=fused_arguments[self.LABELS],
            weights=fused_arguments.get(self.WEIGHTS, None),
        )

    def _create_default_weights(self, predictions: torch.Tensor) -> torch.Tensor:
        weights = self._default_weights.get(predictions.size(), None)
        if weights is None:
            weights = torch.ones_like(predictions)
            self._default_weights[predictions.size()] = weights
        return weights

    def _check_nonempty_weights(self, weights: torch.Tensor) -> torch.Tensor:
        return torch.gt(torch.count_nonzero(weights, dim=-1), 0)

    def _update(
        self,
        *,
        predictions: RecModelOutput,
        labels: RecModelOutput,
        weights: Optional[RecModelOutput],
    ) -> None:
        with torch.no_grad():
            if self._compute_mode == RecComputeMode.FUSED_TASKS_COMPUTATION:
                assert isinstance(predictions, torch.Tensor)
                # Reshape the predictions to size([len(self._tasks), self._batch_size])
                predictions = predictions.view(-1, self._batch_size)
                assert isinstance(labels, torch.Tensor)
                labels = labels.view(-1, self._batch_size)
                if weights is None:
                    weights = self._create_default_weights(predictions)
                else:
                    assert isinstance(weights, torch.Tensor)
                    weights = weights.view(-1, self._batch_size)
                # has_valid_weights is a tensor of bool whose length equals the number
                # of tasks. Each value in it corresponds to whether the weights are
                # valid, i.e. are set to non-zero values for that task in this update.
                # If has_valid_weights is False for all the tasks, we just ignore this
                # update.
                has_valid_weights = self._check_nonempty_weights(weights)
                if torch.any(has_valid_weights):
                    self._metrics_computations[0].update(
                        predictions=predictions, labels=labels, weights=weights
                    )
                    self._metrics_computations[0].has_valid_update.logical_or_(
                        has_valid_weights
                    ).byte()
            else:
                for task, metric_ in zip(self._tasks, self._metrics_computations):
                    if task.name not in predictions:
                        continue
                    if torch.numel(predictions[task.name]) == 0:
                        assert torch.numel(labels[task.name]) == 0
                        assert weights is None or torch.numel(weights[task.name]) == 0
                        continue
                    # Reshape the predictions to size([1, self._batch_size])
                    task_predictions = predictions[task.name].view(1, -1)
                    task_labels = labels[task.name].view(1, -1)
                    if weights is None:
                        task_weights = self._create_default_weights(task_predictions)
                    else:
                        task_weights = weights[task.name].view(1, -1)
                    # has_valid_weights is a tensor with only 1 value corresponding to
                    # whether the weights are valid, i.e. are set to non-zero values for
                    # the task in this update.
                    # If has_valid_weights[0] is False, we just ignore this update.
                    has_valid_weights = self._check_nonempty_weights(task_weights)
                    if has_valid_weights[0]:
                        metric_.update(
                            predictions=task_predictions,
                            labels=task_labels,
                            weights=task_weights,
                        )
                        metric_.has_valid_update.logical_or_(has_valid_weights).byte()

    def update(
        self,
        *,
        predictions: RecModelOutput,
        labels: RecModelOutput,
        weights: Optional[RecModelOutput],
    ) -> None:
        if self._fused_update_limit > 0:
            self._update_buffers[self.PREDICTIONS].append(predictions)
            self._update_buffers[self.LABELS].append(labels)
            if weights is not None:
                self._update_buffers[self.WEIGHTS].append(weights)
            self._check_fused_update(force=False)
        else:
            self._update(predictions=predictions, labels=labels, weights=weights)

    # The implementation of compute is very similar to local_compute, but compute
    # overrides the abstract method compute in torchmetrics.Metric, which is wrapped
    # by _wrap_compute

    def get_memory_usage(self) -> Dict[torch.Tensor, int]:
        r"""Estimates the memory of the rec metric instance's underlying tensors;
        returns the map of tensor to size
        """
        tensor_map = {}
        attributes_q = deque(self.__dict__.values())
        while attributes_q:
            attribute = attributes_q.popleft()
            if isinstance(attribute, torch.Tensor):
                tensor_map[attribute] = (
                    attribute.size().numel() * attribute.element_size()
                )
            elif isinstance(attribute, WindowBuffer):
                attributes_q.extend(attribute.buffers)
            elif isinstance(attribute, Mapping):
                attributes_q.extend(attribute.values())
            elif isinstance(attribute, Sequence) and not isinstance(attribute, str):
                attributes_q.extend(attribute)
            elif hasattr(attribute, "__dict__") and not isinstance(attribute, Enum):
                attributes_q.extend(attribute.__dict__.values())
        return tensor_map

    # pyre-fixme[14]: `state_dict` overrides method defined in `Module` inconsistently.


class RecMetricList(nn.Module):
    """
    A list module to encapsulate multiple RecMetric instances and provide the
    same interfaces as RecMetric.

    Args:
        rec_metrics (List[RecMetric]): the list of the input RecMetrics.

    Call Args:
        Not supported.

    Returns:
        Not supported.

    Example::

        ne = NEMetric(
            world_size=4,
            my_rank=0,
            batch_size=128,
            tasks=DefaultTaskInfo
        )
        metrics = RecMetricList([ne])
    """

    rec_metrics: nn.ModuleList
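A hedged end-to-end sketch of the RecMetric call flow, following the docstring's own example. NEMetric and DefaultTaskInfo are assumed importable, the task name 'DefaultTask' and the tensor shapes are illustrative:

import torch

ne = NEMetric(world_size=1, my_rank=0, batch_size=128, tasks=DefaultTaskInfo)
ne.update(
    predictions={'DefaultTask': torch.rand(128)},
    labels={'DefaultTask': torch.randint(2, (128,)).float()},
    weights=None,
)
# Per the compute() shown above, only rank 0 returns results unless
# compute_on_all_ranks is set.
reports = ne.compute()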
38.331818
117
0.616072
d2c55dd79284c9bf304a1f86538b6964cbb89f09
7,594
py
Python
alison.py
johanhoiness/SlothBot
556f9e0f67aa90543bd98889b06a4b939e30450d
[ "MIT" ]
1
2017-06-28T09:24:49.000Z
2017-06-28T09:24:49.000Z
alison.py
johanhoiness/SlothBot
556f9e0f67aa90543bd98889b06a4b939e30450d
[ "MIT" ]
null
null
null
alison.py
johanhoiness/SlothBot
556f9e0f67aa90543bd98889b06a4b939e30450d
[ "MIT" ]
null
null
null
__author__ = 'JohnHiness'

import sys
import os
import random
import time
import string
import connection
from time import strftime
import ceq
import json, urllib2
import thread

args = sys.argv

req_files = ['filegen.py', 'connection.py', 'commands.py', 'general.py', 'automatics.py']
for filename in req_files:
    if not os.path.exists(filename):
        print "Required file \"{}\" not found. Make sure you have acquired all files.".format(filename)
        sys.exit(1)

import filegen

if not os.path.exists('config.py'):
    print 'No configuration-file found. Generating config.py'
    filegen.gen_config()
    python = sys.executable
    print str(python) + '||' + str(python) + '||' + ' '.join(sys.argv)
    os.execl(python, python, * sys.argv)

if not os.path.exists('revar.py'):
    print 'No reconfigurable file found. Generating revar.py'
    filegen.gen_revar()
    python = sys.executable
    print str(python) + '||' + str(python) + '||' + ' '.join(sys.argv)
    os.execl(python, python, * sys.argv)

import config
import revar
import filegen
import commands
import general
import automatics

if not revar.channels:
    revar.channels = config.channel.replace(', ', ',').replace(' ', ',').split(',')

if len(args) > 1:
    if args[1].lower() == 'reconfig' or args[1].lower() == 'config':
        answr = raw_input("This will have you regenerate the configuration file and all old configurations will be lost.\nAre you sure you want to do this?(y/n) ")
        while answr.lower() not in ('y', 'n'):
            answr = raw_input("You must use the letters Y or N to answer: ")
        if answr.lower() == 'y':
            filegen.gen_config()
            sys.exit(0)
        if answr.lower() == 'n':
            sys.exit(0)
    elif args[1].lower() == 'help':
        print "Usage: python alison.py <help | reconfig>"
        sys.exit(0)
    else:
        print "Flag not recognized."
        sys.exit(1)

# connect, server_responses, botendtriggerd, find_imdb_link, work_line and
# work_command are defined in the portion of the file not shown here.
if __name__ == '__main__':
    thread.start_new_thread(automatics.get_ftime, ())
    connect(config.server, config.port)
    thread.start_new_thread(automatics.autoping, ())
    thread.start_new_thread(automatics.autoweather, ())
    thread.start_new_thread(automatics.checkpongs, ())
    thread.start_new_thread(automatics.who_channel, ())
    s = connection.s
    readbuffer = ''
    while True:
        readbuffer = readbuffer + s.recv(2048)
        temp = string.split(readbuffer, "\n")
        readbuffer = temp.pop()

        for rline in temp:
            rline = string.rstrip(rline)
            rline = string.split(rline)
            g = general
            if not server_responses(rline) and len(rline) > 3:
                msg = ' '.join(rline[3:])[1:]
                user = rline[0][1:][:rline[0].find('!')][:-1]
                chan = rline[2]
                if chan.lower() == revar.bot_nick.lower():
                    chan = user
                if config.verbose:
                    print g.ftime + ' << ' + ' '.join(rline)
                else:
                    print g.ftime + ' << ' + chan + ' <{}> '.format(user) + msg
                if general.check_bottriggers(msg):
                    thread.start_new_thread(botendtriggerd, (chan, user, msg),)
                    break
                thread.start_new_thread(find_imdb_link, (chan, msg), )
                thread.start_new_thread(work_line, (chan, user, msg), )
                msg = general.check_midsentencetrigger(msg)
                msg = general.check_triggers(msg)
                if msg:
                    thread.start_new_thread(work_command, (chan, user, msg), )
29.095785
157
0.658019
d2c5679b86d58ca48ad37cdef98dbe5e554266cb
2,364
py
Python
pyroomacoustics/experimental/tests/test_deconvolution.py
HemaZ/pyroomacoustics
c401f829c71ff03a947f68f9b6b2f48346ae84b2
[ "MIT" ]
1
2020-02-13T14:39:37.000Z
2020-02-13T14:39:37.000Z
pyroomacoustics/experimental/tests/test_deconvolution.py
HemaZ/pyroomacoustics
c401f829c71ff03a947f68f9b6b2f48346ae84b2
[ "MIT" ]
null
null
null
pyroomacoustics/experimental/tests/test_deconvolution.py
HemaZ/pyroomacoustics
c401f829c71ff03a947f68f9b6b2f48346ae84b2
[ "MIT" ]
1
2021-01-14T08:42:47.000Z
2021-01-14T08:42:47.000Z
from unittest import TestCase
import numpy as np
from scipy.signal import fftconvolve
import pyroomacoustics as pra

# fix seed for repeatability
np.random.seed(0)

h_len = 30
x_len = 1000
SNR = 1000.  # decibels

h_lp = np.fft.irfft(np.ones(5), n=h_len)
h_rand = np.random.randn(h_len)
h_hann = pra.hann(h_len, flag='symmetric')

x = np.random.randn(x_len)
noise = np.random.randn(x_len + h_len - 1)

def generate_signals(SNR, x, h, noise):
    ''' run convolution '''

    # noise standard deviation
    sigma_noise = 10**(-SNR / 20.)

    y = fftconvolve(x, h)
    y += sigma_noise * noise

    return y, sigma_noise

if __name__ == '__main__':

    import matplotlib.pyplot as plt

    h = h_hann

    y, sigma_noise = generate_signals(SNR, x, h, noise)

    h_hat1 = pra.experimental.deconvolve(y, x, length=h_len)
    res1 = np.linalg.norm(y - fftconvolve(x, h_hat1))**2 / y.shape[0]
    mse1 = np.linalg.norm(h_hat1 - h)**2 / h_len

    h_hat2 = pra.experimental.wiener_deconvolve(y, x, length=h_len,
                                                noise_variance=sigma_noise**2,
                                                let_n_points=15)
    res2 = np.linalg.norm(y - fftconvolve(x, h_hat2))**2 / y.shape[0]
    mse2 = np.linalg.norm(h_hat2 - h)**2 / h_len

    print('MSE naive: rmse=', np.sqrt(mse1), ' res=', pra.dB(res1, power=True))
    print('MSE Wiener: rmse=', np.sqrt(mse2), ' res=', pra.dB(res2, power=True))

    plt.plot(h)
    plt.plot(h_hat1)
    plt.plot(h_hat2)
    plt.legend(['Original', 'Naive', 'Wiener'])
    plt.show()
26.266667
115
0.630711
d2c5ccb03692b30b21e99cbcada633194e147414
7,423
py
Python
pthelper/img_to_txt.py
hkcountryman/veg-scanner
6b3aa4d0799c901cecdbc0f4b5ca61b0d754ab30
[ "MIT" ]
null
null
null
pthelper/img_to_txt.py
hkcountryman/veg-scanner
6b3aa4d0799c901cecdbc0f4b5ca61b0d754ab30
[ "MIT" ]
null
null
null
pthelper/img_to_txt.py
hkcountryman/veg-scanner
6b3aa4d0799c901cecdbc0f4b5ca61b0d754ab30
[ "MIT" ]
null
null
null
import cv2 as cv
from deskew import determine_skew
import numpy as np
from PIL import Image, ImageFilter, ImageOps, UnidentifiedImageError
from pytesseract import image_to_string
from skimage import io
from skimage.color import rgb2gray
from skimage.transform import rotate
from spellchecker import SpellChecker
import traceback

# On Windows, you need to tell it where Tesseract is installed, for example:
# pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'

# OCR Stuff
####################################################################################################

def to_text(pic):
    """
    Read and return text from an image.

    Args:
        pic: filename string, pathlib.Path object, or file object to read.

    Returns:
        Text from the image.
    """
    try:
        img = Image.open(pic)
    except FileNotFoundError:
        print("File " + pic + " does not exist.")
        quit()
    except UnidentifiedImageError:
        print("That file is not an image.")
        quit()
    except:
        print("Unanticipated error:")
        traceback.print_exc()
        quit()
    img = remove_alpha(img)  # keep the converted image; discarding it was a no-op
    text = image_to_string(img)
    return text

def valid_text(ocr, accuracy_pct, language="en", distance=2, case_sensitive=True):
    # this spellchecker sucks
    """
    Checks that the output of to_text() makes sense. To build your own dictionary, see
    https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#how-to-build-a-new-dictionary

    Args:
        ocr: string to analyze.
        accuracy_pct: percentage of words in ocr that should be in the dictionary.
        language: language of dictionary (default English); see
            https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#changing-language
        distance: Levenshtein distance (default 2 for shorter words); see
            https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#basic-usage
            https://en.wikipedia.org/wiki/Levenshtein_distance
        case_sensitive: whether dictionary lookups should respect case.

    Returns:
        Boolean indicating success of to_text():
            True: to_text() makes sense.
            False: to_text() returned nonsense.
    """
    if ocr == "":
        return False  # if it returned nothing
    word_list = ocr.split()  # get list of all words in input string
    spell = SpellChecker(language=language, distance=distance, case_sensitive=case_sensitive)
    misspelled = spell.unknown(word_list)  # list of unknown words from word_list
    #print(misspelled)
    #print(word_list)
    if (len(word_list) - len(misspelled)) / len(word_list) < accuracy_pct / 100:
        return False  # if it returned gibberish
    return True  # otherwise, all good

def parse(pic, accuracy_pct, language="en", distance=2, case_sensitive=True):
    """
    Attempts OCR with image and decides if processing is needed.

    Args:
        pic: filename string, pathlib.Path object, or file object to read.
        accuracy_pct: percentage of words in string that should be in the dictionary.
        language: language of dictionary (default English); see
            https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#changing-language
        distance: Levenshtein distance (default 2 for shorter words); see
            https://pyspellchecker.readthedocs.io/en/latest/quickstart.html#basic-usage
            https://en.wikipedia.org/wiki/Levenshtein_distance

    Returns:
        Text from the image if OCR was successful; otherwise a failure message.
    """
    text = to_text(pic)
    if valid_text(text, accuracy_pct, language=language, distance=distance,
                  case_sensitive=case_sensitive):
        return text
    else:
        return "OCR failed."  # time for processing

# Image Processing Stuff
####################################################################################################

def remove_alpha(pic):
    """
    Removes the alpha channel from an image, if it exists. Necessary for OCR.

    Args:
        pic: PIL.Image object to convert.
    Returns:
        The PIL.Image object in RGB format.
    """
    return pic.convert("RGB")

def invert(pic):
    """
    Inverts the colors in an image. Useful if OCR doesn't work.

    Args:
        pic: PIL.Image object to invert.

    Returns:
        The inverted PIL.Image object.
    """
    return ImageOps.invert(remove_alpha(pic))  # negative colors

'''def resize(pic): # needs work: possible key error "dpi"
    """
    Resizes an image that is less than 300 dpi. Useful if OCR doesn't work.

    Args:
        pic: PIL.Image object to resize.

    Returns:
        The resized PIL.Image object.
    """
    pic = remove_alpha(pic)
    res = pic.info["dpi"]  # fetch tuple of dpi
    lower = min(res)  # get the lower of the two entries in the tuple
    factor = 300 / lower  # how much should we scale?
    resized = pic.resize((round(pic.size[0]*factor), round(pic.size[1]*factor)))  # scale it!
    return resized'''

def threshold(pic, gaussian=True):  # needs work
    """
    Applies thresholding to the image. Doesn't work. (Tesseract already tries the
    Otsu algorithm.)

    Args:
        pic: filename string, pathlib.Path object, or file object to read.
        gaussian: boolean:
            True: apply adaptive Gaussian thresholding.
            False: apply adaptive mean thresholding.

    Returns:
        The image with thresholding.
    """
    img = cv.imread(pic, 0)  # read the requested image as grayscale
    if gaussian:  # adaptive Gaussian thresholding
        img = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv.THRESH_BINARY, 11, 2)
    else:  # adaptive mean thresholding
        img = cv.adaptiveThreshold(img, 255, cv.ADAPTIVE_THRESH_MEAN_C,
                                   cv.THRESH_BINARY, 11, 2)
    return Image.fromarray(img)

def denoise(pic):  # needs work
    """
    Allegedly removes noise? Useful if OCR doesn't work.

    Args:
        pic: filename string, pathlib.Path object, or file object to read.

    Returns:
        The denoised image.
    """
    img = cv.imread(pic)
    img = cv.fastNlMeansDenoising(img)
    return Image.fromarray(img)

def dilate(pic, size):
    """
    Dilates the text (grows edges of characters) if it's against a common background.
    Useful if OCR doesn't work.

    Args:
        pic: PIL.Image object to dilate.
        size: kernel size, in pixels. Recommend starting at 1.

    Returns:
        The dilated PIL.Image object.
    """
    pic = remove_alpha(pic)
    return pic.filter(ImageFilter.MaxFilter(size))

def erode(pic, size):
    """
    Erodes the text (shrinks edges of characters) if it's against a common background.
    Useful if OCR doesn't work.

    Args:
        pic: PIL.Image object to erode.
        size: kernel size, in pixels. Recommend starting at 1.

    Returns:
        The eroded PIL.Image object.
    """
    pic = remove_alpha(pic)
    return pic.filter(ImageFilter.MinFilter(size))

def deskew(pic, output):  # needs work
    """
    Deskews an image. Useful if OCR doesn't work.

    Args:
        pic: filename string, pathlib.Path object, or file object to read.
        output: string to save output as
    """
    # Thanks to Stephane Brunner (https://github.com/sbrunner) for deskew and the code!
    img = io.imread(pic)
    grayscale = rgb2gray(img)
    angle = determine_skew(grayscale)
    rotated = rotate(img, angle, resize=True) * 255
    io.imsave(output, rotated.astype(np.uint8))
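Putting the pipeline together, a short usage sketch of parse(); the file name and the 60% threshold are made up for illustration:

if __name__ == '__main__':
    # Accept the OCR output if at least 60% of its words pass the spellchecker.
    print(parse('scan.jpg', accuracy_pct=60))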
33.588235
108
0.649064
d2c5ed1f81d8bfe0be0278969594e7da6dcf2781
3,544
py
Python
scripts/training.py
tobinsouth/privacy-preserving-synthetic-mobility-data
fd4d1851b47e3e7304761a894b460e8345fae5db
[ "MIT" ]
null
null
null
scripts/training.py
tobinsouth/privacy-preserving-synthetic-mobility-data
fd4d1851b47e3e7304761a894b460e8345fae5db
[ "MIT" ]
null
null
null
scripts/training.py
tobinsouth/privacy-preserving-synthetic-mobility-data
fd4d1851b47e3e7304761a894b460e8345fae5db
[ "MIT" ]
null
null
null
# Params
learning_rate = 0.001
k = 0.0025
x0 = 2500
epochs = 4
batch_size = 16

import torch, numpy as np
from tqdm import tqdm

# torch must be imported before the device can be chosen
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Get the dataloader
from dataloader import get_train_test
trainStays, testStays = get_train_test(train_size=0.95, batch_size=batch_size, shuffle=True, dataset='cuebiq')

# Load and define the model
from VAE import SentenceVAE

# Model params
params = dict(
    vocab_size = trainStays.dataset.dataset._vocab_size,
    max_sequence_length = trainStays.dataset.dataset._max_seq_len,
    embedding_size = 256,
    rnn_type = 'gru',
    hidden_size = 256,
    num_layers = 1,
    bidirectional = False,
    latent_size = 16,
    word_dropout = 0,
    embedding_dropout = 0.5,
    sos_idx=0,
    eos_idx=0,
    pad_idx=0,
    unk_idx=1,
    device=device,
)

model = SentenceVAE(**params)
model = model.to(device)

# Custom loss function from paper
NLL = torch.nn.NLLLoss(ignore_index=0, reduction='sum')
def loss_fn(logp, target, mean, logv, step, k, x0):
    """The loss function used in the paper, taken from https://github.com/timbmg/Sentence-VAE"""
    target = target.view(-1)
    logp = logp.view(-1, logp.size(2))

    # Negative Log Likelihood
    NLL_loss = NLL(logp, target)

    # KL Divergence, with a sigmoid annealing schedule for its weight
    KL_loss = -0.5 * torch.sum(1 + logv - mean.pow(2) - logv.exp())
    KL_weight = float(1/(1+np.exp(-k*(step-x0))))

    return NLL_loss, KL_loss, KL_weight

optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Logging with tensorboard
from torch.utils.tensorboard import SummaryWriter
LOG_DIR = "runs/cuebiq"
comment = f' batch_size = {batch_size} lr = {learning_rate} dp = False'
train_writer = SummaryWriter(LOG_DIR + "/train", comment=comment)
val_writer = SummaryWriter(LOG_DIR + "/val", comment=comment)

# Run training loop
step = 0
for epoch in range(epochs):
    running_loss = 0.0
    for i, batch in enumerate(tqdm(trainStays, miniters=500)):
        batch = batch.to(device)

        # Forward pass
        logp, mean, logv, z = model(batch)

        # loss calculation
        NLL_loss, KL_loss, KL_weight = loss_fn(logp, batch, mean, logv, step, k, x0)
        loss = (NLL_loss + KL_weight * KL_loss) / batch_size
        loss.to(device)

        # backward + optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        step += 1

        running_loss += loss.item()
        if i % 1000 == 999:
            train_writer.add_scalar('loss', running_loss / 1000, epoch * len(trainStays) + i)
            running_loss = 0.0

        # Periodic Validation and checkpointing
        if i % 20000 == 19999:
            model.eval()
            val_loss = 0.0
            for batch in testStays:
                batch = batch.to(device)
                logp, mean, logv, z = model(batch)
                NLL_loss, KL_loss, KL_weight = loss_fn(logp, batch, mean, logv, step, k, x0)
                loss = (NLL_loss + KL_weight * KL_loss) / batch_size
                val_loss += loss.item()
            # Average over the validation batches (the original divided the sum by two
            # different hard-coded constants, 20000 and 10000)
            val_loss /= len(testStays)
            val_writer.add_scalar('loss', val_loss, epoch * len(trainStays) + i)
            model.train()

            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': val_loss,
                'params': params,
            }, '../models/cuebiq_vae.pt')

train_writer.close()
val_writer.close()
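For intuition about the sigmoid KL-annealing schedule in loss_fn, here is the weight evaluated at a few steps using the script's own constants (k=0.0025, x0=2500):

import numpy as np
for step in (0, 2500, 5000):
    # 1 / (1 + exp(-k * (step - x0))): ~0.002 at step 0, 0.5 at x0, ~0.998 at 5000
    print(step, float(1 / (1 + np.exp(-0.0025 * (step - 2500)))))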
30.290598
110
0.628668