| max_stars_repo_path (string, 3–269 chars) | max_stars_repo_name (string, 4–119 chars) | max_stars_count (int64, 0–191k) | id (string, 1–7 chars) | content (string, 6–1.05M chars) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
UE4Parse/Assets/Objects/FPropertyTag.py
|
zbx911/pyUE4Parse
| 13
|
12777251
|
from UE4Parse.BinaryReader import BinaryStream
from UE4Parse.Assets.Objects.FName import FName
from UE4Parse.Versions.EUnrealEngineObjectUE4Version import UE4Versions
from UE4Parse.Assets.Objects.FGuid import FGuid
from Usmap import StructProps
from Usmap.Objects.FPropertyTag import FPropertyTag as UsmapTag
class FPropertyTag2:
def __init__(self, **kwargs) -> None:
for k,v in kwargs.items():
setattr(self, k, v)
class FPropertyTag:
ArrayIndex = 0
position = 0
BoolVal: int
EnumName: FName
EnumType: FName
HasPropertyGuid: bool = False
InnerType: FName
Name: FName
PropertyGuid: FGuid
Size: int
SizeOffset: int
StructGuid: FGuid
StructName: FName
Type: FName
ValueType: FName
def __init__(self, reader: BinaryStream, propMappings: StructProps = None):
if propMappings:
propdata = propMappings.data
self.Name = FName(propMappings.Name)
self.ArrayIndex = propMappings.ArraySize
# data section
for attr in ["EnumName", "InnerType", "StructName", "ValueType", "Type"]:
val = getattr(propdata, attr, None)
if val is None:
continue
if attr == "InnerType":
self.InnerData = val #FPropertyTag2(**val)
elif attr == "ValueType":
self.ValueData = val #FPropertyTag2(val)
if isinstance(val, str):
val = FName(val)
if isinstance(val, UsmapTag):
val = FName(val.Type)
setattr(self, attr, val)
return
self.Name = reader.readFName()
if self.Name.isNone:
return
self.Type = reader.readFName()
self.Size = reader.readInt32()
self.ArrayIndex = reader.readInt32()
self.position = reader.base_stream.tell()
if self.Type.Number == 0:
Type = self.Type.string
if Type == "StructProperty":
self.StructName = reader.readFName()
if reader.version >= UE4Versions.VER_UE4_STRUCT_GUID_IN_PROPERTY_TAG:
self.StructGuid = FGuid(reader)
elif Type == "BoolProperty":
self.BoolVal = reader.readByteToInt()
elif Type == "ByteProperty" or Type == "EnumProperty":
self.EnumName = reader.readFName()
elif Type == "ArrayProperty":
if reader.version >= UE4Versions.VER_UE4_ARRAY_PROPERTY_INNER_TAGS:
self.InnerType = reader.readFName()
elif Type == "SetProperty":
if reader.version >= UE4Versions.VER_UE4_PROPERTY_TAG_SET_MAP_SUPPORT:
self.InnerType = reader.readFName()
elif Type == "MapProperty":
if reader.version >= UE4Versions.VER_UE4_PROPERTY_TAG_SET_MAP_SUPPORT:
self.InnerType = reader.readFName()
self.ValueType = reader.readFName()
self.HasPropertyGuid = reader.readByteToInt()
if self.HasPropertyGuid != 0:
self.PropertyGuid = FGuid(reader)
self.end_pos = reader.tell()
def __repr__(self):
return f"<{self.Name.string} : {self.Type.string}>"
| 2.21875
| 2
|
test/unit/__init__.py
|
tonchik-tm/yookassa-sdk-python
| 30
|
12777252
|
<filename>test/unit/__init__.py
# -*- coding: utf-8 -*-
"""Top-level package for YooKassa API Python Client Library."""
| 1.007813
| 1
|
src/ampycloud/plots/core.py
|
MeteoSwiss/ampycloud
| 0
|
12777253
|
<reponame>MeteoSwiss/ampycloud
"""
Copyright (c) 2021-2022 MeteoSwiss, contributors listed in AUTHORS.
Distributed under the terms of the 3-Clause BSD License.
SPDX-License-Identifier: BSD-3-Clause
Module contains: core plotting routines
"""
# Import from Python
import logging
from typing import Union
# Import from this module
from ..data import CeiloChunk
from ..logger import log_func_call
from .diagnostics import DiagnosticPlot
from .tools import set_mplstyle
# Instantiate the module logger
logger = logging.getLogger(__name__)
@set_mplstyle
@log_func_call(logger)
def diagnostic(chunk: CeiloChunk, upto: str = 'layers', show_ceilos: bool = False,
ref_metar: str = None, ref_metar_origin: str = None,
show: bool = True,
save_stem: str = None, save_fmts: Union[list, str] = None) -> None:
""" A function to create the ampycloud diagnostic plot all the way to the layering step
(included). This is the ultimate ampycloud plot that shows it all (or not - you choose !).
Args:
chunk (CeiloChunk): the CeiloChunk to look at.
upto (str, optional): up to which algorithm steps to plot. Can be one of
['raw_data', 'slices', 'groups', 'layers']. Defaults to 'layers'.
show_ceilos (bool, optional): if True, hits will be colored as a function of the
responsible ceilometer. Defaults to False. No effects unless ``upto='raw data'``.
ref_metar (str, optional): reference METAR message. Defaults to None.
ref_metar_origin (str, optional): name of the source of the reference METAR set with
ref_metar. Defaults to None.
show (bool, optional): will show the plot on the screen if True. Defaults to False.
save_stem (str, optional): if set, will save the plot with this stem (which can include a
path as well). Deafults to None.
save_fmts (list|str, optional): a list of file formats to export the plot to. Defaults to
None = ['pdf'].
Example:
::
from datetime import datetime
import ampycloud
from ampycloud.utils import mocker
from ampycloud.plots import diagnostic
# First create some mock data for the example
mock_data = mocker.canonical_demo_data()
# Then run the ampycloud algorithm on it
chunk = ampycloud.run(mock_data, geoloc='Mock data', ref_dt=datetime.now())
# Create the full ampycloud diagnostic plot
diagnostic(chunk, upto='layers', show=True)
"""
# If the user gave me a unique file format, deal with it
if isinstance(save_fmts, str):
save_fmts = [save_fmts]
if save_fmts is None:
save_fmts = ['pdf']
# Very well, let's start by instantiating a new DiagnosticPlot.
adp = DiagnosticPlot(chunk)
if upto == 'raw_data':
adp.show_hits_only(show_ceilos=show_ceilos)
if upto in ['slices', 'groups', 'layers']:
adp.show_slices()
adp.format_slice_axes()
if upto in ['groups', 'layers']:
adp.show_groups(show_points=(upto == 'groups'))
adp.format_group_axes()
if upto == 'layers':
adp.show_layers()
adp.add_metar()
# And add all the common stuff
adp.add_ref_metar(ref_metar_origin, ref_metar)
adp.format_primary_axes()
adp.add_ceilo_count()
adp.add_max_hits()
adp.add_geoloc_and_ref_dt()
# Save it
if save_stem is not None:
adp.save(save_stem, fmts=save_fmts)
# Show it ?
if show:
adp.show()
# Close the figure to free the memory
if not show:
adp.close_fig()
| 2.375
| 2
|
Poker2.py
|
errore/python_poker
| 2
|
12777254
|
import sys
import random
import pygame
def create_player_cards():
# build the card data for each of the four players
_card = [x for x in range(13)]
cards = []
player = [[], [], [], []]
# one deck of cards (jokers removed)
for x in range(4):
color = list(map(lambda n: (n, x), _card))
cards = cards + color
# add a second deck
cards = cards * 2
# shuffle
count = 0
random.shuffle(cards)
# deal
for ct in cards:
player[count % 4].append(ct)
count += 1
return player
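# Illustrative note (not part of the original file): two decks without jokers give
# 104 cards dealt round-robin, so a hedged sanity check could look like this:
# hands = create_player_cards()
# assert len(hands) == 4 and all(len(h) == 26 for h in hands)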
def sort_by_card(_card):
n, _ = _card
if n <= 1:
n += 13
return n
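# Hedged usage sketch (not in the original file): sort_by_card promotes ranks 0 and 1
# above the others when sorting a hand, e.g.
# sort_by_card((12, 0)) -> 12, while sort_by_card((0, 0)) -> 13 and sort_by_card((1, 0)) -> 14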
'''--------------main-----------------'''
# initialize the display
pygame.init()
size = width, height = 1280, 720
black = 0, 0, 0
screen = pygame.display.set_mode(size)
# load the card faces
card_colors = ('k', 'l', 'p', 's') # suits
card_images = [[], [], [], []]
for c in range(4):
for i in range(1, 14):
img = pygame.image.load(f"img/{card_colors[c]}{i}.png")
card_images[c].append(img) # load every card face image
players_cards = create_player_cards()
l_count = 0
for li in range(4):
r_count = 0
players_cards[li].sort(key=sort_by_card)
for c in players_cards[li]:
card, c_colors = c
screen.blit(card_images[c_colors][card], (150 + r_count, 50 + l_count))
pygame.time.wait(10)
pygame.display.flip()
r_count += 30
l_count += 100
# main loop
while 1:
# handle quit events
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
| 3.109375
| 3
|
Dataset/Leetcode/train/111/329.py
|
kkcookies99/UAST
| 0
|
12777255
|
class Solution:
def XXX(self, root: TreeNode) -> int:
if not root:
return 0
self.min_depth = float('inf')
def dfs(root, depth):
if not root:
return
if not root.left and not root.right:
self.min_depth = min(self.min_depth, depth)
dfs(root.left, depth+1)
dfs(root.right, depth+1)
dfs(root, 1)
return self.min_depth
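# Illustrative check (not part of the original snippet): for the classic LeetCode 111
# example tree [3,9,20,null,null,15,7] the nearest leaf is 9 at depth 2, so the method
# above returns 2; for a single-node tree it returns 1.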
| 3.203125
| 3
|
test/utils/testcaseparser.py
|
NiklasRosenstein/craftr-dsl
| 1
|
12777256
|
<gh_stars>1-10
from __future__ import annotations
import os
import re
import typing as t
from dataclasses import dataclass
from pathlib import Path
import pytest
from .sectionfileparser import Section, Type, parse_section_file
@dataclass
class CaseData:
filename: str
name: str
input: str
input_line: int
expects: str
expects_line: int
expects_syntax_error: bool
outputs: t.Optional[str]
outputs_line: t.Optional[int]
options: set[str]
def parse_testcase_file(content: str, filename: str, can_have_outputs: bool) -> t.Iterator[CaseData]:
"""
Parses a Craftr DSL parser test case file. Such a file must be of the following form:
```
=== TEST <test_name> ===
<craftr_dsl_code>
<...>
=== EXPECTS ===
<generated_python_code>
<...>
=== END ===
```
Multiple such blocks may be contained in a single file.
"""
it = parse_section_file(content)
try:
while True:
section = next(it, None)
if not section:
break
if section.type == Type.Body and section.value.isspace():
continue
options = set()
while section and section.type == Type.Marker:
m = re.match(r'OPTION\s+(\w+)', section.value)
if not m:
break
options.add(m.group(1))
section = next(it, None)
if not section:
raise ValueError(f'{filename}: missing TEST section')
test_section = section
m = re.match(r'(DISABLED\s+)?TEST\s+(\w+)$', test_section.value)
if test_section.type != Type.Marker or not m:
raise ValueError(f'{filename}: expected TEST section at line {test_section.line}, got {test_section}')
test_disabled = m.group(1)
test_name = m.group(2)
test_body = next(it)
if test_body.type != Type.Body:
raise ValueError(f'{filename}: expected TEST section body at line {test_body.line}')
expects_section = next(it)
m = re.match(r'EXPECTS(\s+SYNTAX ERROR)?$', expects_section.value)
if expects_section.type != Type.Marker or not m:
raise ValueError(f'{filename}: expected EXPECTS section at line {expects_section.line}, got {expects_section}')
expects_syntax_error = m.group(1)
expects_body = next(it)
if expects_body.type != Type.Body:
raise ValueError(f'{filename}: expected EXPECTS section body at line {test_body.line}')
next_section = next(it)
if next_section.type != Type.Marker or next_section.value not in ('OUTPUTS', 'END'):
raise ValueError(f'{filename}: expected OUTPUTS|END section at line {next_section.line}, got {next_section}')
outputs_body: t.Optional[Section] = None
if next_section.value == 'OUTPUTS' and can_have_outputs:
outputs_body = next(it)
if outputs_body.type != Type.Body:
raise ValueError(f'{filename}: expected OUTPUT section body at line {outputs_body.line}')
next_section = next(it)
if next_section.type != Type.Marker or next_section.value != 'END':
raise ValueError(f'{filename}: expected END section at line {next_section.line}, got {next_section}')
if not test_disabled:
yield CaseData(
filename,
test_name,
test_body.value,
test_body.line,
expects_body.value,
expects_body.line,
bool(expects_syntax_error),
outputs_body.value if outputs_body else None,
outputs_body.line if outputs_body else None,
options,
)
except StopIteration:
raise ValueError(f'{filename}: incomplete test case section')
def cases_from(path: Path, can_have_outputs: bool) -> t.Callable[[t.Callable], t.Callable]:
"""
Decorator for a test function to parametrize it with the test cases from a directory.
"""
def _load(path):
return {t.name: t for t in parse_testcase_file(path.read_text(), str(path), can_have_outputs)}
test_cases = {}
for root, dirs, files in os.walk(path):
for filename in map(Path(root).joinpath, files):
if filename.suffix == '.txt':
test_cases[filename] = _load(filename)
test_parameters = [(path, name) for path, tests in test_cases.items() for name in tests]
def decorator(func: t.Callable) -> t.Callable:
@pytest.mark.parametrize('path,name', test_parameters)
def wrapper(path, name):
return func(test_cases[path][name])
wrapper.__name__ = func.__name__
return wrapper
return decorator
| 2.65625
| 3
|
riboraptor/ribocode_utils.py
|
saketkc/riboraptor
| 10
|
12777257
|
<reponame>saketkc/riboraptor<filename>riboraptor/ribocode_utils.py<gh_stars>1-10
#!/usr/bin/env python
# -*- coding:UTF-8 -*-
__author__ = "<NAME>"
from collections import namedtuple
import numpy as np
from scipy import stats
from scipy.stats import find_repeats, distributions, ttest_1samp
WilcoxonResult = namedtuple("WilcoxonResult", ("statistic", "pvalue"))
def extract_frame(orf_psite_array):
"""
Extract the frame0, frame1 and frame2 vectors.
"""
if orf_psite_array.size % 3 != 0:
shiftn = orf_psite_array.size % 3
orf_psite_array2 = orf_psite_array[:-shiftn]
f0 = orf_psite_array2[0 : orf_psite_array2.size : 3]
f1 = orf_psite_array2[1 : orf_psite_array2.size : 3]
f2 = orf_psite_array2[2 : orf_psite_array2.size : 3]
else:
f0 = orf_psite_array[0 : orf_psite_array.size : 3]
f1 = orf_psite_array[1 : orf_psite_array.size : 3]
f2 = orf_psite_array[2 : orf_psite_array.size : 3]
return f0, f1, f2
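# Illustrative sketch (not in the original file): any trailing positions that break the
# codon periodicity are dropped before splitting, e.g.
# import numpy as np
# extract_frame(np.arange(7))  # -> (array([0, 3]), array([1, 4]), array([2, 5]))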
def wilcoxon_greater(x, y, zero_method="wilcox", correction=False):
"""
Test whether x is larger than y (one-sided).
"""
if np.allclose(x, y, equal_nan=True):
return WilcoxonResult(np.nan, np.nan)
"""
shamelessly stolen from scipy
"""
if len(x) < 10 and not (np.allclose(x, x[0]) and np.allclose(y, y[0])):
# sample size too small, using the ttest
t_statistic, t_pvalue = ttest_1samp(x - y, popmean=0)
if np.mean(x - y) > 0:
t_pvalue /= 2.0
else:
t_pvalue = 1 - t_pvalue / 2.0
return WilcoxonResult(t_statistic, t_pvalue)
if zero_method not in ["wilcox", "pratt", "zsplit"]:
raise ValueError(
"Zero method should be either 'wilcox' " "or 'pratt' or 'zsplit'"
)
if y is None:
d = np.asarray(x)
else:
x, y = list(map(np.asarray, (x, y)))
if len(x) != len(y):
raise ValueError("Unequal N in wilcoxon. Aborting.")
d = x - y
d[(d == 0) & (x + y != 0)] = -1 # penalty for equal value
if zero_method == "wilcox":
# Keep all non-zero differences
d = np.compress(np.not_equal(d, 0), d, axis=-1)
count = len(d)
# if count < 10:
# warnings.warn("Warning: sample size too small for normal approximation.")
r = stats.rankdata(abs(d))
r_plus = np.sum((d > 0) * r, axis=0)
r_minus = np.sum((d < 0) * r, axis=0)
if zero_method == "zsplit":
r_zero = np.sum((d == 0) * r, axis=0)
r_plus += r_zero / 2.0
r_minus += r_zero / 2.0
T = min(r_plus, r_minus)
mn = count * (count + 1.0) * 0.25
se = count * (count + 1.0) * (2.0 * count + 1.0)
if zero_method == "pratt":
r = r[d != 0]
replist, repnum = find_repeats(r)
if repnum.size != 0:
# Correction for repeated elements.
se -= 0.5 * (repnum * (repnum * repnum - 1)).sum()
se = np.sqrt(se / 24)
correction = 0.5 * int(bool(correction)) * np.sign(T - mn)
z = (T - mn - correction) / se
if r_plus > r_minus:
prob = distributions.norm.sf(abs(z))
else:
prob = 1 - distributions.norm.sf(abs(z))
return WilcoxonResult(T, prob)
def combine_pvals(pvalues, method="stouffer"):
"""
:param pvs
:return: combined pvalue
"""
pvs = pvalues[~np.isnan(pvalues)]
if pvs.size != 2:
comb_pv = np.nan
else:
comb_pv = stats.combine_pvalues(pvalues, method=method)[1]
return comb_pv
def test_frame(f0, f1, f2):
"""
Test whether f0 > f1 and f0 > f2.
"""
pv1 = wilcoxon_greater(f0, f1)
pv2 = wilcoxon_greater(f0, f2)
pv = combine_pvals(np.array([pv1.pvalue, pv2.pvalue]))
return pv1, pv2, pv
| 2.578125
| 3
|
chaco/scatterplot_1d.py
|
martinRenou/chaco
| 0
|
12777258
|
"""
Scatterplot in one dimension only
"""
from __future__ import absolute_import
from numpy import empty
# Enthought library imports
from enable.api import black_color_trait, ColorTrait, MarkerTrait
from traits.api import Any, Bool, Callable, Enum, Float, Str
# local imports
from .base_1d_plot import Base1DPlot
from .scatterplot import render_markers
class ScatterPlot1D(Base1DPlot):
""" A scatterplot that in 1D """
# The type of marker to use. This is a mapped trait using strings as the
# keys.
marker = MarkerTrait
# The pixel size of the marker, not including the thickness of the outline.
marker_size = Float(4.0)
# The CompiledPath to use if **marker** is set to "custom". This attribute
# must be a compiled path for the Kiva context onto which this plot will
# be rendered. Usually, importing kiva.GraphicsContext will do
# the right thing.
custom_symbol = Any
# The function which actually renders the markers
render_markers_func = Callable(render_markers)
# The thickness, in pixels, of the outline to draw around the marker. If
# this is 0, no outline is drawn.
line_width = Float(1.0)
# The fill color of the marker.
color = black_color_trait
# The color of the outline to draw around the marker.
outline_color = black_color_trait
#------------------------------------------------------------------------
# Selection and selection rendering
# A selection on the plot is indicated by setting the index or value
# datasource's 'selections' metadata item to a list of indices, or the
# 'selection_mask' metadata to a boolean array of the same length as the
# datasource.
#------------------------------------------------------------------------
#: the plot data metadata name to watch for selection information
selection_metadata_name = Str("selections")
#: whether or not to display a selection
show_selection = Bool(True)
#: the marker type for selected points
selection_marker = MarkerTrait
#: the marker size for selected points
selection_marker_size = Float(4.0)
#: the thickness, in pixels, of the selected points
selection_line_width = Float(1.0)
#: the color of the selected points
selection_color = ColorTrait("yellow")
#: the outline color of the selected points
selection_outline_color = black_color_trait
#: The fade amount for unselected regions
unselected_alpha = Float(0.3)
#: The marker outline width to use for unselected points
unselected_line_width = Float(1.0)
#: alignment of markers relative to non-index direction
alignment = Enum("center", "left", "right", "top", "bottom")
#: offset of markers relative to non-index direction in pixels
marker_offset = Float
#: private trait holding position of markers relative to non-index direction
_marker_position = Float
def _draw_plot(self, gc, view_bounds=None, mode="normal"):
coord = self._compute_screen_coord()
pts = empty(shape=(len(coord), 2))
if self.orientation == 'v':
pts[:, 1] = coord
pts[:, 0] = self._marker_position
else:
pts[:, 0] = coord
pts[:, 1] = self._marker_position
self._render(gc, pts)
def _render(self, gc, pts):
with gc:
gc.clip_to_rect(self.x, self.y, self.width, self.height)
if not self.index:
return
name = self.selection_metadata_name
md = self.index.metadata
if name in md and md[name] is not None and len(md[name]) > 0:
selected_mask = md[name][0]
selected_pts = pts[selected_mask]
unselected_pts = pts[~selected_mask]
color = list(self.color_)
color[3] *= self.unselected_alpha
outline_color = list(self.outline_color_)
outline_color[3] *= self.unselected_alpha
if unselected_pts.size > 0:
self.render_markers_func(gc, unselected_pts, self.marker,
self.marker_size, tuple(color),
self.unselected_line_width, tuple(outline_color),
self.custom_symbol)
if selected_pts.size > 0:
self.render_markers_func(gc, selected_pts, self.marker,
self.marker_size, self.selection_color_,
self.line_width, self.outline_color_,
self.custom_symbol)
else:
self.render_markers_func(gc, pts, self.marker,
self.marker_size, self.color_, self.line_width,
self.outline_color_, self.custom_symbol)
def __marker_position_default(self):
return self._get_marker_position()
def _get_marker_position(self):
x, y = self.position
w, h = self.bounds
if self.orientation == 'v':
y, h = x, w
if self.alignment == 'center':
position = y + h/2.0
elif self.alignment in ['left', 'bottom']:
position = y
elif self.alignment in ['right', 'top']:
position = y + h
position += self.marker_offset
return position
def _bounds_changed(self, old, new):
super(ScatterPlot1D, self)._bounds_changed(old, new)
self._marker_position = self._get_marker_position()
def _bounds_items_changed(self, event):
super(ScatterPlot1D, self)._bounds_items_changed(event)
self._marker_position = self._get_marker_position()
def _orientation_changed(self):
super(ScatterPlot1D, self)._orientation_changed()
self._marker_position = self._get_marker_position()
def _alignment_changed(self):
self._marker_position = self._get_marker_position()
| 2.765625
| 3
|
main_app/forms.py
|
m-code12/Rescue
| 2
|
12777259
|
from django.forms import ModelForm
from .models import contact
from django import forms
class ContactForm(ModelForm):
class Meta:
model = contact
fields = ['name', 'email', 'relation']
Father = 'Father'
Mother = 'Mother'
Brother = 'Brother'
Sister = 'Sister'
Husband = 'Husband'
Friend = 'Friend'
Relative = 'Relative'
Other = 'Other'
relations = (
(Father, 'Father'),
(Mother, 'Mother'),
(Brother, 'Brother'),
(Sister, 'Sister'),
(Husband, 'Husband'),
(Friend, 'Friend'),
(Relative, 'Relative'),
(Other, 'Other'),
)
widgets = {
'relation': forms.Select(choices=relations, attrs={'class': 'form-control'}),
}
| 2.5
| 2
|
backend/apps/volontulo/migrations/0001_initial.py
|
magul/volontulo
| 16
|
12777260
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
from django.conf import settings
import uuid
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Badge',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('name', models.CharField(max_length=150)),
('slug', models.CharField(max_length=150)),
('priority', models.IntegerField(default=1)),
],
),
migrations.CreateModel(
name='Offer',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('description', models.TextField()),
('requirements', models.TextField()),
('time_commitment', models.TextField()),
('benefits', models.TextField()),
('location', models.CharField(max_length=150)),
('title', models.CharField(max_length=150)),
('started_at', models.DateTimeField(blank=True, null=True)),
('finished_at', models.DateTimeField(blank=True, null=True)),
('time_period', models.CharField(blank=True, default='', max_length=150)),
('status_old', models.CharField(default='NEW', max_length=30, null=True)),
('offer_status', models.CharField(default='unpublished', choices=[('unpublished', 'Unpublished'), ('published', 'Published'), ('rejected', 'Rejected')], max_length=16)),
('recruitment_status', models.CharField(default='open', choices=[('open', 'Open'), ('supplemental', 'Supplemental'), ('closed', 'Closed')], max_length=16)),
('action_status', models.CharField(default='ongoing', choices=[('future', 'Future'), ('ongoing', 'Ongoing'), ('finished', 'Finished')], max_length=16)),
('votes', models.BooleanField(default=0)),
('recruitment_start_date', models.DateTimeField(blank=True, null=True)),
('recruitment_end_date', models.DateTimeField(blank=True, null=True)),
('reserve_recruitment', models.BooleanField(default=True)),
('reserve_recruitment_start_date', models.DateTimeField(blank=True, null=True)),
('reserve_recruitment_end_date', models.DateTimeField(blank=True, null=True)),
('action_ongoing', models.BooleanField(default=False)),
('constant_coop', models.BooleanField(default=False)),
('action_start_date', models.DateTimeField(blank=True, null=True)),
('action_end_date', models.DateTimeField(blank=True, null=True)),
('volunteers_limit', models.IntegerField(blank=True, default=0, null=True)),
],
),
migrations.CreateModel(
name='OfferImage',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('path', models.ImageField(upload_to='offers/')),
('is_main', models.BooleanField(default=False)),
('created_at', models.DateTimeField(auto_now_add=True)),
('offer', models.ForeignKey(to='volontulo.Offer')),
],
),
migrations.CreateModel(
name='Organization',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('name', models.CharField(max_length=150)),
('address', models.CharField(max_length=150)),
('description', models.TextField()),
],
),
migrations.CreateModel(
name='OrganizationGallery',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('path', models.ImageField(upload_to='gallery/')),
('is_main', models.BooleanField(default=False)),
('organization', models.ForeignKey(to='volontulo.Organization', related_name='images')),
],
),
migrations.CreateModel(
name='UserBadges',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('created_at', models.DateTimeField(blank=True, default=django.utils.timezone.now)),
('description', models.CharField(max_length=255)),
('counter', models.IntegerField(blank=True, default=0)),
('badge', models.ForeignKey(to='volontulo.Badge')),
('content_type', models.ForeignKey(null=True, to='contenttypes.ContentType')),
],
),
migrations.CreateModel(
name='UserGallery',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('image', models.ImageField(upload_to='profile/')),
('is_avatar', models.BooleanField(default=False)),
],
),
migrations.CreateModel(
name='UserProfile',
fields=[
('id', models.AutoField(verbose_name='ID', auto_created=True, primary_key=True, serialize=False)),
('is_administrator', models.BooleanField(default=False)),
('uuid', models.UUIDField(default=uuid.uuid4, unique=True)),
('badges', models.ManyToManyField(to='volontulo.Badge', through='volontulo.UserBadges', related_name='user_profile')),
('organizations', models.ManyToManyField(to='volontulo.Organization', related_name='userprofiles')),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='usergallery',
name='userprofile',
field=models.ForeignKey(to='volontulo.UserProfile', related_name='images'),
),
migrations.AddField(
model_name='userbadges',
name='userprofile',
field=models.ForeignKey(to='volontulo.UserProfile', db_column='userprofile_id'),
),
migrations.AddField(
model_name='organizationgallery',
name='published_by',
field=models.ForeignKey(to='volontulo.UserProfile', related_name='gallery'),
),
migrations.AddField(
model_name='offerimage',
name='userprofile',
field=models.ForeignKey(to='volontulo.UserProfile', related_name='offerimages'),
),
migrations.AddField(
model_name='offer',
name='organization',
field=models.ForeignKey(to='volontulo.Organization'),
),
migrations.AddField(
model_name='offer',
name='volunteers',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
),
]
| 1.789063
| 2
|
phenoai/models/transformerlstm.py
|
Daniangio/pheno_phases
| 0
|
12777261
|
<filename>phenoai/models/transformerlstm.py
import logging
import os
from phenoai.models.base_model import BaseModel
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import math
import copy
import numpy as np
logger = logging.getLogger()
class Embedder(nn.Module):
def __init__(self, vocab_size, d_model):
super().__init__()
self.embed = nn.Linear(vocab_size, d_model) # nn.Embedding(vocab_size, d_model)
def forward(self, x):
return self.embed(x)
class PositionalEncoder(nn.Module):
def __init__(self, d_model, max_seq_len = 366):
super().__init__()
self.d_model = d_model
# create constant 'pe' matrix with values dependent on
# pos and i
pe = torch.zeros(max_seq_len, d_model)
for pos in range(max_seq_len):
for i in range(0, d_model, 2):
pe[pos, i] = \
math.sin(pos / (10000 ** ((2 * i)/d_model)))
pe[pos, i + 1] = \
math.cos(pos / (10000 ** ((2 * (i + 1))/d_model)))
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x, device):
# make embeddings relatively larger
x = x * math.sqrt(self.d_model)
# add constant to embedding
seq_len = x.size(1)
x = x + Variable(self.pe[:,:seq_len], requires_grad=False).to(device)
return x
class MultiHeadAttention(nn.Module):
def __init__(self, heads, d_model, dropout = 0.1):
super().__init__()
self.d_model = d_model
self.d_k = d_model // heads
self.h = heads
self.q_linear = nn.Linear(d_model, d_model)
self.v_linear = nn.Linear(d_model, d_model)
self.k_linear = nn.Linear(d_model, d_model)
self.dropout = nn.Dropout(dropout)
self.out = nn.Linear(d_model, d_model)
def forward(self, q, k, v, mask=None):
bs = q.size(0)
# perform linear operation and split into h heads
k = self.k_linear(k).view(bs, -1, self.h, self.d_k)
q = self.q_linear(q).view(bs, -1, self.h, self.d_k)
v = self.v_linear(v).view(bs, -1, self.h, self.d_k)
# transpose to get dimensions bs * h * sl * d_model
k = k.transpose(1,2)
q = q.transpose(1,2)
v = v.transpose(1,2)
# calculate attention using function we will define next
scores = self.attention(q, k, v, self.d_k, mask, self.dropout)
# concatenate heads and put through final linear layer
concat = scores.transpose(1,2).contiguous().view(bs, -1, self.d_model)
# output = self.out(concat)
return concat
def attention(self, q, k, v, d_k, mask=None, dropout=None):
scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(d_k)
#[batch_size, 1, 366, 366]
if mask is not None:
mask = mask.unsqueeze(1)
#print('ATTENTION MASK', mask)
scores = scores.masked_fill(mask == 0, -1e9)
scores = F.softmax(scores, dim=-1)
if dropout is not None:
scores = dropout(scores)
output = torch.matmul(scores, v)
return output
class FeedForward(nn.Module):
def __init__(self, d_model, d_ff=2048, dropout = 0.1):
super().__init__()
# We set d_ff as a default to 2048
self.linear_1 = nn.Linear(d_model, d_ff)
self.dropout = nn.Dropout(dropout)
self.linear_2 = nn.Linear(d_ff, d_model)
def forward(self, x):
x = self.dropout(F.relu(self.linear_1(x)))
x = self.linear_2(x)
return x
class Norm(nn.Module):
def __init__(self, d_model, eps = 1e-6):
super().__init__()
self.size = d_model
# create two learnable parameters to calibrate normalisation
self.alpha = nn.Parameter(torch.ones(self.size))
self.bias = nn.Parameter(torch.zeros(self.size))
self.eps = eps
def forward(self, x):
norm = self.alpha * (x - x.mean(dim=-1, keepdim=True)) / (x.std(dim=-1, keepdim=True) + self.eps) + self.bias
return norm
# build an encoder layer with one multi-head attention layer and one feed-forward layer
class EncoderLayer(nn.Module):
def __init__(self, d_model, heads, dropout = 0.1):
super().__init__()
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.attn = MultiHeadAttention(heads, d_model)
self.ff = FeedForward(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
def forward(self, x, mask):
x2 = self.norm_1(x)
x = x + self.dropout_1(self.attn(x2,x2,x2,mask))
x2 = self.norm_2(x)
# x = x + self.dropout_2(self.ff(x2))
return x2
# build a decoder layer with two multi-head attention layers and
# one feed-forward layer
class DecoderLayer(nn.Module):
def __init__(self, d_model, heads, dropout=0.01):
super().__init__()
self.norm_1 = Norm(d_model)
self.norm_2 = Norm(d_model)
self.norm_3 = Norm(d_model)
self.dropout_1 = nn.Dropout(dropout)
self.dropout_2 = nn.Dropout(dropout)
self.dropout_3 = nn.Dropout(dropout)
self.attn_1 = MultiHeadAttention(heads, d_model)
self.attn_2 = MultiHeadAttention(heads, d_model)
self.ff = FeedForward(d_model).cuda()
def forward(self, x, e_outputs, src_mask, trg_mask):
x2 = self.norm_1(x)
x = x + self.dropout_1(self.attn_1(x2, x2, x2, trg_mask))
x2 = self.norm_2(x)
x = x + self.dropout_2(self.attn_2(x2, e_outputs, e_outputs, src_mask))
x2 = self.norm_3(x)
# x = x + self.dropout_3(self.ff(x2))
return x2
# We can then build a convenient cloning function that can generate multiple layers:
def get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
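# Hedged note (added for illustration, not in the original file): get_clones deep-copies
# the given module, so e.g. get_clones(EncoderLayer(d_model, heads), N) yields N encoder
# layers with independent parameters inside an nn.ModuleList.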
class Encoder(nn.Module):
def __init__(self, vocab_size, d_model, N, heads, max_seq_len):
super().__init__()
self.N = N
self.embed = Embedder(vocab_size, d_model)
self.pe = PositionalEncoder(d_model, max_seq_len=max_seq_len)
self.layers = get_clones(EncoderLayer(d_model, heads), N)
self.norm = Norm(d_model)
def forward(self, src, mask, device):
# [1, 366, INPUT_SIZE]
x = self.embed(src)
# [1, 366, d_model]
x = self.pe(x, device)
# [1, 366, d_model]
for i in range(self.N):
x = self.layers[i](x, mask)
return self.norm(x)
class Decoder(nn.Module):
def __init__(self, vocab_size, d_model, N, heads, max_seq_len):
super().__init__()
self.N = N
self.embed = Embedder(vocab_size, d_model)
self.pe = PositionalEncoder(d_model, max_seq_len=max_seq_len)
self.layers = get_clones(DecoderLayer(d_model, heads), N)
self.norm = Norm(d_model)
def forward(self, trg, e_outputs, src_mask, trg_mask, device):
x = self.embed(trg)
x = self.pe(x, device)
for i in range(self.N):
x = self.layers[i](x, e_outputs, src_mask, trg_mask)
return self.norm(x)
class LSTM(nn.Module):
def __init__(self, input_size, hidden_layer_size=100, num_layers=2, output_size=3):
super().__init__()
self.hidden_layer_size = hidden_layer_size
self.num_layers = num_layers
self.lstm = nn.LSTM(input_size, hidden_layer_size, num_layers=num_layers)
self.linear = nn.Linear(hidden_layer_size, output_size)
self.hidden_cell = (torch.zeros(num_layers, 1, self.hidden_layer_size),
torch.zeros(num_layers, 1, self.hidden_layer_size))
def reset_hidden_state(self, device):
self.hidden_cell = (torch.zeros(self.num_layers, 1, self.hidden_layer_size).to(device),
torch.zeros(self.num_layers, 1, self.hidden_layer_size).to(device))
def forward(self, input_seq):
lstm_out, self.hidden_cell = self.lstm(input_seq.view(len(input_seq) ,1, -1), self.hidden_cell)
predictions = torch.sigmoid(self.linear(lstm_out.view(len(input_seq), -1)))
return predictions
class TransformerLSTM(BaseModel):
def __init__(self, version, input_features, output_features, d_model=32, N=2, heads=1, max_seq_len=366, **kwargs):
super().__init__(version, input_features, output_features)
self.max_seq_len = max_seq_len
self.encoder = Encoder(len(input_features), d_model, N, heads, max_seq_len)
self.decoder = LSTM(input_size=d_model, output_size=len(output_features))
self.d_model = d_model
def reset_hidden_state(self, device):
self.decoder.reset_hidden_state(device)
def forward(self, src, trg, src_mask=None, trg_mask=None, device='cuda'):
e_outputs = self.encoder(src, src_mask, device)
return self.decoder(e_outputs.view(-1, self.d_model))
def get_weights_path(self, root, place, variety):
return os.path.join(root, f'transformer_lstm_{place}_{variety}_{self.version}.pt')
def run_inference(self, src, device):
src_input = F.pad(src, (0, 0, 0, self.max_seq_len - src.size(1)), 'constant', -2)
src_mask = self.create_masks(src_input.squeeze(0).transpose(0,1), device)
self.reset_hidden_state(device)
y_pred = self(src_input, None, src_mask=src_mask, trg_mask=None, device=device)
y_pr = y_pred.squeeze(0).detach().cpu().numpy()
return y_pr
@staticmethod
def create_masks(src, device, pad=-2): # hack: combine the masks with bitwise ANDs
#src_mask = (src != pad).unsqueeze(-2).to(device)
#size = src.size(1) # get seq_len for matrix
#np_mask = Variable(torch.from_numpy(np.ones((1, size, size)).astype('uint8')) == 1).to(device)
#src_mask = src_mask & src_mask.transpose(1,2) & np_mask
if src is not None:
src_mask = (src != pad).unsqueeze(-2)
size = src.size(1) # get seq_len for matrix
np_mask = nopeak_mask(size).to(device)
src_mask = src_mask & src_mask.transpose(1,2) & np_mask
src_mask[0, :, 0] = True
else:
src_mask = None
return src_mask[:1, ...]
def nopeak_mask(size):
np_mask = np.triu(np.ones((1, size, size)), k=1).astype('uint8') # k=-30 would make the model "blind" to the output of the last 30 values (days)
np_mask = Variable(torch.from_numpy(np_mask) == 0)
return np_mask
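# Illustrative sketch (not part of the original file): for size=3 the helper above yields
# tensor([[[ True, False, False],
#          [ True,  True, False],
#          [ True,  True,  True]]])
# so each position can only attend to itself and to earlier positions.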
| 2.296875
| 2
|
homeassistant/components/elgato/button.py
|
charithmadhuranga/core
| 1
|
12777262
|
"""Support for Elgato button."""
from __future__ import annotations
import logging
from elgato import Elgato, ElgatoError, Info
from homeassistant.components.button import ButtonEntity, ButtonEntityDescription
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import HomeAssistantElgatoData
from .const import DOMAIN
from .entity import ElgatoEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Elgato button based on a config entry."""
data: HomeAssistantElgatoData = hass.data[DOMAIN][entry.entry_id]
async_add_entities([ElgatoIdentifyButton(data.client, data.info)])
class ElgatoIdentifyButton(ElgatoEntity, ButtonEntity):
"""Defines an Elgato identify button."""
def __init__(self, client: Elgato, info: Info) -> None:
"""Initialize the button entity."""
super().__init__(client, info)
self.entity_description = ButtonEntityDescription(
key="identify",
name="Identify",
icon="mdi:help",
entity_category=EntityCategory.CONFIG,
)
self._attr_unique_id = f"{info.serial_number}_{self.entity_description.key}"
async def async_press(self) -> None:
"""Identify the light, will make it blink."""
try:
await self.client.identify()
except ElgatoError:
_LOGGER.exception("An error occurred while identifying the Elgato Light")
| 2.265625
| 2
|
Sketches/RJL/Torrent/TorrentTkGUI.py
|
sparkslabs/kamaelia_orig
| 12
|
12777263
|
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""\
=================
TorrentWindow - a basic GUI for BitTorrent
=================
This component supports downloading from multiple torrents simultaneously
but no deletion or statistics other than percentage completion so far.
How does it work?
-----------------
TorrentWindow uses Tkinter to produce a very simple GUI.
It then produces messages for and accepts messages produced by a
TorrentPatron component (also would work with TorrentClient but
TorrentPatron is preferred, see their respective files).
Example Usage
-------------
The following setup allows torrents to be entered as HTTP URLs into the
GUI and then downloaded with progress information for each torrent.
Graphline(
gui=TorrentWindow(),
httpclient=SimpleHTTPClient(),
backend=TorrentPatron(),
linkages = {
("gui", "outbox") : ("backend", "inbox"),
("gui", "fetchersignal") : ("httpclient", "control"),
("gui", "signal") : ("backend", "control"),
("gui", "fetcher") : ("httpclient", "inbox"),
("httpclient", "outbox") : ("backend", "inbox"),
("backend", "outbox"): ("gui", "inbox")
}
).run()
"""
from Kamaelia.UI.Tk.TkWindow import TkWindow
from Axon.Ipc import producerFinished, shutdown
import Tkinter, time
from TorrentPatron import TorrentPatron
from TorrentIPC import *
class TorrentWindow(TkWindow):
Inboxes = {
"inbox" : "From TorrentPatron backend",
"control" : "Tell me to shutdown",
}
Outboxes = {
"outbox" : "To TorrentPatron backend",
"fetcher" : "To TorrentPatron backend via a resource fetcher, e.g. file reader or HTTP client",
"fetchersignal" : "Shutdown resource fetcher",
"signal" : "When I've shutdown"
}
def __init__(self):
self.pendingtorrents = []
self.torrents = {}
super(TorrentWindow, self).__init__()
def setupWindow(self):
"Create the GUI controls and window for this application"
self.entry = Tkinter.Entry(self.window)
self.addtorrentbutton = Tkinter.Button(self.window, text="Add Torrent", command=self.addTorrent)
self.window.title("Kamaelia BitTorrent Client")
self.entry.grid(row=0, column=0, sticky=Tkinter.N+Tkinter.E+Tkinter.W+Tkinter.S)
self.addtorrentbutton.grid(row=0, column=1, sticky=Tkinter.N+Tkinter.E+Tkinter.W+Tkinter.S)
self.window.rowconfigure(0, weight=1)
self.window.columnconfigure(0, weight=3)
self.window.columnconfigure(1, weight=1)
def addTorrent(self):
"Request the addition of a new torrent"
torrenturl = self.entry.get()
self.pendingtorrents.append(torrenturl.rsplit("/", 1)[-1])
self.send(torrenturl, "fetcher") # forward on the torrent URL/path to the fetcher
self.entry.delete(0, Tkinter.END)
def main(self):
while not self.isDestroyed():
time.sleep(0.05) # reduces CPU usage but a timer component would be better
yield 1
if self.dataReady("control"):
msg = self.recv("control")
if isinstance(msg, producerFinished) or isinstance(msg, shutdown):
self.send(msg, "signal")
self.window.destroy()
if self.dataReady("inbox"):
msg = self.recv("inbox")
if isinstance(msg, TIPCNewTorrentCreated):
torrentname = self.pendingtorrents.pop(0)
labeltext = Tkinter.StringVar() # allow us to change the label's text on the fly
newlabel = Tkinter.Label(self.window, textvariable=labeltext)
self.torrents[msg.torrentid] = (torrentname, newlabel, labeltext)
labeltext.set(torrentname + " - 0%")
newlabel.grid(row=len(self.torrents), column=0, columnspan=2, sticky=Tkinter.N+Tkinter.E+Tkinter.W+Tkinter.S)
self.window.rowconfigure(len(self.torrents), weight=1)
elif isinstance(msg, TIPCTorrentStartFail) or isinstance(msg, TIPCTorrentAlreadyDownloading):
self.pendingtorrents.pop(0) # the oldest torrent not yet started failed so remove it from the list of pending torrents
elif isinstance(msg, TIPCTorrentStatusUpdate):
# print msg.statsdictionary.get("fractionDone","-1")
self.torrents[msg.torrentid][2].set(self.torrents[msg.torrentid][0] + " - " + str(int(msg.statsdictionary.get("fractionDone","0") * 100)) + "%")
self.tkupdate()
self.send(shutdown(), "signal")
self.send(shutdown(), "fetchersignal")
if __name__ == "__main__":
from Kamaelia.Chassis.Graphline import Graphline
import sys
sys.path.append("../HTTP")
from HTTPClient import SimpleHTTPClient
Graphline(
gui=TorrentWindow(),
httpclient=SimpleHTTPClient(),
backend=TorrentPatron(),
linkages = {
("gui", "outbox") : ("backend", "inbox"),
("gui", "fetchersignal") : ("httpclient", "control"),
("gui", "signal") : ("backend", "control"),
("gui", "fetcher") : ("httpclient", "inbox"),
("httpclient", "outbox") : ("backend", "inbox"),
("backend", "outbox"): ("gui", "inbox")
}
).run()
| 2.765625
| 3
|
baseball/random.py
|
jconstam/baseball
| 0
|
12777264
|
<gh_stars>0
#!/usr/bin/env python3
import random
from typing import List
class D6s:
@staticmethod
def roll(count: int = 1) -> List[int]:
results = []
for _ in range(count):
results.append(random.randint(1, 6))
return results
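# Hedged usage sketch (not part of the original file):
# D6s.roll(3) might return e.g. [4, 1, 6]; each element is an independent d6 result in 1..6.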
| 3.09375
| 3
|
src/party3rd/elastic.py
|
yaroslavNikolaev/A.R.M.O.R.
| 1
|
12777265
|
<filename>src/party3rd/elastic.py<gh_stars>1-10
from utils.collectors import GitHubVersionCollector
from utils.configuration import Configuration
from abc import ABC
owner = "elastic"
class BeatsVersionCollector(GitHubVersionCollector, ABC):
repo = "beats"
def __init__(self, config: Configuration):
super().__init__(config, owner, self.repo)
class FilebeatCollector(BeatsVersionCollector):
@staticmethod
def get_application_name() -> str:
return "filebeat"
class MetricbeatCollector(BeatsVersionCollector):
@staticmethod
def get_application_name() -> str:
return "metricbeat"
class AuditbeatCollector(BeatsVersionCollector):
@staticmethod
def get_application_name() -> str:
return "auditbeat"
class JournalbeatCollector(BeatsVersionCollector):
@staticmethod
def get_application_name() -> str:
return "journalbeat"
class KibanaVersionCollector(GitHubVersionCollector):
repo = "kibana"
@staticmethod
def get_application_name() -> str:
return "kibana"
def __init__(self, config: Configuration):
super().__init__(config, owner, self.repo)
class ElasticsearchVersionCollector(GitHubVersionCollector):
repo = "elasticsearch"
@staticmethod
def get_application_name() -> str:
return "elasticsearch"
def __init__(self, config: Configuration):
super().__init__(config, owner, self.repo)
class LogstashVersionCollector(GitHubVersionCollector):
repo = "logstash"
@staticmethod
def get_application_name() -> str:
return "logstash"
def __init__(self, config: Configuration):
super().__init__(config, owner, self.repo)
| 2.171875
| 2
|
appi2c/ext/icon/icon_controller.py
|
andrequeiroz2/appi2c
| 9
|
12777266
|
from appi2c.ext.database import db
from appi2c.ext.icon.icon_models import Icon
def list_all_icon():
icon = Icon.query.all()
return icon
def list_icon_id(id: int) -> Icon:
icon = Icon.query.filter_by(id=id).first()
return icon
def create_icon(html_class: str):
icon = Icon(html_class=html_class)
db.session.add(icon)
db.session.commit()
def update_icon(id: int, html_class: str):
Icon.query.filter_by(id=id).update(dict(html_class=html_class))
db.session.commit()
def list_icon_in_device(devices: list):
if devices is not None:
list_icon = []
for device in devices:
icon = Icon.query.filter_by(id=device.icon_id).first()
list_icon.append(icon.html_class)
return list_icon
return False
| 2.375
| 2
|
examples/human_control.py
|
Voyager1403/yumi-gym
| 12
|
12777267
|
import gym, yumi_gym
import pybullet as p
env = gym.make('yumi-v0')
env.render()
observation = env.reset()
motorsIds = []
for joint in env.joints:
motorsIds.append(p.addUserDebugParameter(joint, -1, 1, 0))
while True:
env.render()
action = []
for motorId in motorsIds:
action.append(p.readUserDebugParameter(motorId))
observation, reward, done, info = env.step(action)
| 2.171875
| 2
|
src/rdbms-connect/azext_rdbms_connect/vendored_sdks/postgresql_flexibleservers/models/_postgre_sql_management_client_enums.py
|
Mannan2812/azure-cli-extensions
| 2
|
12777268
|
<gh_stars>1-10
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from enum import Enum
class ServerVersion(str, Enum):
one_two = "12"
one_one = "11"
class ServerState(str, Enum):
ready = "Ready"
dropping = "Dropping"
disabled = "Disabled"
starting = "Starting"
stopping = "Stopping"
stopped = "Stopped"
updating = "Updating"
class ServerHAState(str, Enum):
not_enabled = "NotEnabled"
creating_standby = "CreatingStandby"
replicating_data = "ReplicatingData"
failing_over = "FailingOver"
healthy = "Healthy"
removing_standby = "RemovingStandby"
class ServerPublicNetworkAccessState(str, Enum):
enabled = "Enabled"
disabled = "Disabled"
class HAEnabledEnum(str, Enum):
enabled = "Enabled"
disabled = "Disabled"
class CreateMode(str, Enum):
default = "Default"
point_in_time_restore = "PointInTimeRestore"
class ResourceIdentityType(str, Enum):
system_assigned = "SystemAssigned"
class SkuTier(str, Enum):
burstable = "Burstable"
general_purpose = "GeneralPurpose"
memory_optimized = "MemoryOptimized"
class ConfigurationDataType(str, Enum):
boolean = "Boolean"
numeric = "Numeric"
integer = "Integer"
enumeration = "Enumeration"
class OperationOrigin(str, Enum):
not_specified = "NotSpecified"
user = "user"
system = "system"
| 1.976563
| 2
|
simplefasta.py
|
ljdursi/seek-vs-sequential
| 0
|
12777269
|
<filename>simplefasta.py
#!/usr/bin/env python
import os
import argparse
class FastaReader(object):
def __init__(self,infile):
self.__infile = infile
self.__havenext = False
self.__next = ""
self.__end = False
def readNext(self):
curlabel = None
sequences = []
done = False
while not done:
if self.__havenext:
line = self.__next
self.__havenext = False
self.__next = ""
else:
line = self.__infile.readline()
if not line:
done = True
self.__end = True
elif line[0] == ">":
if curlabel is None:
curlabel = line[1:]
else:
self.__havenext = True
self.__next = line
done = True
else:
sequences.append(line.strip())
if curlabel is None:
return None
else:
return curlabel, "".join(sequences)
def skipAhead(self, eatLine=False):
done = False
if eatLine:
line = self.__infile.readline()
while not done:
line = self.__infile.readline()
if not line:
done = True
self.__end = True
elif line[0]=='>':
self.__havenext = True
self.__next = line.strip()
break
return
def eof(self):
return self.__end
if __name__ == "__main__":
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin)
parser.add_argument('-s','--skipto',type=float)
args = parser.parse_args()
reader = FastaReader(args.infile)
if args.skipto is None:
record = reader.readNext()
while record:
print(record)
record = reader.readNext()
else:
args.infile.seek(0, os.SEEK_END)
size = args.infile.tell()
if args.skipto < 1:
loc = int(size * args.skipto)
else:
loc = int(args.skipto)
args.infile.seek(loc, os.SEEK_SET)
reader.skipAhead()
print(reader.readNext())
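# Hedged usage sketch (not part of the original file), based on the argparse options above:
#   python simplefasta.py reads.fa           # print every (label, sequence) record
#   python simplefasta.py -s 0.5 reads.fa    # seek to the middle of the file, skip to the next record, print it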
| 3.15625
| 3
|
scripts/create_dataset_definition.py
|
groadabike/DAMP-VSEP-Singles
| 0
|
12777270
|
import argparse
import pandas as pd
from multiprocessing import Pool, cpu_count
from tqdm import tqdm
from pathlib import Path
import json
import librosa
from utils import get_amplitude_scaling_factor, xcorr_searcher_max, load_data
# Filter out performances shorter than ```MIN_DURATION``` secs
MIN_DURATION = 15.0
# Filter out songs with mixtures shorter than vocal in %
# These are errors in the dataset
DURATION_VAR = 0.95
# Framing parameters for RMS
NHOP = 0.010
WIN = 0.025
# Command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('--split', type=str, required=True,
help='Dataset to process')
parser.add_argument('--root_path', type=str, required=True,
help='Root path to DAMP-VSEP')
parser.add_argument('--sample_rate', type=int, required=True,
default=16000)
parser.add_argument('--output_meta_path', type=str, required=True,
help='Path where save the metadata')
def main(args):
metadata_path = Path(args.output_meta_path)
track_list = pd.read_csv(f"split/{args.split}.csv")
metadata = []
pool = Pool(processes=cpu_count())
track_inputs = [(t, Path(args.root_path), args.sample_rate)
for i, t in track_list.iterrows()]
for meta in tqdm(pool.imap_unordered(build_metadata, track_inputs),
total=len(track_inputs)):
if meta:
metadata.append(meta)
tracks = {p: m for p, m in metadata}
metadata_path.mkdir(parents=True, exist_ok=True)
json.dump(tracks, open(metadata_path / f"{args.split}_sr{args.sample_rate}.json",
'w'),
indent=2)
def build_metadata(inputs):
track, root, sample_rate = inputs
hop_length = int(sample_rate * NHOP)
frame_length = int(sample_rate * WIN)
vocal = load_data(root / track['vocal_path'],
sample_rate=sample_rate)
# Discard silence vocal target
if vocal.sum() == 0.0:
print(f"Track {track['perf_key']} is silence - discarded")
return None
# Get original duration to discard short vocal target
vocal_dur = librosa.get_duration(vocal, sr=sample_rate)
if vocal_dur < MIN_DURATION:
print(f"Track {track['perf_key']} too short ({vocal_dur} sec) - discarded")
return None
ori_mix = load_data(root / track['mix_path'],
sample_rate=sample_rate)
ori_mix_dur = librosa.get_duration(ori_mix, sr=sample_rate)
if ori_mix_dur < vocal_dur * DURATION_VAR:
print(f"Mixture {track['perf_key']} length ({ori_mix_dur}) is shorter than vocal length ({vocal_dur}) - discarded")
return None
# Get vocal shifting by doing several xcorr of small segments of vocal.
# The shifting time determines the start points of the background and vocal.
vocal_shift = xcorr_searcher_max(ori_mix, vocal, sample_rate, frame_length, hop_length)
if vocal_shift <= 0:
vocal_start = abs(vocal_shift)
back_start = 0
else:
vocal_start = 0
back_start = vocal_shift
# Get new/real min duration.
back = load_data(root / track['background_path'],
sample_rate=sample_rate)
vocal = vocal[int(vocal_start * sample_rate):]
back = back[int(back_start * sample_rate):]
vocal_dur = librosa.get_duration(vocal, sr=sample_rate)
back_dur = librosa.get_duration(back, sr=sample_rate)
min_dur = min(vocal_dur, back_dur)
# Create mixture to calculate mean and std
mix = vocal[:int(min_dur * sample_rate)] + back[:int(min_dur * sample_rate)]
# Get amplitude for SNR=0
amplitude_scaler = get_amplitude_scaling_factor(vocal, back)
track_info = dict()
track_info['original_mix'] = track['mix_path']
track_info['original_mix_mean'] = \
f"{ori_mix[int(back_start * sample_rate):int(min_dur * sample_rate)].mean()}"
track_info['original_mix_std'] = \
f"{ori_mix[int(back_start * sample_rate):int(min_dur * sample_rate)].std()}"
track_info['mix_mean'] = f"{mix.mean()}"
track_info['mix_std'] = f"{mix.std()}"
track_info['duration'] = f"{min_dur}"
track_info['vocal'] = track['vocal_path']
track_info['vocal_start'] = f"{vocal_start}"
track_info['scaler'] = f"{amplitude_scaler:}"
track_info['background'] = track['background_path']
track_info['background_start'] = f"{back_start}"
return track['perf_key'], track_info
if __name__ == '__main__':
args = parser.parse_args()
main(args)
| 2.28125
| 2
|
visualize_result_files.py
|
CristobalM/lverlet_n_cells
| 0
|
12777271
|
import matplotlib.pyplot as plt
import numpy as np
import re
import os
import sys
from matplotlib import rcParams
from cycler import cycler
import itertools
if len(sys.argv) < 2:
print("Especifique la carpeta con resultados con la siguiente sintaxis:")
print("python %s carpeta_resultados" % sys.argv[0])
exit(1)
results_folder = sys.argv[1]
digit = r'\d*\.?\d+'
regex = r'^result_(%s)_(%s)_%s_\w+_%s_%s_%s_%s_\w+_%s_\.txt$' % (digit, digit, digit, digit, digit, digit, digit, digit)
"""
print(regex)
tomatch = 'result_1.1000_0.6000_50.0000_WallPeriodicBC_1_0.5000_1_0.0100_False_1024_.txt'
matches = re.match(regex, tomatch)
if matches:
print(matches.group(1))
print(matches.group(2))
else:
print("no match")
"""
files = os.listdir(results_folder)
time_lambda_curves = {}
for filename in files:
matches = re.match(regex, filename)
if not matches:
continue
the_lambda = float(matches.group(1))
the_eta = float(matches.group(2))
with open(results_folder + filename, 'r') as f:
first_line = f.readline()
the_time = float(first_line)
if the_eta not in time_lambda_curves:
time_lambda_curves[the_eta] = {
'times': [],
'lambdas': []
}
time_lambda_curves[the_eta]['times'].append(the_time)
time_lambda_curves[the_eta]['lambdas'].append(the_lambda)
marker = itertools.cycle(('s', 'X', '+', 'o', '*', '>', 'h', 'd', '.'))
lines = itertools.cycle((':', '-.', '--', '-'))
# Plot style configuration
plt.figure(figsize=(12, 10), dpi=80, facecolor='w', edgecolor='k')
plt.rc('lines', linewidth=1)
plt.rc('axes', prop_cycle=(cycler('color', ['blue', 'green', 'red',
'magenta', 'black',
'purple', 'pink', 'brown',
'orange', 'coral',
'lightblue', 'lime', 'lavender',
'turquoise', 'darkgreen', 'tan',
'salmon', 'gold',
'darkred', 'darkblue'])))
to_plot = []
for eta, values in time_lambda_curves.items():
to_plot.append((eta, values))
to_plot.sort()
#for eta, values in time_lambda_curves.items():
for eta, values in to_plot:
the_times = values['times']
the_lambdas = values['lambdas']
order = np.argsort(the_lambdas)
xs = np.array(the_lambdas)[order]
ys = np.array(the_times)[order]
plt.plot(xs, ys, label="$\eta = %.1f$" % eta, marker=next(marker), markersize=15, linewidth=3)
plt.xticks(np.arange(0.0, 1.4, 0.1))
plt.yticks(np.arange(0, 10001, 1000))
plt.xlabel('$\lambda$', fontsize=18)
plt.ylabel('Time (s)', fontsize=18)
plt.title('Execution time of the Verlet-list algorithm\n for a physical simulation time of 50 seconds', fontsize=22, y=1.02)
#plot.legend(loc=2, prop={'size': 6})
plt.legend(prop={'size': 16})
plt.grid(alpha=0.5)
plt.show()
| 2.59375
| 3
|
populate/ropensci_libraries/throughputpy/checkNode.py
|
throughput-ec/throughputdb
| 4
|
12777272
|
<reponame>throughput-ec/throughputdb<gh_stars>1-10
from functools import reduce
def checkNode(graph, parentid):
graphquery = """MATCH (n:OBJECT {id: $id})-[]-(:ANNOTATION)-[]-(o:OBJECT)-\
[:isType]-(:TYPE {type:'schema:CodeRepository'})
RETURN COUNT(o) AS repos"""
silent = graph.run(graphquery, {'id': parentid}).data()
links = reduce(lambda x, y: max(y.get('repos'), x.get('repos')), silent)
if type(links) is dict:
links = links.get('repos')
return links
| 2.59375
| 3
|
drf_msal_jwt/exceptions.py
|
narongdejsrn/django-rest-framework-msal
| 3
|
12777273
|
<filename>drf_msal_jwt/exceptions.py
from rest_framework.exceptions import APIException
class CodeException(APIException):
status_code = 401
default_detail = 'Invalid authorization code'
default_code = 'code_error'
class DomainException(APIException):
status_code = 403
default_detail = "The account from the domain you sign in are not allowed"
default_code = 'invalid_domain_error'
class StateException(APIException):
status_code = 401
default_detail = 'Invalid state, please try again'
default_code = 'state_error'
class WrongTokenException(APIException):
status_code = 401
default_detail = 'The access token is no longer valid, please try logging in again'
default_code = 'access_token_error'
| 2.453125
| 2
|
sddr/__init__.py
|
felixGer/PySDDR
| 14
|
12777274
|
<filename>sddr/__init__.py
from .sddr import *
| 1.132813
| 1
|
image_tweet.py
|
pipinstallyogi/Basic_scripts
| 0
|
12777275
|
<filename>image_tweet.py<gh_stars>0
from selenium import webdriver
from getpass import getpass
from time import sleep
usr = input("Enter your username or Email: ")
pwd = getpass("Enter your password: ") #getpass() will help your password remain hidden
image_path = input("Please enter your image path: ")
driver = webdriver.Firefox()  # or webdriver.Chrome() if using Chrome
driver.get("https://twitter.com/login")
username_box = driver.find_element_by_class_name("js-username-field")
username_box.send_keys(usr)
sleep(3)
password_box = driver.find_element_by_class_name("js-password-field")
password_box.send_keys(pwd)
sleep(3)
login_btn = driver.find_element_by_css_selector("button.submit.EdgeButton.EdgeButton--primary.EdgeButtom--medium")
login_btn.submit() # clicking instead of entering a value
sleep(3)
image_box = driver.find_element_by_css_selector("input.file-input.js-tooltip")
image_box.send_keys(image_path)
sleep(3)
tweet_button = driver.find_element_by_css_selector("button.tweet-action.EdgeButton.EdgeButton--primary.js-tweet-btn")
tweet_button.click()
| 3.046875
| 3
|
forum/utils/dates.py
|
kraft99/forum
| 8
|
12777276
|
import datetime
from django.conf import settings
from django.utils import dateformat
import pytz
from forum.models import ForumProfile
def user_timezone(dt, user):
"""
Converts the given datetime to the given User's timezone, if they
have one set in their forum profile.
Adapted from http://www.djangosnippets.org/snippets/183/
"""
tz = settings.TIME_ZONE
if user.is_authenticated():
profile = ForumProfile.objects.get_for_user(user)
if profile.timezone:
tz = profile.timezone
try:
result = dt.astimezone(pytz.timezone(tz))
except ValueError:
# The datetime was stored without timezone info, so use the
# timezone configured in settings.
result = dt.replace(tzinfo=pytz.timezone(settings.TIME_ZONE)) \
.astimezone(pytz.timezone(tz))
return result
def format_datetime(dt, user, date_format, time_format, separator=' '):
"""
Formats a datetime, using ``'Today'`` or ``'Yesterday'`` instead of
the given date format when appropriate.
If a User is given and they have a timezone set in their profile,
the datetime will be translated to their local time.
"""
if user:
dt = user_timezone(dt, user)
today = user_timezone(datetime.datetime.now(), user).date()
else:
today = datetime.date.today()
date_part = dt.date()
delta = date_part - today
if delta.days == 0:
date = u'Today'
elif delta.days == -1:
date = u'Yesterday'
else:
date = dateformat.format(dt, date_format)
return u'%s%s%s' % (date, separator,
dateformat.time_format(dt.time(), time_format))
| 3.53125
| 4
|
scripts/alignment/extract_fastq_from_bam.py
|
mahajrod/MAVR
| 10
|
12777277
|
#!/usr/bin/env python
__author__ = '<NAME>'
import argparse
from RouToolPa.Tools.Samtools import SamtoolsV1
from RouToolPa.Tools.Bedtools import BamToFastq
from RouToolPa.GeneralRoutines import FileRoutines
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", action="store", dest="input", required=True,
help="Input bam file")
parser.add_argument("-t", "--threads", action="store", dest="threads", type=int, default=1,
help="Number of threads to use. Default - 1")
parser.add_argument("-p", "--prepare_bam", action="store_true", dest="prepare_bam",
help="Prepare bam for reads extraction(filter out supplementary and not primary alignments"
"and sort by name)")
"""
parser.add_argument("-e", "--prepared_bam", action="store", dest="prepared_bam",
help="File to write sorted bam file. Required if -p/--prepare_bam option is set")
"""
parser.add_argument("-e", "--prepared_bam_prefix", action="store", dest="prepared_bam_prefix",
help="Prefix of sorted bam file(s). Required if -p/--prepare_bam option is set")
parser.add_argument("-d", "--temp_dir", action="store", dest="temp_dir",
help="Directory to use for temporary files. Required if -p/--prepare_bam option is set")
parser.add_argument("-o", "--out_prefix", action="store", dest="out_prefix", required=True,
help="Prefix of output fastq files")
parser.add_argument("-s", "--single_ends", action="store_false", dest="paired", default=True,
help="Reads are SE")
parser.add_argument("-x", "--mix_ends", action="store_true", dest="mix_ends", default=False,
help="Reads are mix of PE and SE")
parser.add_argument("-m", "--max_memory_per_thread", action="store", dest="max_memory_per_thread", default="1G",
help="Maximum memory per thread. Default - 1G")
args = parser.parse_args()
if args.prepare_bam and ((not args.prepared_bam_prefix) or (not args.temp_dir)):
raise ValueError("Options -e/--prepared_bam_prefix and -m/--temp_dir must be set if -p/--prepare_bam option is used")
SamtoolsV1.threads = args.threads
if args.prepare_bam or args.mix_ends:
FileRoutines.safe_mkdir(FileRoutines.check_path(args.temp_dir))
prepared_pe_bam_file = "%s.bam" % args.prepared_bam_prefix
prepared_unpaired_bam_file = ("%s.unpaired.bam" % args.prepared_bam_prefix) if args.mix_ends else None
"""
SamtoolsV1.prepare_bam_for_read_extraction(args.input, args.prepared_bam, temp_file_prefix=args.temp_dir,
max_memory_per_thread=args.max_memory_per_thread)
"""
SamtoolsV1.prepare_bam_for_read_extraction(args.input, prepared_pe_bam_file, temp_file_prefix=args.temp_dir,
max_memory_per_thread=args.max_memory_per_thread,
bam_file_to_write_unpaired_reads=prepared_unpaired_bam_file)
if args.paired:
left_fastq = "%s_1.fastq" % args.out_prefix
right_fastq = "%s_2.fastq" % args.out_prefix
unpaired_fastq = "%s.unpaired.fastq" % args.out_prefix
else:
left_fastq = "%s.fastq" % args.out_prefix
right_fastq = None
if args.mix_ends:
BamToFastq.convert(prepared_unpaired_bam_file, unpaired_fastq, out_right_fastq=None)
#BamToFastq.convert(args.prepared_bam if args.prepare_bam else args.input, left_fastq, out_right_fastq=right_fastq)
BamToFastq.convert(prepared_pe_bam_file if args.prepare_bam else args.input, left_fastq, out_right_fastq=right_fastq)
| 2.359375
| 2
|
azcam_soguiders/console_soguiders.py
|
mplesser/soguiders
| 0
|
12777278
|
<filename>azcam_soguiders/console_soguiders.py
# azcamconsole config file for soguiders
import os
import threading
import azcam
import azcam.shortcuts
from azcam_ds9.ds9display import Ds9Display
# ****************************************************************
# files and folders
# ****************************************************************
azcam.db.systemname = "soguiders"
azcam.db.systemfolder = os.path.dirname(__file__)
azcam.db.datafolder = azcam.db.systemfolder
parfile = os.path.join(azcam.db.datafolder, f"parameters_console_{azcam.db.systemname}.ini")
# ****************************************************************
# start logging
# ****************************************************************
logfile = os.path.join(azcam.db.datafolder, "logs", "console.log")
azcam.db.logger.start_logging(logfile=logfile)
azcam.log(f"Configuring console for {azcam.db.systemname}")
# ****************************************************************
# display
# ****************************************************************
display = Ds9Display()
dthread = threading.Thread(target=display.initialize, args=[])
dthread.start() # thread just for speed
# ****************************************************************
# console tools
# ****************************************************************
from azcam.tools import create_console_tools
create_console_tools()
# ****************************************************************
# try to connect to azcamserver
# ****************************************************************
server = azcam.db.tools["server"]
connected = server.connect(port=2412)
if connected:
azcam.log("Connected to azcamserver")
else:
azcam.log("Not connected to azcamserver")
# ****************************************************************
# read par file
# ****************************************************************
azcam.db.tools["parameters"].read_parfile(parfile)
azcam.db.tools["parameters"].update_pars(0, "azcamconsole")
# ****************************************************************
# finish
# ****************************************************************
azcam.log("Configuration complete")
| 1.914063
| 2
|
Sintactico.py
|
LawlietJH/PyCthon
| 0
|
12777279
|
# -*- coding: utf-8 -*-
import Lexico
import Arbol
import string
import sys
import os
class Sintactico():
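    # Recursive-descent parser: consumes tokens from Lexico and builds AST nodes
    # (defined in Arbol) for assignments, if/else, while, do-while, for and print.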
def __init__(self):
        with open('entrada.txt', 'r') as Archivo:
            self.Cadena = Archivo.read() + '$'
#===============================================================
self.Suma = Arbol.Suma
self.Multi = Arbol.Multi
self.Asign = Arbol.Asignacion
self.ReservIf = Arbol.ReservIf
self.ReservPrint = Arbol.ReservPrint
self.Separador = Arbol.Separador
self.Signo = Arbol.Signo
self.ExpresionArb = Arbol.Expre
self.Bloque = Arbol.Bloque
self.ReservElse = Arbol.ReservElse
self.ReservWhile = Arbol.ReservWhile
self.Logico = Arbol.Logico
self.Relacional = Arbol.Relacional
self.Identi = Arbol.Identificador
self.Entero = Arbol.Entero
self.Flotante = Arbol.Flotante
self.CadenaArb = Arbol.Cadena
#===============================================================
        self.ListaArbolesBloque = [[],[],[],[],[]] # Allows nesting of up to 5 levels.
self.ListaArboles = []
self.ArbolActual = []
self.ArbolPila = []
self.lexico = Lexico.Lexico(self.Cadena)
self.Cadena = ''
self.PalabReserv = ['if', 'else', 'do','while', 'print']
        self.BloqueActivo = [False, False, False, False, False] # Allows nesting of up to 5 levels.
def Resultado(self, Salida):
if Salida == 0:
print('\n\n\n\t Error Sintáctico: ', Salida)
for x in range(5):
self.lexico.sigSimbolo()
print(self.lexico.simbolo,end='')
Archivo = open('salida.txt','w')
Cadena = Archivo.write(str(Salida))
Archivo.close()
def error(self):
self.Resultado(0)
sys.exit()
def analiza(self):
self.lexico.sigSimbolo()
self.A()
self.Comprueba(20)
def Comprueba(self, Tipo):
if self.lexico.tipo == Tipo:
try: self.lexico.sigSimbolo()
except: self.Resultado(1)
else: self.error()
def A(self):
xD = True
if self.lexico.tipo == 2 and self.lexico.simbolo in self.PalabReserv:
while xD:
xD = False
if self.lexico.simbolo == 'if':
self.If()
xD = True
if self.lexico.simbolo == 'do':
self.DoWhile()
xD = True
if self.lexico.simbolo == 'while':
self.While()
xD = True
if self.lexico.simbolo == 'for':
self.For()
xD = True
if self.lexico.simbolo == 'print':
self.Print()
xD = True
self.Asignacion()
def Asignacion(self, Bool=True):
#===============================================================
Simbolo = None
#===============================================================
if self.lexico.tipo == 2:
#================================================================
R = self.Identi(None, self.lexico.simbolo)
#================================================================
self.lexico.sigSimbolo()
self.Comprueba(15)
#================================================================
P = self.Expresion()
P = self.Asign(R,P)
if self.BloqueActivo[0]:
if self.BloqueActivo[4]: self.ListaArbolesBloque[4].append(P)
elif self.BloqueActivo[3]: self.ListaArbolesBloque[3].append(P)
elif self.BloqueActivo[2]: self.ListaArbolesBloque[2].append(P)
elif self.BloqueActivo[1]: self.ListaArbolesBloque[1].append(P)
elif self.BloqueActivo[0]: self.ListaArbolesBloque[0].append(P)
else: self.ListaArboles.append(P)
#================================================================
if Bool:
self.Comprueba(12)
self.A()
def If(self):
self.lexico.sigSimbolo()
self.Comprueba(11)
#===============================================================
P = self.ComparacionLogica()
R = self.ReservIf()
R.SetHijo(P)
#===============================================================
self.Comprueba(22)
if self.lexico.tipo == 23:
#===============================================================
if self.BloqueActivo[0] == False: self.BloqueActivo[0] = True
elif self.BloqueActivo[1] == False: self.BloqueActivo[1] = True
elif self.BloqueActivo[2] == False: self.BloqueActivo[2] = True
elif self.BloqueActivo[3] == False: self.BloqueActivo[3] = True
elif self.BloqueActivo[4] == False: self.BloqueActivo[4] = True
B = self.Bloque()
#===============================================================
self.lexico.sigSimbolo()
self.A()
self.Comprueba(24)
#===============================================================
if self.BloqueActivo[0]:
if self.BloqueActivo[4]:
B.SetListaHijos(self.ListaArbolesBloque[4])
self.BloqueActivo[4] = False
self.ListaArbolesBloque[4] = []
elif self.BloqueActivo[3]:
B.SetListaHijos(self.ListaArbolesBloque[3])
self.BloqueActivo[3] = False
self.ListaArbolesBloque[3] = []
elif self.BloqueActivo[2]:
B.SetListaHijos(self.ListaArbolesBloque[2])
self.BloqueActivo[2] = False
self.ListaArbolesBloque[2] = []
elif self.BloqueActivo[1]:
B.SetListaHijos(self.ListaArbolesBloque[1])
self.BloqueActivo[1] = False
self.ListaArbolesBloque[1] = []
elif self.BloqueActivo[0]:
B.SetListaHijos(self.ListaArbolesBloque[0])
self.BloqueActivo[0] = False
self.ListaArbolesBloque[0] = []
R.SetHijo(B)
#===============================================================
else:
#===============================================================
if self.BloqueActivo[0] == False: self.BloqueActivo[0] = True
elif self.BloqueActivo[1] == False: self.BloqueActivo[1] = True
elif self.BloqueActivo[2] == False: self.BloqueActivo[2] = True
elif self.BloqueActivo[3] == False: self.BloqueActivo[3] = True
elif self.BloqueActivo[4] == False: self.BloqueActivo[4] = True
B = self.Bloque()
#===============================================================
if self.lexico.simbolo == 'print': self.Print()
else:
self.Asignacion(False)
self.Comprueba(12);
#===============================================================
if self.BloqueActivo[0]:
if self.BloqueActivo[4]:
B.SetListaHijos(self.ListaArbolesBloque[4])
self.BloqueActivo[4] = False
self.ListaArbolesBloque[4] = []
elif self.BloqueActivo[3]:
B.SetListaHijos(self.ListaArbolesBloque[3])
self.BloqueActivo[3] = False
self.ListaArbolesBloque[3] = []
elif self.BloqueActivo[2]:
B.SetListaHijos(self.ListaArbolesBloque[2])
self.BloqueActivo[2] = False
self.ListaArbolesBloque[2] = []
elif self.BloqueActivo[1]:
B.SetListaHijos(self.ListaArbolesBloque[1])
self.BloqueActivo[1] = False
self.ListaArbolesBloque[1] = []
elif self.BloqueActivo[0]:
B.SetListaHijos(self.ListaArbolesBloque[0])
self.BloqueActivo[0] = False
self.ListaArbolesBloque[0] = []
R.SetHijo(B)
#===============================================================
if self.lexico.simbolo == 'else':
self.lexico.sigSimbolo()
if self.lexico.tipo == 23:
if self.BloqueActivo[0] == False: self.BloqueActivo[0] = True
elif self.BloqueActivo[1] == False: self.BloqueActivo[1] = True
elif self.BloqueActivo[2] == False: self.BloqueActivo[2] = True
elif self.BloqueActivo[3] == False: self.BloqueActivo[3] = True
elif self.BloqueActivo[4] == False: self.BloqueActivo[4] = True
E = self.ReservElse()
self.lexico.sigSimbolo()
self.A()
self.Comprueba(24)
#===============================================================
if self.BloqueActivo[0]:
if self.BloqueActivo[4]:
E.SetListaHijos(self.ListaArbolesBloque[4])
self.BloqueActivo[4] = False
self.ListaArbolesBloque[4] = []
elif self.BloqueActivo[3]:
E.SetListaHijos(self.ListaArbolesBloque[3])
self.BloqueActivo[3] = False
self.ListaArbolesBloque[3] = []
elif self.BloqueActivo[2]:
E.SetListaHijos(self.ListaArbolesBloque[2])
self.BloqueActivo[2] = False
self.ListaArbolesBloque[2] = []
elif self.BloqueActivo[1]:
E.SetListaHijos(self.ListaArbolesBloque[1])
self.BloqueActivo[1] = False
self.ListaArbolesBloque[1] = []
elif self.BloqueActivo[0]:
E.SetListaHijos(self.ListaArbolesBloque[0])
self.BloqueActivo[0] = False
self.ListaArbolesBloque[0] = []
#===============================================================
else:
#===============================================================
if self.BloqueActivo[0] == False: self.BloqueActivo[0] = True
elif self.BloqueActivo[1] == False: self.BloqueActivo[1] = True
elif self.BloqueActivo[2] == False: self.BloqueActivo[2] = True
elif self.BloqueActivo[3] == False: self.BloqueActivo[3] = True
elif self.BloqueActivo[4] == False: self.BloqueActivo[4] = True
E = self.ReservElse()
#===============================================================
if self.lexico.simbolo == 'print': self.Print()
else:
self.Asignacion(False)
self.Comprueba(12);
#===============================================================
if self.BloqueActivo[0]:
if self.BloqueActivo[4]:
E.SetListaHijos(self.ListaArbolesBloque[4])
self.BloqueActivo[4] = False
self.ListaArbolesBloque[4] = []
elif self.BloqueActivo[3]:
E.SetListaHijos(self.ListaArbolesBloque[3])
self.BloqueActivo[3] = False
self.ListaArbolesBloque[3] = []
elif self.BloqueActivo[2]:
E.SetListaHijos(self.ListaArbolesBloque[2])
self.BloqueActivo[2] = False
self.ListaArbolesBloque[2] = []
elif self.BloqueActivo[1]:
E.SetListaHijos(self.ListaArbolesBloque[1])
self.BloqueActivo[1] = False
self.ListaArbolesBloque[1] = []
elif self.BloqueActivo[0]:
E.SetListaHijos(self.ListaArbolesBloque[0])
self.BloqueActivo[0] = False
self.ListaArbolesBloque[0] = []
#===============================================================
#===============================================================
R.SetHijo(E)
#===============================================================
#===============================================================
if self.BloqueActivo[0]:
if self.BloqueActivo[4]: self.ListaArbolesBloque[4].append(R)
elif self.BloqueActivo[3]: self.ListaArbolesBloque[3].append(R)
elif self.BloqueActivo[2]: self.ListaArbolesBloque[2].append(R)
elif self.BloqueActivo[1]: self.ListaArbolesBloque[1].append(R)
elif self.BloqueActivo[0]: self.ListaArbolesBloque[0].append(R)
else: self.ListaArboles.append(R)
#===============================================================
def While(self):
self.lexico.sigSimbolo()
self.Comprueba(11)
#===============================================================
P = self.ComparacionLogica()
W = self.ReservWhile()
W.SetHijo(P)
#===============================================================
self.Comprueba(22)
if self.lexico.tipo == 23:
#===============================================================
if self.BloqueActivo[0] == False: self.BloqueActivo[0] = True
elif self.BloqueActivo[1] == False: self.BloqueActivo[1] = True
elif self.BloqueActivo[2] == False: self.BloqueActivo[2] = True
elif self.BloqueActivo[3] == False: self.BloqueActivo[3] = True
elif self.BloqueActivo[4] == False: self.BloqueActivo[4] = True
B = self.Bloque()
#===============================================================
self.lexico.sigSimbolo()
self.A()
self.Comprueba(24)
#===============================================================
if self.BloqueActivo[0]:
if self.BloqueActivo[4]:
B.SetListaHijos(self.ListaArbolesBloque[4])
self.BloqueActivo[4] = False
self.ListaArbolesBloque[4] = []
elif self.BloqueActivo[3]:
B.SetListaHijos(self.ListaArbolesBloque[3])
self.BloqueActivo[3] = False
self.ListaArbolesBloque[3] = []
elif self.BloqueActivo[2]:
B.SetListaHijos(self.ListaArbolesBloque[2])
self.BloqueActivo[2] = False
self.ListaArbolesBloque[2] = []
elif self.BloqueActivo[1]:
B.SetListaHijos(self.ListaArbolesBloque[1])
self.BloqueActivo[1] = False
self.ListaArbolesBloque[1] = []
elif self.BloqueActivo[0]:
B.SetListaHijos(self.ListaArbolesBloque[0])
self.BloqueActivo[0] = False
self.ListaArbolesBloque[0] = []
W.SetHijo(B)
#===============================================================
#===============================================================
if self.BloqueActivo[0]:
if self.BloqueActivo[4]: self.ListaArbolesBloque[4].append(W)
elif self.BloqueActivo[3]: self.ListaArbolesBloque[3].append(W)
elif self.BloqueActivo[2]: self.ListaArbolesBloque[2].append(W)
elif self.BloqueActivo[1]: self.ListaArbolesBloque[1].append(W)
elif self.BloqueActivo[0]: self.ListaArbolesBloque[0].append(W)
else: self.ListaArboles.append(W)
#===============================================================
def DoWhile(self):
self.lexico.sigSimbolo()
self.Comprueba(23)
self.A()
self.Comprueba(24)
if self.lexico.simbolo == 'while':
self.lexico.sigSimbolo()
self.Comprueba(11)
self.ComparacionLogica()
self.Comprueba(22)
self.Comprueba(12)
else: self.error()
def For(self):
self.lexico.sigSimbolo()
self.Comprueba(11)
self.Asignacion(False)
self.Comprueba(12)
        if (self.lexico.tipo == 2 or self.lexico.tipo == 3 or self.lexico.tipo == 5) and self.lexico.simbolo not in self.PalabReserv:
self.lexico.sigSimbolo()
if self.lexico.tipo == 14:
self.lexico.sigSimbolo()
            if (self.lexico.tipo == 2 or self.lexico.tipo == 3 or self.lexico.tipo == 5) and self.lexico.simbolo not in self.PalabReserv: self.lexico.sigSimbolo()
self.Comprueba(12)
self.Asignacion(False)
self.Comprueba(22)
if self.lexico.tipo == 23:
self.lexico.sigSimbolo()
self.A()
self.Comprueba(24)
    def Expresion(self, Bool=True): # Allows recursion
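        # Parses an arithmetic expression: operands and operators are pushed onto
        # ArbolPila and folded into Suma (+/-) and Multi (*, /) tree nodes.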
#================================================================
P = None
Q = None
Tipo = None
xD = False
Sign = False
ArbolPila = []
#================================================================
if self.lexico.tipo == 9:
Sign = self.lexico.simbolo
self.lexico.sigSimbolo()
if self.lexico.tipo == 11:
self.lexico.sigSimbolo()
#================================================================
P = self.Expresion()
ArbolPila.append(P)
#================================================================
self.Comprueba(22)
xD = True
        # 2 = IDENTIFIER; 3 = INTEGER; 5 = FLOAT; 8 = STRING, e.g. "Hola xD"
if self.lexico.tipo == 2 or self.lexico.tipo == 3\
or self.lexico.tipo == 5 or self.lexico.tipo == 8\
or xD == True:
if xD == False:
#================================================================
if self.lexico.tipo == 2: P = self.Identi(None, self.lexico.simbolo)
elif self.lexico.tipo == 3: P = self.Entero('i', self.lexico.simbolo)
elif self.lexico.tipo == 5: P = self.Flotante('r', self.lexico.simbolo)
elif self.lexico.tipo == 8: P = self.CadenaArb('c', self.lexico.simbolo)
ArbolPila.append(P)
#================================================================
self.lexico.sigSimbolo()
else: xD = False
#================================================================
if Sign != False:
P = self.Signo(P, Sign)
ArbolPila.pop()
ArbolPila.append(P)
Sign = False
#================================================================
while self.lexico.tipo == 9 or self.lexico.tipo == 10:
#================================================================
Tipo = (self.lexico.tipo, self.lexico.simbolo)
ArbolPila.append(Tipo)
#================================================================
self.lexico.sigSimbolo()
if self.lexico.tipo == 9:
Sign = self.lexico.simbolo
self.lexico.sigSimbolo()
if self.lexico.tipo == 11:
self.lexico.sigSimbolo()
#================================================================
Q = self.Expresion()
ArbolPila.append(Q)
#================================================================
self.Comprueba(22)
xD = True
if self.lexico.tipo == 2 or self.lexico.tipo == 3\
or self.lexico.tipo == 5 or self.lexico.tipo == 8\
or xD == True:
if xD == False:
#================================================================
if self.lexico.tipo == 2: Q = self.Identi(None, self.lexico.simbolo)
elif self.lexico.tipo == 3: Q = self.Entero('i', self.lexico.simbolo)
elif self.lexico.tipo == 5: Q = self.Flotante('r', self.lexico.simbolo)
elif self.lexico.tipo == 8: Q = self.CadenaArb('c', self.lexico.simbolo)
ArbolPila.append(Q)
#================================================================
self.lexico.sigSimbolo()
else: xD = False
else: self.error()
#================================================================
if Sign != False:
Q = self.Signo(Q, Sign)
ArbolPila.pop()
ArbolPila.append(Q)
Sign = False
if Bool:
if Tipo[0] == 9: P = self.Suma(P, Q, Tipo[1])
elif Tipo[0] == 10: P = self.Multi(P, Q, Tipo[1])
#================================================================
if Bool == False:
# ~ print('\n')
ArbolPila = ArbolPila[::-1]
P = ArbolPila.pop(0)
# ~ print(P)
if ArbolPila != []:
Operador = ArbolPila.pop(0)
Valor1 = ArbolPila.pop(0)
# ~ print(Operador)
# ~ print(Valor1)
if Operador[0] == 9: P = self.Suma( Valor1, P, Operador[1])
elif Operador[0] == 10: P = self.Multi(Valor1, P, Operador[1])
Cont = 0
for x in ArbolPila:
# ~ print(x)
if Cont % 2 == 0: Operador = x
elif Cont % 2 == 1:
Valor1 = x
if Operador[0] == 9: P = self.Suma( Valor1, P, Operador[1])
elif Operador[0] == 10: P = self.Multi(Valor1, P, Operador[1])
Cont += 1
return P
def Print(self):
self.lexico.sigSimbolo()
self.Comprueba(11)
#===============================================================
P = self.Expresion()
P = self.ExpresionArb(P)
#===============================================================
self.Comprueba(22)
#===============================================================
P = self.ReservPrint(P)
if self.BloqueActivo[0]:
if self.BloqueActivo[4]: self.ListaArbolesBloque[4].append(P)
elif self.BloqueActivo[3]: self.ListaArbolesBloque[3].append(P)
elif self.BloqueActivo[2]: self.ListaArbolesBloque[2].append(P)
elif self.BloqueActivo[1]: self.ListaArbolesBloque[1].append(P)
elif self.BloqueActivo[0]: self.ListaArbolesBloque[0].append(P)
else: self.ListaArboles.append(P)
#===============================================================
self.Comprueba(12)
def ComparacionLogica(self):
#================================================================
P = self.ComparacionRelacional()
#================================================================
while self.lexico.tipo == 19:
self.lexico.sigSimbolo()
#================================================================
Q = self.ComparacionRelacional()
P = self.Logico(P, Q)
#================================================================
#================================================================
return P
#================================================================
def ComparacionRelacional(self):
#================================================================
P = None
Q = None
Simbolo = None
P = self.Expresion()
#================================================================
if self.lexico.tipo == 16:
Simbolo = self.lexico.simbolo
self.lexico.sigSimbolo()
Simbolo += self.lexico.simbolo
self.Comprueba(15)
#================================================================
Q = self.Expresion()
P = self.Relacional(P, Q, Simbolo)
#================================================================
elif self.lexico.tipo == 14:
Simbolo = self.lexico.simbolo
self.lexico.sigSimbolo()
#================================================================
Q = self.Expresion()
P = self.Relacional(P, Q, Simbolo)
#================================================================
#================================================================
return P
#================================================================
def P(self): os.system('Pause > Nul')
| 3.375
| 3
|
jentry/entry/script/__init__.py
|
HansBug/jentry
| 0
|
12777280
|
<gh_stars>0
from .file import load_entries_from_file, load_entry_classes_from_code
from .project import load_entries_from_project
| 1.078125
| 1
|
httpolice/syntax/rfc3986.py
|
vfaronov/httpolice
| 1,027
|
12777281
|
from httpolice.citation import RFC
from httpolice.parse import (auto, empty, fill_names, literal, maybe_str,
octet_range, pivot, string, string1, string_times,
subst)
from httpolice.syntax.common import ALPHA, DIGIT, HEXDIG
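# Each definition below transcribes the corresponding ABNF production of RFC 3986.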
pct_encoded = '%' + HEXDIG + HEXDIG > auto
sub_delims = (literal('!') | '$' | '&' | "'" | '(' | ')' | '*' | '+' |
',' | ';' | '=') > auto
unreserved = ALPHA | DIGIT | '-' | '.' | '_' | '~' > auto
pchar = unreserved | sub_delims | ':' | '@' | pct_encoded > auto
segment = string(pchar) > auto
segment_nz = string1(pchar) > auto
segment_nz_nc = string1(unreserved | sub_delims | '@' | pct_encoded) > auto
scheme = ALPHA + string(ALPHA | DIGIT | '+' | '-' | '.') > pivot
userinfo = string(unreserved | sub_delims | ':' | pct_encoded) > pivot
dec_octet = (DIGIT |
octet_range(0x31, 0x39) + DIGIT |
'1' + DIGIT + DIGIT |
'2' + octet_range(0x30, 0x34) + DIGIT |
'25' + octet_range(0x30, 0x35)) > auto
IPv4address = (dec_octet + '.' + dec_octet + '.' +
dec_octet + '.' + dec_octet) > pivot
h16 = string_times(1, 4, HEXDIG) > auto
ls32 = (h16 + ':' + h16) | IPv4address > auto
IPv6address = (
string_times(6, 6, h16 + ':') + ls32 |
'::' + string_times(5, 5, h16 + ':') + ls32 |
maybe_str(h16) + '::' + string_times(4, 4, h16 + ':') + ls32 |
maybe_str(string_times(0, 1, h16 + ':') + h16) +
'::' + string_times(3, 3, h16 + ':') + ls32 |
maybe_str(string_times(0, 2, h16 + ':') + h16) +
'::' + string_times(2, 2, h16 + ':') + ls32 |
maybe_str(string_times(0, 3, h16 + ':') + h16) + '::' + h16 + ':' + ls32 |
maybe_str(string_times(0, 4, h16 + ':') + h16) + '::' + ls32 |
maybe_str(string_times(0, 5, h16 + ':') + h16) + '::' + h16 |
maybe_str(string_times(0, 6, h16 + ':') + h16) + '::'
) > pivot
IPvFuture = ('v' + string1(HEXDIG) + '.' +
string1(unreserved | sub_delims | ':')) > pivot
# As updated by RFC 6874
ZoneID = string1(unreserved | pct_encoded) > pivot
IPv6addrz = IPv6address + '%25' + ZoneID > pivot
IP_literal = '[' + (IPv6address | IPv6addrz | IPvFuture) + ']' > pivot
reg_name = string(unreserved | sub_delims | pct_encoded) > pivot
host = IP_literal | IPv4address | reg_name > pivot
port = string(DIGIT) > pivot
authority = maybe_str(userinfo + '@') + host + maybe_str(':' + port) > pivot
path_abempty = string('/' + segment) > auto
path_absolute = '/' + maybe_str(segment_nz + string('/' + segment)) > auto
path_noscheme = segment_nz_nc + string('/' + segment) > auto
path_rootless = segment_nz + string('/' + segment) > auto
path_empty = subst(u'') << empty > auto
hier_part = ('//' + authority + path_abempty |
path_absolute | path_rootless | path_empty) > pivot
query = string(pchar | '/' | '?') > pivot
fragment = string(pchar | '/' | '?') > pivot
absolute_URI = scheme + ':' + hier_part + maybe_str('?' + query) > pivot
relative_part = ('//' + authority + path_abempty |
path_absolute | path_noscheme | path_empty) > pivot
URI = (scheme + ':' + hier_part +
maybe_str('?' + query) + maybe_str('#' + fragment)) > pivot
relative_ref = (relative_part +
maybe_str('?' + query) + maybe_str('#' + fragment)) > pivot
URI_reference = URI | relative_ref > pivot
fill_names(globals(), RFC(3986))
| 2.171875
| 2
|
setup.py
|
dlshriver/Queryable
| 5
|
12777282
|
"""
pinq setup script.
"""
from distutils.core import setup
with open("README.rst", 'r') as f:
readme = f.read()
with open("HISTORY.rst", 'r') as f:
history = f.read()
setup(
name='pinq',
version='0.1.1',
description='LINQ for python.',
long_description="%s\n\n%s" % (readme, history),
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/dlshriver/pinq',
packages=[
'pinq',
],
classifiers=(
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
)
)
| 1.648438
| 2
|
plot_sweep.py
|
yinghai/benchmark
| 384
|
12777283
|
<filename>plot_sweep.py
import argparse
import json
# import pandas as pd
import os
# import sys
# import re
import yaml
import itertools
# from bokeh.layouts import column, row, layout, gridplot
# from bokeh.plotting import figure, output_file, show
# from bokeh.sampledata.autompg import autompg
# from bokeh.transform import jitter
from bokeh.palettes import Category10
from bokeh.models import HoverTool, Div, Range1d
from bokeh.plotting import figure, output_file, show
# from bokeh.models import Legend
# from bokeh.models import ColumnDataSource, CategoricalTicker, Div
# from bokeh.models import ColumnDataSource, DataTable, DateFormatter, TableColumn
# from bokeh.transform import jitter
from collections import defaultdict
from datetime import datetime as dt
from torchbenchmark.util.data import load_data_dir, load_data_files
from torchbenchmark.score.compute_score import TorchBenchScore
TORCHBENCH_SCORE_VERSION = "v1"
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("data_dir", nargs='+',
help="One or more directories containing benchmark json files. "
"Each directory will be plotted as a separate series. "
"By default, the first file in the first directory will be used"
" to generate a score configuration with a target of 1000,"
" and everything else will be relative to that.")
parser.add_argument("--output_html", default='plot.html', help="html file to write")
parser.add_argument("--plot_all", action='store_true',
help="Plots the scores for each configuration")
parser.add_argument("--reference_json", required=True,
help="file defining score norm values, usually first json in first data_dir")
args = parser.parse_args()
plot_height = 800
plot_width = 1000
assert len(args.data_dir) > 0, "Must provide at least one data directory"
compare_datasets = [load_data_dir(d, most_recent_files=-1) for d in args.data_dir]
with open(args.reference_json) as f:
ref_data = json.load(f)
plot_all = args.plot_all
score_config = TorchBenchScore(ref_data=ref_data, version=TORCHBENCH_SCORE_VERSION)
p = figure(plot_width=plot_width, plot_height=plot_height,
x_axis_type='datetime')
xs = []
ys = []
zs = []
max_score = 0
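    # For every data directory: compute a TorchBench score per nightly run,
    # keyed by the build date parsed from the PyTorch version string.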
for d in compare_datasets:
scores = {}
scores_db = defaultdict(list)
for i in range(len(d._json_raw)):
data = d._json_raw[i]
pytorch_ver = data['machine_info']['pytorch_version']
# Slice the portion after '+'
pytorch_ver_cuda_loc = pytorch_ver.rfind('+')
pytorch_ver = pytorch_ver[:pytorch_ver_cuda_loc]
date = dt.strptime(pytorch_ver[pytorch_ver.index("dev") + len("dev"):], "%Y%m%d")
score = score_config.compute_score(data)
scores[date] = score
dates = []
total_scores = []
all_scores = []
for date in sorted(scores.keys()):
dates.append(date)
total_scores.append(scores[date]["total"])
max_score = max(max_score, max(total_scores))
all_scores.append(scores[date])
xs.append(dates)
ys.append(total_scores)
if plot_all:
zs.append(all_scores)
colors = itertools.cycle(Category10[10])
basenames = map(os.path.basename, args.data_dir)
if plot_all:
for x, z in zip(xs, zs):
basename = next(basenames)
color = next(colors)
configs = z[0].keys()
for config in configs:
if not ("subscore" in config or "total" in config):
continue
color = next(colors)
scores = []
for s in z:
scores.append(s[config])
p.line(x, scores, color=color, line_width=2, legend_label=basename + '-' + config)
p.legend.click_policy = "hide"
else:
for x, y, color in zip(xs, ys, colors):
p.line(x, y, color=color, line_width=2, legend_label=next(basenames))
for x, y, color in zip(xs, ys, colors):
p.circle(x, y, color=color)
p.legend.location = "bottom_right"
p.y_range = Range1d(0, max_score * 1.25)
p.add_tools(HoverTool(
tooltips=[
('date', '@x{%F}'),
('score', '@y{0.00 a}'),
],
formatters={
'@x': 'datetime',
'@y': 'numeral',
},
))
output_file(args.output_html)
show(p)
| 2.578125
| 3
|
api/api.py
|
trompamusic/crowd_task_manager
| 0
|
12777284
|
<reponame>trompamusic/crowd_task_manager
# get slices from db
# list slices on home page
# make api endpoint for slice
# each shows slice image and xml, create this from template
from flask import Flask
from flask import render_template
app = Flask(__name__)
import pymongo
import re
import os
import yaml
import urllib.request
import pathlib
from bson.objectid import ObjectId
from flask import request
from flask import jsonify
from datetime import datetime
from flask import flash, redirect, url_for
from werkzeug.utils import secure_filename
from flask import send_from_directory
from pathlib import Path
from lxml import etree
from xmldiff import main
from urllib.parse import urlparse
from shutil import copyfile
import logging
import ssl
import pika
import sys
import json
sys.path.append("..")
from common.settings import cfg
import common.file_system_manager as fsm
import pwd
import grp
rabbitmq_address = cfg.rabbitmq_address
path = os.getcwd()
UPLOAD_FOLDER_TEMP = path + '/uploads'
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
MONGO_SERVER = cfg.mongodb_address
MONGO_DB = cfg.db_name
TASK_COLL = cfg.col_task
# CLIENT_SECRET = cfg.client_secret
CURRENT_SERVER = cfg.current_server
app.config['UPLOAD_FOLDER'] = str(Path.home() / cfg.upload_folder)
@app.route('/')
# display all the tasks
@app.route('/index')
def index():
myclient = pymongo.MongoClient(MONGO_SERVER.ip, MONGO_SERVER.port)
mydb = myclient[MONGO_DB]
mycol = mydb[cfg.col_submitted_task]
myquery = {}
mydoc = mycol.find(myquery)
tasks = []
for x in mydoc:
tasks.append(x)
return render_template('index.html', title='Home', tasks=tasks)
@app.route('/tasks', methods=['GET'])
def get_tasks():
myclient = pymongo.MongoClient(MONGO_SERVER.ip, MONGO_SERVER.port)
mydb = myclient[MONGO_DB]
mycol = mydb[cfg.col_task]
myquery = {}
mydoc = mycol.find(myquery)
tasks = []
for x in mydoc:
task = {}
task['_id'] = str(x['_id'])
task['type'] = 'edit' if 'type' not in x else x['type']
tasks.append(task)
resp = jsonify(tasks=tasks)
return resp
@app.route('/tasks/<variable>', methods=['GET'])
def get_task_query(variable):
myclient = pymongo.MongoClient(MONGO_SERVER.ip, MONGO_SERVER.port)
mydb = myclient[MONGO_DB]
mycol = mydb[cfg.col_task]
myquery = {"_id": ObjectId(variable)}
mydoc = mycol.find_one(myquery)
del mydoc["_id"]
del mydoc["batch_id"]
return json.dumps(mydoc)
# display task info, slice, and xml
@app.route('/edit/<variable>', methods=['GET'])
def task(variable):
myclient = pymongo.MongoClient(MONGO_SERVER.ip, MONGO_SERVER.port)
mydb = myclient[MONGO_DB]
mycol = mydb[TASK_COLL]
myquery = {"_id": ObjectId(variable)}
mydoc = mycol.find(myquery)
task = mydoc[0]
xml = task['xml']
xml2 = re.sub(r'\s+', ' ', xml)
print(xml2)
return render_template("task.html", task=task, xml=xml2)
# display task info, slice, and xml
@app.route('/verify/<variable>', methods=['GET'])
def task_verify(variable):
myclient = pymongo.MongoClient(MONGO_SERVER.ip, MONGO_SERVER.port)
mydb = myclient[MONGO_DB]
mycol = mydb[TASK_COLL]
myquery = {"_id": ObjectId(variable)}
mydoc = mycol.find(myquery)
task = mydoc[0]
xml = task['xml']
xml2 = re.sub(r'\s+', ' ', xml)
print(xml2)
return render_template("task_verify.html", task=task, xml=xml2)
# get xml data
@app.route('/xml/<variable>', methods=['GET'])
def task_xml(variable):
myclient = pymongo.MongoClient(MONGO_SERVER.ip, MONGO_SERVER.port)
mydb = myclient[MONGO_DB]
mycol = mydb[TASK_COLL]
myquery = {"_id": ObjectId(variable)}
mydoc = mycol.find(myquery)
task = mydoc[0]
xml = task['xml']
# print(xml)
# print(type(xml))
resp = jsonify(xml=xml)
return resp
# TODO: This needs an overhaul
# receive xml data
@app.route('/<variable>', methods=['POST'])
def taskpost(variable):
myclient = pymongo.MongoClient(*MONGO_SERVER)
mydb = myclient[MONGO_DB]
mycol = mydb[cfg.col_result]
task = mydb[cfg.col_task].find_one({"_id": ObjectId(variable)})
opinion = 'xml' if 'v' not in request.args else (request.args['v'] == "1")
result_type = "verify" if 'v' in request.args else "edit"
result = {
"task_id": variable,
"result": str(request.get_data(as_text=True)),
"step": task["step"],
}
mycol.insert_one(result)
send_message(
cfg.mq_task_scheduler_status,
cfg.mq_task_scheduler_status,
json.dumps({
'action': 'result',
'module': 'api',
'_id': variable,
'type': result_type}))
# # check if the task is complete
# mycol_other = mydb['submitted_tasks']
# task_status = mycol_other.find({"task_id": variable, "type": result['result_type']})
# if task_status.count() > 0:
# if task_status[0]['status'] != "complete" and opinion == 'xml':
# xml_in = str(request.get_data(as_text=True))
# other_entry = mycol_other.update_one(
# {"task_id": variable, "type": result['result_type']},
# {'$push': {'xml': xml_in}},
# upsert=True)
# count = len(task_status[0]['xml'])
# if(count == 1):
# # set status of controlaction to active
# send_message(
# 'ce_communicator_queue',
# 'ce_communicator_queue',
# json.dumps({
# 'action': 'task active',
# 'identifier': variable,
# 'type': 'edit',
# 'status': 'ActiveActionStatus'}))
# if(count > 1):
# x = 0
# for x in range(0, count - 1):
# xml_string1 = re.sub(r'\s+', ' ', task_status[0]['xml'][x])
# xml_string2 = re.sub(r'\s+', ' ', xml_in)
# tree1 = etree.fromstring(xml_string1)
# tree2 = etree.fromstring(xml_string2)
# diff = main.diff_trees(tree1, tree2)
# # check is the new one matches one of the known ones
# print("compare xml diff ", len(diff))
# if(len(diff) == 0):
# # if two match then save the result
# # in result_agg collection
# results_agg_coll = mydb['results_agg']
# good_result = {
# "task_id": variable,
# "xml": xml_string2
# }
# results_agg_coll.insert_one(good_result)
# # mark the submitted task done
# mycol_other.update_one(
# {
# "task_id": variable
# }, {
# '$set': {
# 'status': "complete"
# }
# })
# # send message to omr_planner
# tasks_coll = mydb['tasks']
# task = tasks_coll.find_one({"_id": ObjectId(variable)})
# status_update_msg = {
# '_id': variable,
# 'module': 'aggregator',
# 'status': 'complete',
# 'name': task['score']}
# send_message(
# 'omr_planner_status_queue',
# 'omr_planner_status_queue',
# json.dumps(status_update_msg))
# send_message(
# 'ce_communicator_queue',
# 'ce_communicator_queue',
# json.dumps({
# 'action': 'task completed',
# 'identifier': variable,
# 'type': 'edit',
# 'status': 'CompletedActionStatus'}))
# if(opinion == 'xml'):
# a = mydb['tasks']
# xml_in = str(request.get_data(as_text=True))
# c = a.update_one(
# {"_id": ObjectId(variable)},
# {'$set': {'xml': xml_in}},
# upsert=True)
# send_message(
# 'ce_communicator_queue',
# 'ce_communicator_queue',
# json.dumps({
# 'action': 'verify task created',
# '_id': variable}))
# if(count == 1):
# mycol_other.update_one(
# {"task_id": variable, "type": result['result_type']},
# {'$set': {'status': "processing"}})
# elif task_status[0]['status'] != "complete" and opinion != 'xml':
# mycoll = mydb['results']
# query = {"task_id": variable, 'result_type': 'verify'}
# # query = {"task_id": variable, "opinion": True}
# mydoc = mycol.find(query)
# if(mydoc.count() == 1):
# mycol_other.update_one(
# {"task_id": variable, "type": result['result_type']},
# {'$set': {'status': "processing"}})
# # set status of controlaction to active
# send_message(
# 'ce_communicator_queue',
# 'ce_communicator_queue',
# json.dumps({
# 'action': 'task active',
# 'identifier': variable,
# 'type': 'verify',
# 'status': 'ActiveActionStatus'}))
# mycoll = mydb['results']
# query = {"task_id": variable, "opinion": True}
# if(mydoc.count() > 1):
# mycol_other.update_one(
# {"task_id": variable, "type": result['result_type']},
# {'$set': {'status': "complete"}})
# send_message(
# 'ce_communicator_queue',
# 'ce_communicator_queue',
# json.dumps({
# 'action': 'task completed',
# 'identifier': variable,
# 'type': 'verify',
# 'status': 'CompletedActionStatus'}))
resp = jsonify(success=True)
return resp
# display list of completed sheets
@app.route('/results', methods=['GET'])
def index_sheets():
myclient = pymongo.MongoClient(MONGO_SERVER.ip, MONGO_SERVER.port)
mydb = myclient[MONGO_DB]
mycol = mydb[cfg.col_aggregated_result]
myquery = {}
mydoc = mycol.find(myquery)
sheets = []
for x in mydoc:
sheets.append(x)
return render_template('sheets.html', title='Home', sheets=sheets)
# display aggregated task results
@app.route('/results/<variable>', methods=['GET'])
def show_sheet(variable):
myclient = pymongo.MongoClient(MONGO_SERVER.ip, MONGO_SERVER.port)
mydb = myclient[MONGO_DB]
mycol = mydb[cfg.col_aggregated_result]
myquery = {"_id": ObjectId(variable)}
mydoc = mycol.find(myquery)
sheet = mydoc[0]
xml = sheet['xml']
xml2 = re.sub(r'\s+', ' ', xml)
return render_template("sheet.html", sheet=sheet, xml=xml2)
# display aggregated task results
@app.route('/context/<variable>', methods=['GET'])
def show_page_context(variable):
myclient = pymongo.MongoClient(MONGO_SERVER.ip, MONGO_SERVER.port)
mydb = myclient[MONGO_DB]
mycol = mydb[cfg.col_task_context]
myquery = {"task_id": ObjectId(variable)}
mydoc = mycol.find_one(myquery)
page_nr = mydoc['page_nr']
score = mydoc['score']
coords = mydoc['coords']
mycol2 = mydb[cfg.col_sheet]
myquery2 = {"name": score}
mydoc2 = mycol2.find_one(myquery2)
nr_pages = len(mydoc2['pages_path'])
mycol = mydb[cfg.col_task_context]
myquery = {"score": score, "page_nr": page_nr}
mydoc = mycol.find(myquery, {'_id': False, 'task_id': False})
tasks = []
for x in mydoc:
tasks.append(x)
return render_template("page_context.html", tasks=json.dumps(tasks), coords=json.dumps(coords), nr_pages=nr_pages)
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def send_message(queue_name, routing_key, message):
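    # Open a short-lived RabbitMQ connection, declare the queue,
    # publish the message on it and close the connection.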
connection = pika.BlockingConnection(
pika.ConnectionParameters(
host=rabbitmq_address.ip,
port=rabbitmq_address.port))
channel = connection.channel()
channel.queue_declare(queue=queue_name)
channel.basic_publish(exchange='', routing_key=routing_key, body=message)
connection.close()
# endpoint for uploading pdf that kicks off the demo use case
@app.route('/upload', methods=['POST', 'GET'])
def upload_sheet():
if request.method == 'POST':
# check if the post request has the file part
if 'file' not in request.files:
flash('No file part')
return redirect(request.url)
file = request.files['file']
mei = request.files['mei']
# if user does not select file, browser also
# submit an empty part without filename
if file.filename == '':
flash('No selected file')
return redirect(request.url)
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
data_folder = Path(app.config['UPLOAD_FOLDER'])
data_folder = data_folder / os.path.splitext(file.filename)[0]
pathlib.Path(data_folder).mkdir(parents=True, exist_ok=True)
uid = os.geteuid()
gid = os.getegid()
os.chown(data_folder, uid, gid)
data_folder = data_folder / "whole"
sheet_path = data_folder / filename
pathlib.Path(data_folder).mkdir(parents=True, exist_ok=True)
os.chown(data_folder, uid, gid)
file.save(os.path.join(data_folder, sheet_path))
os.chown(sheet_path, uid, gid)
mei_path = ''
if mei:
mei_filename = secure_filename(mei.filename)
mei_path = data_folder / mei_filename
mei.save(os.path.join(data_folder, mei_filename))
os.chown(mei_path, uid, gid)
# create entry into database
myclient = pymongo.MongoClient(MONGO_SERVER.ip, MONGO_SERVER.port)
mydb = myclient[MONGO_DB]
mycol = mydb[cfg.col_sheet]
# copy file to omr_files
data_folder_temp = Path(UPLOAD_FOLDER_TEMP)
data_folder_temp.mkdir(parents=True, exist_ok=True)
os.chown(data_folder_temp, uid, gid)
pathlib.Path(data_folder_temp).mkdir(parents=True, exist_ok=True)
sheet_path_temp = data_folder_temp / filename
file.save(os.path.join(data_folder_temp, sheet_path_temp))
os.chown(sheet_path_temp, uid, gid)
sheet_path_temp = filename
result = {
"name": os.path.splitext(file.filename)[0],
"sheet_path": str(sheet_path),
"ts": datetime.now(),
"submitted_mei_path": str(mei_path),
"source" : "UI"
}
identifier = mycol.insert_one(result).inserted_id
# send message to omr_planner
message = {'score_name': os.path.splitext(filename)[0], '_id': str(identifier)}
send_message(
cfg.mq_omr_planner,
cfg.mq_omr_planner,
json.dumps(message))
return '''
<!doctype html>
<title>Upload new File</title>
<h1>PDF Uploaded successfully</h1>
'''
return '''
<!doctype html>
<title>Upload new File</title>
<h1>Upload music score PDF</h1>
<form method=post enctype=multipart/form-data>
PDF: <br><input type=file name=file><br>
MEI(Optional): <br><input type=file name=mei>
<input type=submit value=Upload>
</form>
'''
@app.route('/uploads/<filename>')
def uploaded_file(filename):
return send_from_directory(
UPLOAD_FOLDER_TEMP,
filename)
@app.route('/upload/url_submit', methods=['POST', 'GET'])
def download_from_url():
print('Beginning file download with urllib2...')
if request.method == 'POST':
url = request.form['url']
a = urlparse(url)
filename = os.path.basename(a.path)
extension = os.path.splitext(filename)[-1][1:]
# check if the post request has the file part
if 'url' not in request.form:
flash('No file part')
return redirect(request.url)
if allowed_file(filename):
filename = secure_filename(filename)
path_whole_files = os.path.join(
app.config['UPLOAD_FOLDER'],
os.path.splitext(filename)[0])
pathlib.Path(path_whole_files).mkdir(parents=True, exist_ok=True)
uid = os.geteuid()
gid = os.getegid()
os.chown(path_whole_files, uid, gid)
path_whole_files = os.path.join(
path_whole_files,
"whole")
pathlib.Path(path_whole_files).mkdir(parents=True, exist_ok=True)
os.chown(path_whole_files, uid, gid)
sheet_path = os.path.join(path_whole_files, filename)
urllib.request.urlretrieve(url, sheet_path)
os.chown(sheet_path, uid, gid)
# create entry into database
myclient = pymongo.MongoClient(MONGO_SERVER.ip, MONGO_SERVER.port)
mydb = myclient[MONGO_DB]
mycol = mydb[cfg.col_sheet]
# copy file to omr_files
data_folder_temp = Path(UPLOAD_FOLDER_TEMP)
pathlib.Path(data_folder_temp).mkdir(parents=True, exist_ok=True)
os.chown(data_folder_temp, uid, gid)
sheet_path_temp = data_folder_temp / filename
copyfile(sheet_path, sheet_path_temp)
os.chown(sheet_path_temp, uid, gid)
sheet_path_temp = filename
result = {
"name": os.path.splitext(filename)[0],
"description": request.form['description'],
"source": "UI",
"sheet_path": str(sheet_path),
"ts": datetime.now()
}
identifier = mycol.insert_one(result).inserted_id
# send message to omr_planner
message = {'score_name': os.path.splitext(filename)[0], '_id': str(identifier)}
send_message(
cfg.mq_omr_planner,
cfg.mq_omr_planner,
json.dumps(message))
return redirect(url_for('uploaded_file', filename=sheet_path_temp))
return '''
<!doctype html>
<title>Upload new File</title>
<h1>Submit music score PDF</h1>
<form method=post enctype=multipart/form-data>
Name: <br>
<input type=text name=name value="">
<br>
Description: <br>
<input type=text name=description value=""><br>
URL to Music Score PDF <br>
<input type=text name=url>
<input type=submit value=Submit>
</form>
'''
@app.route('/tasks', methods=['GET'])
def list_tasks():
myclient = pymongo.MongoClient(MONGO_SERVER.ip, MONGO_SERVER.port)
mydb = myclient[MONGO_DB]
mycol = mydb['tasks_test2']
myquery = {}
mydoc = mycol.find(myquery)
tasks = []
for x in mydoc:
task = {
'id': str(x['_id']),
'name': x['name'],
'image_path': CURRENT_SERVER + 'static/' + x['image_path'],
'xml': x['xml']
}
tasks.append(task)
result = {
'tasks': tasks
}
return jsonify(result)
@app.route('/tasks/<variable>', methods=['GET'])
def get_task(variable):
myclient = pymongo.MongoClient(MONGO_SERVER.ip, MONGO_SERVER.port)
mydb = myclient[MONGO_DB]
mycol = mydb['tasks_test2']
myquery = {"_id": ObjectId(variable)}
mydoc = mycol.find_one(myquery)
result = {
'id': str(mydoc['_id']),
'name': mydoc['name'],
'image_path': CURRENT_SERVER + 'static/' + mydoc['image_path'],
'xml': mydoc['xml']
}
return jsonify(result)
if __name__ == "__main__":
app.debug = True
print('in the main')
if cfg.use_cert:
context = ('crowdmanager_eu.crt', 'crowdmanager.eu.key')
app.run(host='0.0.0.0', port=443, ssl_context=context)
else:
app.secret_key = os.urandom(24)
app.run(host='0.0.0.0', port=443)
| 2.234375
| 2
|
code.py
|
kasthuri28/hacktoberithms
| 0
|
12777285
|
<filename>code.py
const assert = require('assert')
function checkout_time(customers, n_cashier) {
let cashiers = Array(n_cashier).fill(0)
customers.forEach(customer => {
const minIndex = cashiers.reduce((accIdx, current, index) => {
return current < cashiers[accIdx] ? index : accIdx
}, 0)
cashiers[minIndex] = cashiers[minIndex] + parseInt(customer)
});
return Math.max(...cashiers)
}
function explicitTest() {
cases = [
[[[5, 1, 3], 1], 9],
[[[10, 3, 4, 2], 2], 10]
]
result = cases.reduce((acc, current, idx) => {
res = checkout_time(...current[0])
console.log("[Test case", idx, "] Inputs are ", current[0],
", expected result is ", current[1],
", result is ", res,
" : ", res === current[1] ? "PASS" : "FAILED");
return acc && res === current[1]
}, true)
result ? console.log("All tests passed successfully") : console.log("Some tests failed")
}
function assertionTest() {
cases = [
[[[5, 1, 3], 1], 9],
[[[10, 3, 4, 2], 2], 10]
]
cases.forEach((c) => {
assert(checkout_time(...c[0]) === c[1], "Tests failed")
})
}
assertionTest()
explicitTest()
| 2.828125
| 3
|
urls.py
|
princeofdatamining/blueking-sample
| 0
|
12777286
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
urls config
"""
from django.urls import include, path
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from django.conf import settings
prefix = settings.SITE_URL.lstrip('/')
# Common URL configuration
urlpatterns = [
path(prefix + 'admin/', admin.site.urls),
path(prefix + 'auth/', include('rest_framework.urls')),
path(prefix, include('home_application.urls')),
]
| 1.4375
| 1
|
altair_examples/simple_line_chart.py
|
progressivis/altair_examples
| 1
|
12777287
|
<filename>altair_examples/simple_line_chart.py
"""
Simple Line Chart
-----------------
This chart shows the most basic line chart, made from a dataframe with two
columns.
"""
# category: simple charts
import altair as alt
import numpy as np
import pandas as pd
x = np.arange(100)
source = pd.DataFrame({"x": x, "f(x)": np.sin(x / 5)})
alt.Chart(source).mark_line().encode(x="x", y="f(x)")
| 3.3125
| 3
|
labelfactory/Log.py
|
Orieus/one_def_classification
| 0
|
12777288
|
# -*- coding: utf-8 -*-
"""
Created on Fri May 22 08:52:50 2015
@author: sblanco
Modified by jcid to log messages to standard output
"""
import logging
import sys
class Log:
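    """Logger that writes messages both to a file and to stdout.
    If handler setup fails, it falls back to plain print() calls."""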
__logger__ = None
__error__ = False
def __init__(self, path, crear=False):
try:
self.__logger__ = logging.getLogger(__name__)
self.__logger__ .setLevel(logging.DEBUG)
# create a file handler
# mode w create new file:
if crear is True:
handler = logging.FileHandler(path, mode='w')
else:
handler = logging.FileHandler(path)
handler.setLevel(logging.DEBUG)
# create a logging format
formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
self.__logger__.addHandler(handler)
# Add
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(asctime)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
self.__logger__.addHandler(ch)
        except Exception:
self.__error__ = True
def debug(self, msg):
if (self.__error__):
print('DEBUG: {}'.format(msg))
else:
self.__logger__.debug(msg)
def info(self, msg):
if (self.__error__):
print('INFO: {}'.format(msg))
else:
self.__logger__.info(msg)
def warn(self, msg):
if (self.__error__):
print('WARN: {}'.format(msg))
else:
            self.__logger__.warning(msg)
def error(self, msg):
if (self.__error__):
print('ERROR: {}'.format(msg))
else:
self.__logger__.error(msg)
def critical(self, msg):
if (self.__error__):
print('CRITICAL: {}'.format(msg))
else:
self.__logger__.critical(msg)
| 2.984375
| 3
|
code/stats.py
|
Aklaran/trickingGame
| 0
|
12777289
|
<reponame>Aklaran/trickingGame
from direct.gui.DirectGui import *
from menu import Menu
class Stats(Menu):
def __init__(self):
super().__init__()
self.parentNode = aspect2d.attachNewNode('Stats')
self.backButton = DirectButton(text=("back"), scale = 0.25,
command=self.switchToMainMenu, parent=base.a2dTopLeft,
pos=(0.275,0,-0.225))
self.p1StatsList = None
self.p2StatsList = None
if base.player1.hasName():
self.p1StatsList = self.createStatsList(base.player1)
if base.player2.hasName():
self.p2StatsList = self.createStatsList(base.player2)
def createStatsList(self, player):
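        # Build a scrollable list showing the player's name and level, total
        # stamina, and one row per trick in their skill dictionary.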
## Following code block adapted from:
# https://moguri.github.io/panda-sphinx/programming-with-panda3d/directgui/directscrolledlist.html
numItemsVisible = 7
itemHeight = 0.11
if player == base.player1: listPos = (-1,0,0)
elif player == base.player2: listPos = (0.25,0,0)
statsList = DirectScrolledList(
decButton_pos=(0.35, 0, 0.53),
decButton_text="Dec",
decButton_text_scale=0.04,
decButton_borderWidth=(0.005, 0.005),
incButton_pos=(0.35, 0, -0.35),
incButton_text="Inc",
incButton_text_scale=0.04,
incButton_borderWidth=(0.005, 0.005),
frameSize=(0.0, 0.7, -0.40, 0.59),
frameColor=(0.5, 0.5, 0.5, 0.5),
pos=listPos,
numItemsVisible=numItemsVisible,
forceHeight=itemHeight,
itemFrame_frameSize=(-0.3, 0.3, -0.70, 0.11),
itemFrame_pos=(0.35, 0, 0.4),
parent=self.parentNode
)
nameAndLevel = player.getName() + " : lv" + player.getLevel()
nameLabel = DirectLabel(text=nameAndLevel,text_scale=0.1)
statsList.addItem(nameLabel)
stamLabel = DirectLabel(text=("stamina : " + player.getTotalStam()), text_scale=0.1)
statsList.addItem(stamLabel)
skillDict = player.getSkillDict()
for trick in skillDict:
s = str(trick + ": " + str(int(skillDict[trick])))
l = DirectLabel(text=s, text_scale=0.1)
statsList.addItem(l)
## End cited code block
return statsList
def destroy(self):
self.parentNode.removeNode()
self.backButton.removeNode()
if self.p1StatsList: self.p1StatsList.removeNode()
if self.p2StatsList: self.p2StatsList.removeNode()
| 2.453125
| 2
|
Day-111/list_index.py
|
arvimal/100DaysofCode-Python
| 1
|
12777290
|
#!/usr/bin/env python3
# Find the middle element in the list.
# Create a function called middle_element that has one parameter named lst.
# If there are an odd number of elements in lst, the function
# should return the middle element.
# If there are an even number of elements, the function should
# return the average of the middle two elements.
def middle_element(lst):
    mid = len(lst) // 2
    if len(lst) % 2 == 0:
        return (lst[mid] + lst[mid - 1]) / 2
    else:
        return lst[mid]
print(middle_element([5, 2, -10, -4, 4, 5]))
| 4.3125
| 4
|
Z3RO discord spammer/zspam.py
|
freebobuxsite/Z3RO-SERIES
| 0
|
12777291
|
<reponame>freebobuxsite/Z3RO-SERIES
import requests
import random
import time
"""
import requests
def join(token, server_invite):
header = {"authorization": token}
r = requests.post("https://discord.com/api/v8/invites/{}".format(server_invite), headers=header)
"""
tokens = []
am = int(input("Enter the amount of tokens you have/want to use."))
amn = am + 1
for y in range(1, amn):
token = str(input("Token {}: ".format(y)))
tokens.append(token)
qu = int(input("Do you want to join a specific server? \n1: Yes \n2: No"))
if (qu == 1):
invite = input("Enter server invite : ")
def join():
global auth
for auth in tokens:
header = {
'authorization': auth
}
requests.post("https://discord.com/api/v8/invites/{}".format(invite), headers=header)
print("Successfully server with {}".format(auth))
join()
print("Enter message: ")
message = input()
print("Enter channel ID")
chanID = input()
payload = {
'content': message
}
while True:
t = random.choice(tokens)
header = {
'authorization': t
}
url = 'https://discord.com/api/v8/channels/{}/messages'.format( chanID )
requests.post(url, headers=header, data=payload)
else:
print("Enter message: ")
message = input()
print("Enter channel ID")
chanID = input()
payload = {
'content': message
}
while True:
t = random.choice(tokens);header = {"authorization": t}
header = {
'authorization': t
}
url = 'https://discord.com/api/v8/channels/{}/messages'.format( chanID )
requests.post(url, headers=header, data=payload)
| 3.0625
| 3
|
code_dev/ipynbs/feature_visualizer.py
|
liud16/capstone18
| 0
|
12777292
|
import matplotlib.pyplot as plt
import pandas as pd
def visualize(peak_dict):
for i in range(len(peak_dict)):
df = pd.DataFrame(peak_dict['peak_%s' % i],
columns=['Position', 'Height', 'Width', 'Time'])
plt.subplot(3, 1, 1)
plt.plot(df['Time'], df['Height'])
plt.title('Peak %s Dynamics' % (i+1))
plt.ylabel('Intensity')
plt.subplot(3, 1, 2)
plt.plot(df['Time'], df['Position'])
plt.ylabel('Position')
plt.subplot(3, 1, 3)
plt.plot(df['Time'], df['Width'])
plt.ylabel('Width')
plt.xlabel('Time')
plt.show()
return
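# Illustrative example of the expected input (hypothetical values; the structure is
# inferred from the keys 'peak_<i>' and the DataFrame columns used above):
# peak_dict = {'peak_0': [[10.2, 1.5, 0.3, 0.0],
#                         [10.4, 1.7, 0.4, 1.0]]}  # rows of [Position, Height, Width, Time]
# visualize(peak_dict)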
| 3.484375
| 3
|
frontend/manageFrontendDowntimes.py
|
ddbox/glideinwms
| 0
|
12777293
|
#!/usr/bin/env python3
# SPDX-FileCopyrightText: 2009 Fermi Research Alliance, LLC
# SPDX-License-Identifier: Apache-2.0
import os
import os.path
import re
import string
import sys
import time
from glideinwms.frontend import glideinFrontendConfig, glideinFrontendDowntimeLib
def usage():
print("Usage:")
print(" manageFrontendDowntimes.py -dir frontend_dir -cmd [command] [options]")
print("where command is one of:")
print(" add - Add a scheduled downtime period")
print(" down - Put the factory down now(+delay)")
print(" up - Get the factory back up now(+delay)")
print(" check - Report if the factory is in downtime now(+delay)")
print("Other options:")
print(" -start [[[YYYY-]MM-]DD-]HH:MM[:SS] (start time for adding a downtime)")
print(" -end [[[YYYY-]MM-]DD-]HH:MM[:SS] (end time for adding a downtime)")
print(" -delay [HHh][MMm][SS[s]] (delay a downtime for down, up, and check cmds)")
# [[[YYYY-]MM-]DD-]HH:MM[:SS]
def strtxt2time(timeStr):
deftime = time.localtime(time.time())
year = deftime[0]
month = deftime[1]
day = deftime[2]
seconds = 0
darr = timeStr.split("-") # [[[YYYY-]MM-]DD-]HH:MM[:SS]
if len(darr) > 1: # we have at least part of the date
timeStr = darr[-1]
day = int(darr[-2])
if len(darr) > 2:
month = int(darr[-3])
if len(darr) > 3:
year = int(darr[-4])
tarr = timeStr.split(":")
hours = int(tarr[0])
minutes = int(tarr[1])
if len(tarr) > 2:
seconds = int(tarr[2])
outtime = time.mktime((year, month, day, hours, minutes, seconds, 0, 0, -1))
return outtime # this is epoch format
# [[[YYYY-]MM-]DD-]HH:MM[:SS]
# or
# unix_time
def str2time(timeStr):
if len(timeStr.split(":", 1)) > 1:
return strtxt2time(timeStr) # has a :, so it must be a text representation
else:
print(timeStr)
return int(timeStr) # should be a simple number
# [HHh][MMm][SS[s]]
def delay2time(delayStr):
hours = 0
minutes = 0
seconds = 0
# getting hours
harr = delayStr.split("h", 1)
if len(harr) == 2:
hours = int(harr[0])
delayStr = harr[1]
# getting minutes
marr = delayStr.split("m", 1)
if len(marr) == 2:
minutes = int(marr[0])
delayStr = marr[1]
# getting seconds
if delayStr[-1:] == "s":
delayStr = delayStr[:-1] # remove final s if present
if len(delayStr) > 0:
seconds = int(delayStr)
return seconds + 60 * (minutes + 60 * hours)
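# Worked examples for the delay format above (not part of the original script):
# delay2time("1h30m") -> 5400 seconds, delay2time("2m") -> 120, delay2time("90s") -> 90.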
def get_downtime_fd(work_dir):
frontendDescript = glideinFrontendConfig.FrontendDescript(work_dir)
fd = glideinFrontendDowntimeLib.DowntimeFile(os.path.join(work_dir, frontendDescript.data["DowntimesFile"]))
return fd
# major commands
def add(opt_dict):
# glideinFrontendDowntimeLib.DowntimeFile( self.elementDescript.frontend_data['DowntimesFile'] )
down_fd = get_downtime_fd(opt_dict["dir"])
start_time = str2time(opt_dict["start"])
end_time = str2time(opt_dict["end"])
down_fd.addPeriod(start_time=start_time, end_time=end_time)
return 0
# this calls checkDowntime(with delayed_start_time ) first and then startDowntime(with delayed_start_time and end_time)
def down(opt_dict):
down_fd = get_downtime_fd(opt_dict["dir"])
when = delay2time(opt_dict["delay"])
if opt_dict["start"] == "None":
when += int(time.time())
else:
# delay applies only to the start time
when += str2time(opt_dict["start"])
if opt_dict["end"] == "None":
end_time = None
else:
end_time = str2time(opt_dict["end"])
if not down_fd.checkDowntime(check_time=when):
# only add a new line if not in downtime at that time
return down_fd.startDowntime(start_time=when, end_time=end_time)
else:
print("Frontend is already down. ")
return 0
# calls endDowntime( with end_time only )
def up(opt_dict):
down_fd = get_downtime_fd(opt_dict["dir"])
when = delay2time(opt_dict["delay"])
if opt_dict["end"] == "None":
when += int(time.time())
else:
# delay applies only to the end time
when += str2time(opt_dict["end"])
rtn = down_fd.endDowntime(end_time=when)
if rtn > 0:
return 0
else:
print("Frontend is not in downtime.")
return 1
def printtimes(opt_dict):
down_fd = get_downtime_fd(opt_dict["dir"])
when = delay2time(opt_dict["delay"]) + int(time.time())
down_fd.printDowntime(check_time=when)
def get_args(argv):
opt_dict = {"comment": "", "sec": "All", "delay": "0", "end": "None", "start": "None", "frontend": "All"}
index = 0
for arg in argv:
if len(argv) <= index + 1:
continue
if arg == "-cmd":
opt_dict["cmd"] = argv[index + 1]
if arg == "-dir":
opt_dict["dir"] = argv[index + 1]
if arg == "-start":
opt_dict["start"] = argv[index + 1]
if arg == "-end":
opt_dict["end"] = argv[index + 1]
if arg == "-delay":
opt_dict["delay"] = argv[index + 1]
index = index + 1
return opt_dict
def main(argv):
if len(argv) < 3:
usage()
return 1
# Get the command line arguments
opt_dict = get_args(argv)
try:
frontend_dir = opt_dict["dir"]
cmd = opt_dict["cmd"]
except KeyError as e:
usage()
print("-cmd -dir argument is required.")
return 1
try:
os.chdir(frontend_dir)
except OSError as e:
usage()
print("Failed to locate factory %s" % frontend_dir)
print("%s" % e)
return 1
if cmd == "add":
return add(opt_dict)
elif cmd == "down":
return down(opt_dict)
elif cmd == "up":
return up(opt_dict)
elif cmd == "check":
return printtimes(opt_dict)
else:
usage()
print("Invalid command %s" % cmd)
return 1
if __name__ == "__main__":
sys.exit(main(sys.argv))
| 2.328125
| 2
|
airbnb.py
|
johnliu4/kaggle-airbnb-ny
| 0
|
12777294
|
import csv
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as tck
from PIL import Image
# for testing purposes, remove this later!
from sys import exit
"""Data visualization on the Airbnb New York dataset from Kaggle.
The dataset provides 16 pieces of data in the following order:
0: id
1: name
2: host_id
3: host_name
4: neighbourhood_group
5: neighbourhood
6: latitude
7: longitude
8: room_type
9: price
10: minimum_nights
11: number_of_reviews
12: last_review
13: reviews_per_month
14: calculated_host_listings_count
15: availability_365
All fields are fairly self-explanatory. I will not be using the 'id' or the
'host_id' field since they are not relevant, and the 'name' field since it does
not make sense to use in this context.
This project is fully open source and free to use and share. Enjoy!
"""
header = []
data = {}
num_columns = 16
num_entries = 0
with open('new_york_data.csv', encoding='utf-8') as csv_file:
reader = csv.reader(csv_file, delimiter=',')
# read the header
header = next(reader)
# read the entries
body = []
for row in reader:
body.append(row)
num_entries = len(body)
# parse the entries into np arrays and store them under in the data list
for i in range(num_columns):
dtype = 'str'
# price, minimum nights, number of reviews
# calculated host listings count, annual availability
if i == 9 or i == 10 or i == 11 or i == 14 or i == 15:
dtype = 'int64'
# latitude, longitude, review per month
if i == 6 or i == 7 or i == 13:
dtype = 'float64'
# reviews per month is blank sometimes in the original dataset
if i == 13:
# numpy cannot process empty strings to floats; so check for this
col_data = np.asarray([body[j][i] if len(body[j][i]) > 0 else 0.0 for j in range(num_entries)], dtype=dtype)
else:
col_data = np.asarray([body[j][i] for j in range(num_entries)], dtype=dtype)
data[header[i]] = col_data
# Area that the map covers; experimentally determined
# (latitude, longitude)
min_coords = (40.49279, -74.26442)
max_coords = (40.91906, -73.68299)
long_range = max_coords[1] - min_coords[1]
lat_range = max_coords[0] - min_coords[0]
image_extent = (min_coords[1], max_coords[1], min_coords[0], max_coords[0])
new_york_img = Image.open('new_york_map.png')
# use large figure sizes
matplotlib.rcParams['figure.figsize'] = (12, 7)
# Room Type Bar Graph
room_types, room_types_count = np.unique(data['room_type'], return_counts=True)
plt.title('Distribution of Room Types')
room_types_norm = room_types_count / sum(room_types_count)
plt.barh(room_types, room_types_norm)
ax = plt.gca()
ax.xaxis.set_major_formatter(tck.FuncFormatter(lambda x, _: '{:.0%}'.format(x)))
plt.show()
# Neighbourhood Groups
n_groups, n_groups_count = np.unique(data['neighbourhood_group'], return_counts=True)
n_groups_colors = ['#1a535c', '#4ecdc4', '#b2ff66', '#ff6b6b', '#ffe66d']
explode = np.zeros((len(n_groups),), dtype='float64')
for idx, group in enumerate(n_groups):
if group == 'Manhattan':
explode[idx] = 0.1
break
plt.title('Distribution of Neighbourhood Groups')
wedges, texts, _ = plt.pie(
n_groups_count,
labels=n_groups,
explode=explode,
autopct='%1.1f%%',
pctdistance=0.8,
colors=n_groups_colors)
plt.show()
# Neighbourhoods
nbhs, nbhs_count = np.unique(data['neighbourhood'], return_counts=True)
# zip the neighbourhood name and count into a tuple to sort by count
nbhs_sorted_tuples = sorted(list(zip(nbhs, nbhs_count)), key=lambda elem: elem[1], reverse=True)
# unzip the sorted tuples back into a list of names and a list of counts
nbhs_sorted, nbhs_sorted_count = list(zip(*nbhs_sorted_tuples))
# take only the top 20
nbhs_sorted = nbhs_sorted[:20]
nbhs_sorted_count = nbhs_sorted_count[:20]
nbhs_price_avgs = []
for nbh in nbhs_sorted:
prices = data['price'][data['neighbourhood'] == nbh]
nbhs_price_avgs.append(np.average(prices))
fig, ax1 = plt.subplots()
plt.title('Most Popular Neighbourhoods and Average Price')
# pad the bottom of the plot to prevent text clipping
plt.subplots_adjust(bottom=0.2)
# rotate the labels so that they are easier to read
ax1.set_xticklabels(nbhs_sorted, rotation=45, ha='right')
ax1.set_xlabel('Neighbourhood')
# plot number of places on the left y-axis
ax1.bar(nbhs_sorted, nbhs_sorted_count, width=-0.2, align='edge')
ax1.set_ylabel('Number of places (blue)')
# plot average price on the right y-axis
ax2 = ax1.twinx()
ax2.bar(nbhs_sorted, nbhs_price_avgs, width=0.2, align='edge', color='orange')
ax2.set_ylabel('Average price (orange)')
plt.show()
# Price Histogram
group_prices = []
# separate the price data based on neighbourhood groups
for group in n_groups:
group_prices.append(data['price'][data['neighbourhood_group'] == group])
# plot the price data for each group separately as stacked bars
# use only prices less than 500 since most of the data belongs in this range
# this also lets us not worry about huge outliers (there are a few places whose
# nightly price is in the many thousands)
plt.hist(
group_prices,
histtype='barstacked',
bins=25,
range=(0, 500),
edgecolor='white',
color=n_groups_colors)
plt.legend(n_groups, loc='upper right')
plt.title('Distribution of Price per Night')
plt.xlim(0, 500)
plt.ylabel('Number of places')
plt.xlabel('Price range (USD)')
plt.show()
# Average Price Heatmap
# compute the average pricing over a grid of 150 by 150
price_heatmap_bins = 150
price_heatmap_sum = np.zeros((price_heatmap_bins, price_heatmap_bins), dtype='float64')
price_heatmap_count = np.zeros((price_heatmap_bins, price_heatmap_bins), dtype='float64')
for long, lat, price in zip(data['longitude'], data['latitude'], data['price']):
# take only prices below 500 to be consistent with price histogram
if price < 500:
idx_long = int((long - min_coords[1]) / long_range * price_heatmap_bins)
idx_lat = int((lat - min_coords[0]) / lat_range * price_heatmap_bins)
price_heatmap_sum[idx_lat, idx_long] += price
price_heatmap_count[idx_lat, idx_long] += 1
# ensure that a divide by zero will not occur
price_heatmap_count = np.clip(price_heatmap_count, 1, None)
price_heatmap = price_heatmap_sum / price_heatmap_count
plt.imshow(new_york_img, extent=image_extent)
plt.imshow(price_heatmap, extent=image_extent, origin='lower', alpha=0.9)
plt.colorbar()
plt.title('Average Price per Night Heatmap')
plt.show()
# Housing Scatter Plot
plt.imshow(new_york_img, extent=image_extent)
# divide locations based on groups and display them as a scatter on the New York map
for group, color in zip(n_groups, n_groups_colors):
plt.scatter(
data['longitude'][data['neighbourhood_group'] == group],
data['latitude'][data['neighbourhood_group'] == group],
s=2,
color=color)
plt.legend(n_groups, loc='upper left', markerscale=5)
plt.title('Plot of Housing Locations')
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.show()
# Housing Heatmap
plt.imshow(new_york_img, extent=image_extent)
plt.hist2d(data['longitude'], data['latitude'], bins=150, alpha=0.7)
plt.title('Heatmap of Housing Locations')
plt.colorbar()
plt.xlabel('Longitude')
plt.ylabel('Latitude')
plt.show()
# Minimum Nights Distribution
group_min_nights = []
# separate the price data based on neighbourhood groups
for group in n_groups:
group_min_nights.append(data['minimum_nights'][data['neighbourhood_group'] == group])
# plot the price data for each group separately as stacked bars
plt.hist(
group_min_nights,
histtype='barstacked',
bins=20,
range=(1, 21),
edgecolor='white',
color=n_groups_colors)
plt.title('Minimum Number of Nights Required')
plt.legend(n_groups, loc='upper right')
plt.xlim(1, 21)
plt.xticks(np.arange(1, 21))
plt.xlabel('Minimum Nights')
plt.ylabel('Number of Places')
plt.show()
# Number of Reviews
# compute the average number of reviews over a grid of 150 by 150
num_reviews_bins = 150
num_reviews_sum = np.zeros((num_reviews_bins, num_reviews_bins), dtype='float64')
num_reviews_count = np.zeros((num_reviews_bins, num_reviews_bins), dtype='float64')
for long, lat, num_rev in zip(data['longitude'], data['latitude'], data['number_of_reviews']):
    idx_long = int((long - min_coords[1]) / long_range * num_reviews_bins)
    idx_lat = int((lat - min_coords[0]) / lat_range * num_reviews_bins)
    num_reviews_sum[idx_lat, idx_long] += num_rev
num_reviews_count[idx_lat, idx_long] += 1
# ensure that a divide by zero will not occur
num_reviews_count = np.clip(num_reviews_count, 1, None)
num_reviews = num_reviews_sum / num_reviews_count
plt.imshow(new_york_img, extent=image_extent)
plt.imshow(num_reviews, extent=image_extent, origin='lower', alpha=0.9)
plt.colorbar()
plt.title('Average Number of Reviews Heatmap')
plt.show()
| 3.234375
| 3
|
musicscore/musicxml/attributes/textdecoration.py
|
alexgorji/music_score
| 2
|
12777295
|
from musicscore.musicxml.attributes.attribute_abstract import AttributeAbstract
class Underline(AttributeAbstract):
""""""
def __init__(self, underline=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.generate_attribute('underline', underline, "TypeNumberOfLines")
class Overline(AttributeAbstract):
""""""
def __init__(self, overline=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.generate_attribute('overline', overline, "TypeNumberOfLines")
class LineThrough(AttributeAbstract):
""""""
def __init__(self, line_through=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.generate_attribute('line-through', line_through, "TypeNumberOfLines")
class TextDecoration(Underline, Overline, LineThrough):
"""
The text-decoration attribute group is based on the similar feature in XHTML and CSS. It allows for text to be
    underlined, overlined, or struck-through. It extends the CSS version by allowing double or triple lines instead of just
being on or off.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
| 3.25
| 3
|
src/camera.py
|
trenchant7/Face_Recognition
| 9
|
12777296
|
# -*- coding: utf-8 -*-
import cv2
import argparse
import time
import numpy as np
from training import Model
classes = []
FRAME_SIZE = 256
font = cv2.FONT_HERSHEY_SIMPLEX
switch = False
def detect(image):
crop_image = image[112:112 + FRAME_SIZE, 192:192 + FRAME_SIZE]
result = model.predict(crop_image)
index = np.argmax(result)
cv2.putText(image, classes[index], (192, 112), font, 1, (0, 255, 0), 2)
def crop_save(image):
crop_image = image[112 + 2:112 + FRAME_SIZE - 2, 192 + 2:192 + FRAME_SIZE - 2]
timestamp = str(time.time())
cv2.imwrite(
        'C:\\Users\\Akira.DESKTOP-HM7OVCC\\Desktop\\database\\' + timestamp + '.png',
crop_image,
(cv2.IMWRITE_PNG_COMPRESSION, 0)
)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_dir',
type=str,
help='folder contains model and labels'
)
args = parser.parse_args()
if args.model_dir:
model = Model()
try:
            model.load(file_path=args.model_dir + '\\model.h5')
            with open(args.model_dir + '\\labels.txt', 'r') as f:
for line in f.readlines():
classes.append(line.strip())
except OSError as e:
print("<--------------------Unable to open file-------------------->\n", e)
else:
cv2.namedWindow('Video')
            # open the camera
capture = cv2.VideoCapture(0)
while capture.isOpened():
_, frame = capture.read()
cv2.rectangle(frame, (192, 112), (192 + FRAME_SIZE, 112 + FRAME_SIZE), (0, 255, 0), 2)
if switch:
detect(frame)
cv2.imshow('Video', frame)
key = cv2.waitKey(10)
if key == ord('z'):
switch = True
elif key == ord('d'):
switch = False
elif key == ord('s'):
crop_save(frame)
elif key == ord('q'): # exit
break
capture.release()
cv2.destroyWindow('Video')
else:
print('Input no found\nTry "python predict.py -h" for more information')
| 2.859375
| 3
|
advent2017_day2.py
|
coandco/advent2017
| 0
|
12777297
|
<filename>advent2017_day2.py
INPUT = """1919 2959 82 507 3219 239 3494 1440 3107 259 3544 683 207 562 276 2963
587 878 229 2465 2575 1367 2017 154 152 157 2420 2480 138 2512 2605 876
744 6916 1853 1044 2831 4797 213 4874 187 6051 6086 7768 5571 6203 247 285
1210 1207 1130 116 1141 563 1056 155 227 1085 697 735 192 1236 1065 156
682 883 187 307 269 673 290 693 199 132 505 206 231 200 760 612
1520 95 1664 1256 685 1446 253 88 92 313 754 1402 734 716 342 107
146 1169 159 3045 163 3192 1543 312 161 3504 3346 3231 771 3430 3355 3537
177 2129 3507 3635 2588 3735 3130 980 324 266 1130 3753 175 229 517 3893
4532 164 191 5169 4960 3349 3784 3130 5348 5036 2110 151 5356 193 1380 3580
2544 3199 3284 3009 3400 953 3344 3513 102 1532 161 143 2172 2845 136 2092
194 5189 3610 4019 210 256 5178 4485 5815 5329 5457 248 5204 4863 5880 3754
3140 4431 4534 4782 3043 209 216 5209 174 161 3313 5046 1160 160 4036 111
2533 140 4383 1581 139 141 2151 2104 2753 4524 4712 866 3338 2189 116 4677
1240 45 254 1008 1186 306 633 1232 1457 808 248 1166 775 1418 1175 287
851 132 939 1563 539 1351 1147 117 1484 100 123 490 152 798 1476 543
1158 2832 697 113 121 397 1508 118 2181 2122 809 2917 134 2824 3154 2791"""
def calc_checksum(num_list):
return max(num_list) - min(num_list)
def calc_checksum_two(num_list):
for i, first in enumerate(num_list):
for j, second in enumerate(num_list):
if i == j:
continue
            if first % second == 0:
                return first // second
checksums = []
new_checksums = []
for line in INPUT.split("\n"):
checksums.append(calc_checksum([int(x) for x in line.strip().split()]))
new_checksums.append(calc_checksum_two([int(x) for x in line.strip().split()]))
print("Line checksums: %r" % checksums)
print("New line checksums: %r" % new_checksums)
print("Total checksum: %d" % sum(checksums))
print("Total new checksum: %d" % sum(new_checksums))
| 2.140625
| 2
|
hknweb/alumni/migrations/0003_auto_20220303_1955.py
|
jyxzhang/hknweb
| 0
|
12777298
|
<filename>hknweb/alumni/migrations/0003_auto_20220303_1955.py
# Generated by Django 2.2.8 on 2022-03-04 03:55
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('alumni', '0002_auto_20220228_1252'),
]
operations = [
migrations.AlterField(
model_name='alumnus',
name='salary',
field=models.IntegerField(blank=True, default=0, validators=[django.core.validators.MinValueValidator(0)]),
),
]
| 1.421875
| 1
|
sockets/bank_socket.py
|
aneeshads/EPAi4.0-Capstone
| 0
|
12777299
|
import bpy
import threading, time
from bpy.props import IntProperty, FloatProperty, StringProperty, FloatVectorProperty, CollectionProperty, EnumProperty
from bpy.types import NodeTree, Node, NodeSocket
class MyCustomSocketBank(NodeSocket):
'''Custom node socket type for creating data input points for bank information.'''
bl_idname = 'CustomSocketTypeBank'
bl_label = "Bank Information"
def update_bank_socket(self, context):
'''This function updates the output of the current node.'''
self.node.update()
bank_country: bpy.props.BoolProperty(name="Bank Country", update=update_bank_socket)
bank_items = (
('BBAN', "BBAN", "Basic Bank Account Number"),
('IBAN', "IBAN", "International Bank Account Number"),
)
bank_type: bpy.props.EnumProperty(
name="Account Type",
description="Choose the account information required",
items=bank_items,
default='BBAN',
update=update_bank_socket
)
def draw(self, context, layout, node, text):
'''This function creates the labels for the socket panels within the node.'''
if self.is_output or self.is_linked:
layout.label(text=text)
else:
layout.label(text="Bank")
layout.prop(self, "bank_country", text="Country")
layout.prop(self, "bank_type", text="Account Type")
def draw_color(self, context, node):
'''This function determines the colour of the input and output points within the socket.'''
return (1.0, 0.4, 0.216, 0.5)
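# Hypothetical registration sketch (not part of the original file): a custom
# NodeSocket subclass normally has to be registered with Blender before it can be
# used inside a node tree.
def register():
    bpy.utils.register_class(MyCustomSocketBank)
def unregister():
    bpy.utils.unregister_class(MyCustomSocketBank)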
| 2.734375
| 3
|
sdk/python/pulumi_azure_native/network/v20151101/outputs.py
|
sebtelko/pulumi-azure-native
| 0
|
12777300
|
<gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'DnsConfigResponse',
'EndpointResponse',
'MonitorConfigResponse',
]
@pulumi.output_type
class DnsConfigResponse(dict):
"""
Class containing DNS settings in a Traffic Manager profile.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "relativeName":
suggest = "relative_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DnsConfigResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DnsConfigResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DnsConfigResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
fqdn: Optional[str] = None,
relative_name: Optional[str] = None,
ttl: Optional[float] = None):
"""
Class containing DNS settings in a Traffic Manager profile.
:param str fqdn: Gets or sets the fully-qualified domain name (FQDN) of the Traffic Manager profile. This is formed from the concatenation of the RelativeName with the DNS domain used by Azure Traffic Manager.
:param str relative_name: Gets or sets the relative DNS name provided by this Traffic Manager profile. This value is combined with the DNS domain name used by Azure Traffic Manager to form the fully-qualified domain name (FQDN) of the profile.
:param float ttl: Gets or sets the DNS Time-To-Live (TTL), in seconds. This informs the local DNS resolvers and DNS clients how long to cache DNS responses provided by this Traffic Manager profile.
"""
if fqdn is not None:
pulumi.set(__self__, "fqdn", fqdn)
if relative_name is not None:
pulumi.set(__self__, "relative_name", relative_name)
if ttl is not None:
pulumi.set(__self__, "ttl", ttl)
@property
@pulumi.getter
def fqdn(self) -> Optional[str]:
"""
Gets or sets the fully-qualified domain name (FQDN) of the Traffic Manager profile. This is formed from the concatenation of the RelativeName with the DNS domain used by Azure Traffic Manager.
"""
return pulumi.get(self, "fqdn")
@property
@pulumi.getter(name="relativeName")
def relative_name(self) -> Optional[str]:
"""
Gets or sets the relative DNS name provided by this Traffic Manager profile. This value is combined with the DNS domain name used by Azure Traffic Manager to form the fully-qualified domain name (FQDN) of the profile.
"""
return pulumi.get(self, "relative_name")
@property
@pulumi.getter
def ttl(self) -> Optional[float]:
"""
Gets or sets the DNS Time-To-Live (TTL), in seconds. This informs the local DNS resolvers and DNS clients how long to cache DNS responses provided by this Traffic Manager profile.
"""
return pulumi.get(self, "ttl")
@pulumi.output_type
class EndpointResponse(dict):
"""
Class representing a Traffic Manager endpoint.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "endpointLocation":
suggest = "endpoint_location"
elif key == "endpointMonitorStatus":
suggest = "endpoint_monitor_status"
elif key == "endpointStatus":
suggest = "endpoint_status"
elif key == "minChildEndpoints":
suggest = "min_child_endpoints"
elif key == "targetResourceId":
suggest = "target_resource_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in EndpointResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
EndpointResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
EndpointResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
endpoint_location: Optional[str] = None,
endpoint_monitor_status: Optional[str] = None,
endpoint_status: Optional[str] = None,
id: Optional[str] = None,
min_child_endpoints: Optional[float] = None,
name: Optional[str] = None,
priority: Optional[float] = None,
target: Optional[str] = None,
target_resource_id: Optional[str] = None,
type: Optional[str] = None,
weight: Optional[float] = None):
"""
Class representing a Traffic Manager endpoint.
:param str endpoint_location: Specifies the location of the external or nested endpoints when using the ‘Performance’ traffic routing method.
:param str endpoint_monitor_status: Gets or sets the monitoring status of the endpoint.
        :param str endpoint_status: Gets or sets the status of the endpoint. If the endpoint is Enabled, it is probed for endpoint health and is included in the traffic routing method. Possible values are 'Enabled' and 'Disabled'.
:param str id: Gets or sets the ID of the Traffic Manager endpoint.
:param float min_child_endpoints: Gets or sets the minimum number of endpoints that must be available in the child profile in order for the parent profile to be considered available. Only applicable to endpoint of type 'NestedEndpoints'.
:param str name: Gets or sets the name of the Traffic Manager endpoint.
:param float priority: Gets or sets the priority of this endpoint when using the ‘Priority’ traffic routing method. Possible values are from 1 to 1000, lower values represent higher priority. This is an optional parameter. If specified, it must be specified on all endpoints, and no two endpoints can share the same priority value.
:param str target: Gets or sets the fully-qualified DNS name of the endpoint. Traffic Manager returns this value in DNS responses to direct traffic to this endpoint.
        :param str target_resource_id: Gets or sets the Azure Resource URI of the endpoint. Not applicable to endpoints of type 'ExternalEndpoints'.
:param str type: Gets or sets the endpoint type of the Traffic Manager endpoint.
:param float weight: Gets or sets the weight of this endpoint when using the 'Weighted' traffic routing method. Possible values are from 1 to 1000.
"""
if endpoint_location is not None:
pulumi.set(__self__, "endpoint_location", endpoint_location)
if endpoint_monitor_status is not None:
pulumi.set(__self__, "endpoint_monitor_status", endpoint_monitor_status)
if endpoint_status is not None:
pulumi.set(__self__, "endpoint_status", endpoint_status)
if id is not None:
pulumi.set(__self__, "id", id)
if min_child_endpoints is not None:
pulumi.set(__self__, "min_child_endpoints", min_child_endpoints)
if name is not None:
pulumi.set(__self__, "name", name)
if priority is not None:
pulumi.set(__self__, "priority", priority)
if target is not None:
pulumi.set(__self__, "target", target)
if target_resource_id is not None:
pulumi.set(__self__, "target_resource_id", target_resource_id)
if type is not None:
pulumi.set(__self__, "type", type)
if weight is not None:
pulumi.set(__self__, "weight", weight)
@property
@pulumi.getter(name="endpointLocation")
def endpoint_location(self) -> Optional[str]:
"""
Specifies the location of the external or nested endpoints when using the ‘Performance’ traffic routing method.
"""
return pulumi.get(self, "endpoint_location")
@property
@pulumi.getter(name="endpointMonitorStatus")
def endpoint_monitor_status(self) -> Optional[str]:
"""
Gets or sets the monitoring status of the endpoint.
"""
return pulumi.get(self, "endpoint_monitor_status")
@property
@pulumi.getter(name="endpointStatus")
def endpoint_status(self) -> Optional[str]:
"""
        Gets or sets the status of the endpoint. If the endpoint is Enabled, it is probed for endpoint health and is included in the traffic routing method. Possible values are 'Enabled' and 'Disabled'.
"""
return pulumi.get(self, "endpoint_status")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Gets or sets the ID of the Traffic Manager endpoint.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="minChildEndpoints")
def min_child_endpoints(self) -> Optional[float]:
"""
Gets or sets the minimum number of endpoints that must be available in the child profile in order for the parent profile to be considered available. Only applicable to endpoint of type 'NestedEndpoints'.
"""
return pulumi.get(self, "min_child_endpoints")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Gets or sets the name of the Traffic Manager endpoint.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def priority(self) -> Optional[float]:
"""
Gets or sets the priority of this endpoint when using the ‘Priority’ traffic routing method. Possible values are from 1 to 1000, lower values represent higher priority. This is an optional parameter. If specified, it must be specified on all endpoints, and no two endpoints can share the same priority value.
"""
return pulumi.get(self, "priority")
@property
@pulumi.getter
def target(self) -> Optional[str]:
"""
Gets or sets the fully-qualified DNS name of the endpoint. Traffic Manager returns this value in DNS responses to direct traffic to this endpoint.
"""
return pulumi.get(self, "target")
@property
@pulumi.getter(name="targetResourceId")
def target_resource_id(self) -> Optional[str]:
"""
        Gets or sets the Azure Resource URI of the endpoint. Not applicable to endpoints of type 'ExternalEndpoints'.
"""
return pulumi.get(self, "target_resource_id")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
Gets or sets the endpoint type of the Traffic Manager endpoint.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def weight(self) -> Optional[float]:
"""
Gets or sets the weight of this endpoint when using the 'Weighted' traffic routing method. Possible values are from 1 to 1000.
"""
return pulumi.get(self, "weight")
@pulumi.output_type
class MonitorConfigResponse(dict):
"""
Class containing endpoint monitoring settings in a Traffic Manager profile.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "profileMonitorStatus":
suggest = "profile_monitor_status"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in MonitorConfigResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
MonitorConfigResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
MonitorConfigResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
path: Optional[str] = None,
port: Optional[float] = None,
profile_monitor_status: Optional[str] = None,
protocol: Optional[str] = None):
"""
Class containing endpoint monitoring settings in a Traffic Manager profile.
:param str path: Gets or sets the path relative to the endpoint domain name used to probe for endpoint health.
:param float port: Gets or sets the TCP port used to probe for endpoint health.
:param str profile_monitor_status: Gets or sets the profile-level monitoring status of the Traffic Manager profile.
:param str protocol: Gets or sets the protocol (HTTP or HTTPS) used to probe for endpoint health.
"""
if path is not None:
pulumi.set(__self__, "path", path)
if port is not None:
pulumi.set(__self__, "port", port)
if profile_monitor_status is not None:
pulumi.set(__self__, "profile_monitor_status", profile_monitor_status)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
@property
@pulumi.getter
def path(self) -> Optional[str]:
"""
Gets or sets the path relative to the endpoint domain name used to probe for endpoint health.
"""
return pulumi.get(self, "path")
@property
@pulumi.getter
def port(self) -> Optional[float]:
"""
Gets or sets the TCP port used to probe for endpoint health.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter(name="profileMonitorStatus")
def profile_monitor_status(self) -> Optional[str]:
"""
Gets or sets the profile-level monitoring status of the Traffic Manager profile.
"""
return pulumi.get(self, "profile_monitor_status")
@property
@pulumi.getter
def protocol(self) -> Optional[str]:
"""
Gets or sets the protocol (HTTP or HTTPS) used to probe for endpoint health.
"""
return pulumi.get(self, "protocol")
| 1.796875
| 2
|
package/awesome_streamlit/experiments/__init__.py
|
R-fred/awesome-streamlit
| 1,194
|
12777301
|
"""Imports that should be exposed outside the package"""
from .hello_world import write as write_hello_world
| 1.335938
| 1
|
openktn/native/microstate.py
|
uibcdf/OpenKinNet
| 0
|
12777302
|
<gh_stars>0
class Microstate():
def __init__(self):
self.index = None
self.label = None
self.weight = 0.0
self.probability = 0.0
self.basin = None
self.coordinates = None
self.color = None
self.size = None
| 2.28125
| 2
|
examples/multiples.py
|
PictElm/grom
| 1
|
12777303
|
<filename>examples/multiples.py
from grom import Genome, util
util.DEBUG = False
# file size: 0x20 (32 char, raw text)
# mapping along words in first file (a.txt)
P = [
('ceci', range(0x00, 0x05)),
('est', range(0x05, 0x09)),
('un', range(0x09, 0x0C)),
('txt', range(0x0C, 0x10)),
('de', range(0x10, 0x13)),
('32', range(0x13, 0x16)),
('caracteres', range(0x16, 0x20))
]
a = Genome("examples\\src\\a.txt").partition(P)
b = Genome("examples\\src\\b.txt").partition(P)
a(file="dump\\output0.txt")
a.crossover(b)("dump\\output1.txt")
a.format(['32', 'caracteres', 'est', 'ceci', 'de', 'un', 'txt'])("dump\\output2.txt")
a.mutate(.5, 5, ['ceci', 'caracteres'])("dump\\output3.txt")
| 2.125
| 2
|
pymatflow/vasp/base/intraband.py
|
DeqiTang/pymatflow
| 6
|
12777304
|
intraband_incharge = {
"WEIMIN": None,
"EBREAK": None,
"DEPER": None,
"TIME": None,
}
| 1.257813
| 1
|
searchlet/ds/PriorityQueue.py
|
DavidMChan/searchlet
| 1
|
12777305
|
# Copyright (c) 2018 <NAME>
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
import itertools
import heapq
from typing import List, Any, Union
class PriorityQueue(object):
REMOVED = '<removed-element>'
EXISTS_LOWER_PRIORITY = 1
EXISTS_UPDATED = 2
NONEXIST = 0
def __init__(self, ) -> None:
self.memory: List[Any] = []
self.counter = itertools.count()
self.size = 0
self.map = {}
def push(self, element: Any, priority: Union[float, int]=0) -> int:
return_value = PriorityQueue.NONEXIST
if element in self.map:
if self.map[element][0] < priority:
return PriorityQueue.EXISTS_LOWER_PRIORITY
self.remove_element(element)
return_value = PriorityQueue.EXISTS_UPDATED
else:
self.size += 1
count = next(self.counter)
entry = [priority, count, element]
self.map[element] = entry
heapq.heappush(self.memory, entry)
return return_value
def remove_element(self, element) -> None:
entry = self.map.pop(element)
entry[-1] = PriorityQueue.REMOVED
def pop(self, ) -> Any:
while self.memory:
priority, _, element = heapq.heappop(self.memory)
if element is not PriorityQueue.REMOVED:
del self.map[element]
self.size -= 1
return (priority, element)
raise KeyError("Tried to pop from an empty queue")
def empty(self, ) -> bool:
return self.size <= 0
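# Hypothetical usage sketch (not part of the original module): push() acts as a
# decrease-key when an element is re-pushed with a better (lower) priority, and
# pop() returns (priority, element) pairs in ascending priority order.
if __name__ == "__main__":
    pq = PriorityQueue()
    pq.push("a", 5)
    pq.push("b", 3)
    pq.push("a", 1)  # replaces the earlier entry for "a" with priority 1
    while not pq.empty():
        print(pq.pop())  # (1, 'a') then (3, 'b')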
| 3.5
| 4
|
giraf/const.py
|
amol9/imgur
| 1
|
12777306
|
<gh_stars>1-10
program_name = 'giraf'
program_desc = 'A command line utility to access imgur.com.'
| 1.226563
| 1
|
book/slackBot.py
|
JisunParkRea/naverSearchAPI_practice
| 2
|
12777307
|
<gh_stars>1-10
from slacker import Slacker
import os, sys, json
from django.core.exceptions import ImproperlyConfigured
# Get SLACK_BOT_TOKEN from secrets.json
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
secret_file = os.path.join(BASE_DIR, 'secrets.json')  # specify the location of the secrets.json file
with open(secret_file) as f:
secrets = json.loads(f.read())
def get_secret(setting, secrets=secrets):
try:
return secrets[setting]
except KeyError:
error_msg = "Set the {} environment variable".format(setting)
raise ImproperlyConfigured(error_msg)
SLACK_BOT_TOKEN = get_secret("SLACK_BOT_TOKEN")
slack = Slacker(SLACK_BOT_TOKEN)
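# Hypothetical usage sketch (channel and message text are assumptions, not from the original):
#     slack.chat.post_message('#general', 'naver book search bot is up')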
| 2.203125
| 2
|
Ejercicio 1/biblioteca/admin.py
|
DiogenesPuig/Ejercicios-con-Django
| 0
|
12777308
|
from django.contrib import admin
from biblioteca.models import Autor
from biblioteca.models import Libro
from biblioteca.models import Ejemplar
from biblioteca.models import Usuario
class LibroInline(admin.TabularInline):
model = Libro
class LibroAdmin(admin.ModelAdmin):
list_display = ('Titulo','Editorial','Autor')
list_display_links = ('Titulo','Editorial')
class UsuarioAdmin(admin.ModelAdmin):
list_display = ('Nombre','Telefono')
fieldsets =(
('Datos',{
'fields': ('Nombre',)
}),
('Contacto',{
'fields': ('Telefono','Direccion')
})
)
class EjemplarAdmin(admin.ModelAdmin):
list_display = ('NombreLibro', 'NombreEditorial')
list_filter = ('Libro',)
class AutorAdmin(admin.ModelAdmin):
list_display = ('Codigo','Nombre')
inlines = [LibroInline]
search_fields = ['Nombre',]
admin.site.register(Autor,AutorAdmin)
admin.site.register(Libro,LibroAdmin)
admin.site.register(Ejemplar,EjemplarAdmin)
admin.site.register(Usuario,UsuarioAdmin)
| 1.867188
| 2
|
src/act/client/clicommon.py
|
bryngemark/aCT
| 0
|
12777309
|
"""
This module defines all functionality that is common to CLI programs.
"""
import sys
import act.client.proxymgr as proxymgr
from act.client.errors import NoSuchProxyError
from act.client.errors import NoProxyFileError
def getProxyIdFromProxy(proxyPath):
"""
Returns ID of proxy at the given path.
Args:
proxyPath: A string with path to the proxy.
Raises:
NoSuchProxyError: Proxy with DN and attributes of the proxy given
in proxy path is not in the database.
NoProxyFileError: No proxy on given path.
"""
manager = proxymgr.ProxyManager()
try:
return manager.getProxyIdForProxyFile(proxyPath)
except NoSuchProxyError as e:
print("error: no proxy for DN=\"{}\" and attributes=\"{}\" "\
"found in database; use actproxy".format(e.dn, e.attribute))
sys.exit(1)
except NoProxyFileError as e:
print("error: path \"{}\" is not a proxy file; use arcproxy".format(e.path))
sys.exit(2)
def showHelpOnCommandOnly(argparser):
"""Show help if command is called without parameters."""
if len(sys.argv) == 1:
argparser.print_help()
sys.exit(0)
| 2.65625
| 3
|
run.py
|
tzom/yHydra
| 0
|
12777310
|
import sys,os
os.environ['YHYDRA_CONFIG'] = sys.argv[1]
import setup_device
from load_config import CONFIG
import glob
RAWs = glob.glob(CONFIG['RAWs'])
FASTA = glob.glob(CONFIG['FASTA'])[0]
from fasta2db import digest_fasta
f = digest_fasta(FASTA,REVERSE_DECOY=False)
r = digest_fasta(FASTA,REVERSE_DECOY=True)
from sanitize_db import sanitize_db
s = sanitize_db()
from embed_db import embed_db
e = embed_db(REVERSE_DECOY=False)
e = embed_db(REVERSE_DECOY=True)
from pyThermoRawFileParser import parse_rawfiles
raw = parse_rawfiles(RAWs)
from search import search
s = [search(RAW.replace('.raw','.mgf')) for RAW in RAWs]
#s = [search(RAW) for RAW in RAWs]
from search_score import search_score
search_score()
from fdr_filter import fdr_filter
fdr_filter()
| 2.25
| 2
|
seeding/seed.py
|
yorkshirelandscape/musebot
| 1
|
12777311
|
<reponame>yorkshirelandscape/musebot
#! /bin/env python3
import argparse
import collections
import csv
import itertools
import math
import operator
import os
import random
import re
import sys
import unicodedata
import uuid
# Default values for these settings
# May be modified by command line arguments
BADNESS_MAX_ARTIST = 20
BADNESS_MAX_SUBMITTER = 50
BADNESS_MAX_SEED = 100
ITERATIONS = 10000
ATTEMPTS = 10
ATTEMPT_ITERATIONS = 200
ORDERS = {
128: [
1, 128, 64, 65, 32, 97, 33, 96, 16, 113, 49, 80, 17, 112, 48, 81,
8, 121, 57, 72, 25, 104, 40, 89, 9, 120, 56, 73, 24, 105, 41, 88,
4, 125, 61, 68, 29, 100, 36, 93, 13, 116, 52, 77, 20, 109, 45, 84,
5, 124, 60, 69, 28, 101, 37, 92, 12, 117, 53, 76, 21, 108, 44, 85,
2, 127, 63, 66, 31, 98, 34, 95, 15, 114, 50, 79, 18, 111, 47, 82,
7, 122, 58, 71, 26, 103, 39, 90, 10, 119, 55, 74, 23, 106, 42, 87,
3, 126, 62, 67, 30, 99, 35, 94, 14, 115, 51, 78, 19, 110, 46, 83,
6, 123, 59, 70, 27, 102, 38, 91, 11, 118, 54, 75, 22, 107, 43, 86,
],
96: [
64, 65, 1, 33, 96, 32, 49, 80, 16, 48, 81, 17,
57, 72, 8, 40, 89, 25, 56, 73, 9, 41, 88, 24,
61, 68, 4, 36, 93, 29, 52, 77, 13, 45, 84, 20,
60, 69, 5, 37, 92, 28, 53, 76, 12, 44, 85, 21,
63, 66, 2, 34, 95, 31, 50, 79, 15, 47, 82, 18,
58, 71, 7, 39, 90, 26, 55, 74, 10, 42, 87, 23,
62, 67, 3, 35, 94, 30, 51, 78, 14, 46, 83, 19,
59, 70, 6, 38, 91, 27, 54, 75, 11, 43, 86, 22,
],
64: [
1, 64, 32, 33, 16, 49, 17, 48, 8, 57, 25, 40, 9, 56, 24, 41,
4, 61, 29, 36, 13, 52, 20, 45, 5, 60, 28, 37, 12, 53, 21, 44,
2, 63, 31, 34, 15, 50, 18, 47, 7, 58, 26, 39, 10, 55, 23, 42,
3, 62, 30, 35, 14, 51, 19, 46, 6, 59, 27, 38, 11, 54, 22, 43,
],
}
def get_distance(i, j):
"""
Calculates the number of rounds before two songs would meet in a match.
If the songs would meet in the first round, returns 0. To determine a more
human-readable (1-indexed) round number for when two songs would meet (or,
the maximum number of rounds), call with 0 and the maximum slot number (i.e.
``len(submissions) - 1``).
:param i: The 0-based index position of the first song
:param j: The 0-based index position of the second song
:returns: Integer indicating the number of rounds until these two songs meet
"""
return math.floor(math.log2(i ^ j))
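# Worked values (not in the original): get_distance(0, 1) == 0 (adjacent slots meet in
# the first round), get_distance(0, 2) == 1, and for a 128-song bracket
# get_distance(0, 127) + 1 == 7 gives the total number of rounds.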
def get_analysis(seeds, submissions):
"""
Constructs statistics for anomalous submitter and artist distribution.
For the given set of submissions and seed order, determines the distribution
of songs for each submitter for each artist, returning anything that seems
potentially anomalous for manual review.
The returned data structure is a dict broken down by round number, where
each round is a dict that may include any or all of the following keys:
- ``submitters`` a per-submitter dict mapping group number to the
specific songs submitted by that submitter within the group
- ``artists`` the same thing but for artists
- ``seeds`` for the first round only, a dict mapping match number to
submission pairs for any ``{0, 1}`` seeds matched up against other
``{0, 1}`` seeds
If a round has nothing to report, it will be omitted from the returned dict.
:param seeds: Seed order to analyze
:param submissions: List of `Submission` instances to sort
:returns: The data structure described above, which may be an empty dict
"""
ordered_submissions = [Submission.copy(submissions[j], slot=i) for i, j in enumerate(seeds)]
results = collections.defaultdict(lambda: collections.defaultdict(dict))
max_round = get_distance(0, len(seeds) - 1) + 1
def get_keyfunc(round):
return lambda x: x // (2 ** round)
for key, attr in [("submitters", "submitter_cmp"), ("artists", "artist_cmp")]:
vals = {getattr(submission, attr) for submission in submissions}
for val in vals:
# The positions of all the relevant songs
slots = [
i
for i, submission in enumerate(ordered_submissions)
if getattr(submission, attr) == val
]
if len(slots) <= 1:
# Only one entry, nothing to meet, much less meet too early
continue
# Analyze the quartiles, octiles, or respective groupings for each round
# Don't analyze the finals, though, everything is allowed there
for round in range(1, max_round):
# First figure out what we should be seeing
num_groups = 2 ** (max_round - round)
allowed_sizes = {
math.floor(len(slots) / num_groups),
math.ceil(len(slots) / num_groups),
}
# Now split into the actual groups
# We do this here since we're interested in the size of the groups,
# otherwise we could just record while iterating through the groupby
groups = {
# Use k + 1 to get 1-indexed numbers here for readability
k + 1: tuple(g)
for k, g in itertools.groupby(slots, get_keyfunc(round))
}
if all(len(g) in allowed_sizes for g in groups.values()):
# Nothing to record, avoid adding empty dicts to the results
continue
# Record any groups with invalid sizes
results[round][key][val] = {
k: tuple(ordered_submissions[i] for i in g)
for k, g in groups.items()
if len(g) not in allowed_sizes
}
# Special case to list any {0, 1} vs. {0, 1} matches in the first round
match = 0
for submission1, submission2 in chunk(ordered_submissions, 2):
match += 1
if submission1.seed in {0, 1} and submission2.seed in {0, 1}:
results[1]["seeds"][match] = (submission1, submission2)
# Special case to list any {0, 1} vs. {0, 1} matches in the second round
match = 0
for submissions_chunk in chunk(ordered_submissions, 4):
match += 1
if (
{submissions_chunk[0].seed, submissions_chunk[1].seed} & {0, 1}
and {submissions_chunk[2].seed, submissions_chunk[3].seed} & {0, 1}
):
# Pick the first {0,1}-seed in each pair to represent something Bad
# There may be multiple Bad things here, but just mention the first
# one we find
submission1 = next(s for s in submissions_chunk[:2] if s.seed in {0, 1})
submission2 = next(s for s in submissions_chunk[2:] if s.seed in {0, 1})
results[2]["seeds"][match] = (submission1, submission2)
# If we have any byes, make sure they match against the lowest possible seeds
# Also make sure they are in the expected slots
if submissions[-1].is_bye:
allowed_seeds = [
submission.seed
for submission in sorted(submissions, key=operator.attrgetter("seed"))
][:int(len(submissions) / 4)]
max_allowed_seed = max(allowed_seeds)
match = 0
for submission1, submission2 in chunk(ordered_submissions, 2):
match += 1
# Only the second submission of even numbered matches can be a bye,
# and it *must* be a bye
        if submission1.is_bye or submission2.is_bye == bool(match % 2):
results[1]["byes"][match] = (submission1, submission2)
# Incorrect layout is all that needs to be reported
continue
if match % 2:
# Odd numbered matches have no bye
continue
# The layout is correct and we're in a match with a bye
# Now make sure the seeds are good
if submission1.seed > max_allowed_seed:
results[1]["byes"][match] = (submission1, submission2)
# If we haven't seen any issues yet, verify that the number of each seed
# matched against a bye is exactly what we expect
if 1 not in results or "byes" not in results[1]:
actual_seeds = sorted(ordered_submissions[i].seed for i in range(2, len(submissions), 4))
if allowed_seeds != actual_seeds:
# We have a problem, count up each seed and record any disparities
allowed_counts = collections.Counter(allowed_seeds)
actual_counts = collections.Counter(actual_seeds)
results[1]["byes"]["totals"] = {}
for k in set(allowed_counts) | set(actual_counts):
if allowed_counts[k] != actual_counts[k]:
results[1]["byes"]["totals"][k] = (allowed_counts[k], actual_counts[k])
return results
# https://stackoverflow.com/a/22045226
def chunk(lst, n):
"""
Returns successive tuples of length n from input iterator.
The last tuple may be shorter than n if it reaches the end of the iterator
early.
:param lst: Input iterator to chunk
:param n: Desired length of output tuples
:returns: Iterator of tuples of length n (except, possibly, the last tuple)
"""
it = iter(lst)
return iter(lambda: tuple(itertools.islice(it, n)), ())
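# Illustrative example (not in the original): list(chunk(range(5), 2)) == [(0, 1), (2, 3), (4,)].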
def print_analysis_results(results, total_submissions):
"""
Prints the anomalous results, one issue per line.
The results object may not include enough information on its own to
reconstruct the total number of submissions, so this is required as an
argument to be able to print slightly more helpful messages.
:param results: Results dict as returned by `get_analysis`
:param total_submission: Integer number of total submissions
:returns: None
"""
num_rounds = get_distance(0, total_submissions - 1) + 1
print(f"Analysis results:")
for round, round_results in sorted(results.items()):
# We only record problems, so this should never come up
if not round_results:
print(f"Round {round} / {num_rounds} | No issues found")
continue
if "submitters" in round_results:
for submitter, submission_groups in round_results["submitters"].items():
for group_number, group in submission_groups.items():
num_groups = 2 ** (num_rounds - round)
print(f"Round {round} / {num_rounds} | Submitter {submitter} | Group {group_number} / {num_groups} | {' | '.join(map(str, group))}")
if "artists" in round_results:
for artist, submission_groups in round_results["artists"].items():
for group_number, group in submission_groups.items():
num_groups = 2 ** (num_rounds - round)
print(f"Round {round} / {num_rounds} | Artist {artist} | Group {group_number} / {num_groups} | {' | '.join(map(str, group))}")
if "seeds" in round_results:
for match, submission_pair in round_results["seeds"].items():
num_matches = 2 ** (num_rounds - round)
print(f"Round {round} / {num_rounds} | Match {match} / {num_matches} | {submission_pair[0]} | {submission_pair[1]}")
if "byes" in round_results:
for match, submission_pair in round_results["byes"].items():
if match == "totals":
# In this case, we have recorded information about total
# numbers of seeds in a dict so "submission_pair" isn't
# accurate
for seed, counts in submission_pair.items():
print(f"Round {round} / {num_rounds} | Bye Totals | Seed {seed} | Expected {counts[0]} | Actual {counts[1]}")
else:
num_matches = 2 ** (num_rounds - round)
print(f"Round {round} / {num_rounds} | Bye Match {match} / {num_matches} | {submission_pair[0]} | {submission_pair[1]}")
if not results:
print(f"No problems found")
def print_analysis(seeds, submissions):
"""
Calculate and then print the distribution analysis for the given seeding.
:param seeds: Seed order to analyze
:param submissions: List of `Submission` instances to sort
:returns: None
"""
print_analysis_results(get_analysis(seeds, submissions), len(submissions))
def get_canonical_artist(artist):
"""
Converts artist name string to canonical version, for comparison.
This performs the following operations, in order:
- lowercases the input string
- replaces double hyphens with a space
- strips diacritics and many other miscellaneous marks (like ``&``)
- collapses multiple spaces in a row to a single space
- strips leading "The"
- drops featured artists from the end of the artist name by looking for:
- ``ft``, ``feat``, or ``featuring``
- optional period after the two abbreviations
- optional parentheses around the whole thing
- must have something following the "featuring" introduction, strips
to the end of the artist name
:param artist: String artist name
:returns: Canonical artist name, suitable for comparison
"""
def should_keep(c):
return unicodedata.category(c)[0] in {"L", "N", "S", "Z"}
return re.sub(
r" (\()?(?:f(?:ea)?t\.?|featuring) .+(?(1)\))$",
"",
re.sub(
r"^the ",
"",
re.sub(
r"\s+",
" ",
"".join(
filter(
should_keep,
unicodedata.normalize(
"NFKD",
artist.lower().replace("--", " "),
),
),
),
),
),
)
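# Illustrative reductions (not in the original; exact outputs depend on the regexes
# above): get_canonical_artist("The Beatles") is expected to yield "beatles", and
# get_canonical_artist("Beyoncé feat. JAY-Z") is expected to yield "beyonce".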
class Submission:
"""
Container class for an individual submission
"""
def __init__(self, *, artist, song, submitter, seed, slot=None, is_bye=False, **kwargs):
"""
Constructor for a `Submission` instance.
Requires the following keyword-only arguments:
- ``artist``
- ``song``
- ``submitter``
- ``seed``
Any additional keyword arguments will be ignored.
:param artist: The string name of the artist who performed/composed the
song
:param song: The string title of the song
:param submitter: The string handle of the user who submitted the song
:param seed: The 1-indexed seed position within the submitter's list, 0
indicates a song was submitted by other users as well
"""
self.is_bye = is_bye
self.artist = artist
self.artist_cmp = get_canonical_artist(artist)
self.song = song
self.submitter = submitter
# Ideally we would go by submitter ID
# but this should be good enough for now
self.submitter_cmp = submitter.lower()
self.seed = int(seed)
self.slot = slot
def __str__(self):
"""
Pretty way of converting the submission to a string.
Includes the artist, song, submitter, and submitted seed values.
"""
slot = "" if self.slot is None else f"{self.slot} "
if self.is_bye:
return f"{slot}Bye"
else:
return f"{slot}{self.artist} - {self.song} <{self.submitter}, {self.seed}>"
@classmethod
def Bye(cls, *, slot=None, **kwargs):
"""
Returns a dummy instance indicating that a slot's opponent gets a bye.
"""
# Use UUID for artist and submitter as a hack so they won't count
# against us during analysis
bye = cls(
artist=uuid.uuid4().hex,
song="",
submitter=uuid.uuid4().hex,
seed=6,
slot=slot,
is_bye=True,
**kwargs,
)
return bye
@classmethod
def copy(cls, instance, **overrides):
kwargs = instance.__dict__.copy()
kwargs.update(overrides)
return cls(**kwargs)
def calc_badness(i, submissions):
"""
Calculates the badness for the submission at the given index.
Adds the badness from comparing the given submission to each of the
submissions that come after it in the list and returns the sum total.
:param i: Index of the current submission within the ``submissions`` list
:param submissions: List of all song submissions, in seeded order
:returns: Total badness of the given song within the current list
"""
n = len(submissions)
badness = [0] * n
# The final submission always has perfect badness since nothing comes after
if i == n - 1:
return badness
# Byes don't generate any badness on their own
# Only real submissions matched against a bye should generate badness here
if submissions[i].is_bye:
return badness
# Include some badness for matching low seeds to other low
# seeds in the first round only
if i % 2 == 0:
if submissions[i + 1].is_bye:
# We want the lowest (closest to 0) seeds possible to get byes
# We use sqrt here so that it gets badder faster as you get farther
# away from 0
badness[i + 1] += math.sqrt(
submissions[i].seed / submissions[i + 1].seed
) * BADNESS_MAX_SEED
else:
badness[i + 1] += (
abs(3 - 0.5 * (submissions[i].seed + submissions[i + 1].seed))
* (13 - submissions[i].seed - submissions[i + 1].seed)
) * BADNESS_MAX_SEED / 39
    # We'd also like to keep 0 and 1 seeds from meeting in round 2, so give a little badness there
if (i // 2) % 2 == 0 and submissions[i].seed in {0, 1}:
j_start = i + 2 - (i % 2)
for j in range(j_start, j_start + 2):
if submissions[j].seed in {0, 1}:
# A 0-seed here with two 0-seeds in the next couple slots would
# give us half BADNESS_MAX_SEED total. More likely we'll only
# add one quarter of BADNESS_MAX_SEED at the most. A 1-seed vs a
# 1-seed here earns one eighth of BADNESS_MAX_SEED.
badness[j] += (4 - submissions[i].seed - submissions[j].seed) * BADNESS_MAX_SEED / 16
max_distance = math.floor(math.log2(n))
for j in range(i + 1, n):
# Calculate the number of rounds before these two submissions would meet
# in a match, starting with 0 if they already are
distance = get_distance(i, j)
if submissions[i].artist_cmp == submissions[j].artist_cmp:
badness[j] += BADNESS_MAX_ARTIST * (1 - distance / max_distance)
if submissions[i].submitter_cmp == submissions[j].submitter_cmp:
badness[j] += BADNESS_MAX_SUBMITTER * (1 - distance / max_distance)
# We've collected all the same badness in other slots, add here as well
# This gives us twice the score we want but is evenly distributed
badness[i] = sum(badness)
return badness
def get_badness(seeds, submissions):
"""
Returns a list of the badness scores for corresponding submissions
:param seeds: Seed order to sort submissions by
:param submissions: List of `Submission` instances
:returns: List of badness scores corresponding to each submission in the
original order
"""
ordered_submissions = [submissions[i] for i in seeds]
return [
sum(lst)
for lst in zip(
*(
calc_badness(i, ordered_submissions)
for i in range(len(ordered_submissions))
)
)
]
def get_rand_index(n, exclude=None, has_byes=False):
"""
Return a random integer in range(n), given constraints.
The ``exclude`` parameter excludes a single integer value. The ``has_byes``
parameter indicates that every fourth integer should be skipped as those are
occupied by bye slots.
:param n: Integer max value to return (exclusive)
:param exclude: Optional integer value to specifically exclude from output
:param has_byes: Optional Boolean value indicating that every fourth integer
should be disallowed
:returns: Random integer that meets all the constraints specified
"""
    if has_byes:
        # Drop the bye slots (every fourth index) from the candidate range
        n = (n * 3) // 4
    if exclude is not None:
        n -= 1
    i = random.randrange(n)
if exclude is not None:
if has_byes:
exclude -= exclude // 4
if i >= exclude:
i += 1
if has_byes:
i += i // 3
return i
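# Worked example of the bye-skipping math above (a sketch): with n=8 and
# has_byes=True, the candidate range first shrinks to 6, and `i += i // 3`
# then maps the draws 0..5 onto 0, 1, 2, 4, 5, 6, skipping the bye slots at
# indices 3 and 7.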
def swap(seeds, submissions, badness, use_max=True, hint=None, has_byes=False):
"""
Try to decrease total badness by swapping a submission
Will perform a single swap on the submission with the maximum badness (or a
random one if use_max is ``False``), re-evaluate the badness, and return the
new seed list if the total badness went down.
:param seeds: Seed order to sort submissions by
:param submissions: List of `Submission` instances
:param badness: List of badness scores for submissions in seed order
:param use_max: Boolean indicating whether to swap the submission with the
maximum badness score, or just pick a random one (default True)
:param hint: Optional integer index to try swapping with. If not given, swap
with a random one instead
    :param has_byes: Boolean indicating that every fourth slot is a fixed bye
        and must not be swapped
    :returns: Tuple containing the list of badness scores and the list of
        seeds. May or may not be identical to the ones originally passed in
"""
n = len(submissions)
if use_max:
if has_byes:
# Byes are in a fixed position, cannot be swapped
# Give them 0 badness so they won't be picked with use_max
valid_badness = [b if (i + 1) % 4 else 0 for i, b in enumerate(badness)]
else:
valid_badness = badness
# Index within seeds of submission with the highest badness score
i = valid_badness.index(max(valid_badness))
else:
# Hit a wall, use random starting point
i = get_rand_index(n, has_byes=has_byes)
if hint is not None and hint != i and (not has_byes or (hint + 1) % 4):
j = hint
else:
# Random choice to swap with
j = get_rand_index(n, exclude=i, has_byes=has_byes)
seeds[i], seeds[j] = seeds[j], seeds[i]
new_badness = get_badness(seeds, submissions)
if sum(new_badness) < sum(badness):
return new_badness, seeds
else:
seeds[i], seeds[j] = seeds[j], seeds[i]
return badness, seeds
def get_new_seeds(submissions):
"""
Generate a new seeding order.
Given a list of submissions, generates a new seed ordering for the
submissions and calculates the initial badness for each corresponding
element in the seed list, returning the badness and seed lists in a tuple.
This is mostly used as an internal convenience function.
:param submissions: List of `Submission` instances
:returns: Tuple containing a list of badness scores and a list of seeds
"""
if submissions[-1].is_bye:
# Ignore byes when shuffling, we'll insert them all afterward
n = len([submission for submission in submissions if not submission.is_bye])
else:
n = len(submissions)
seeds = get_shuffled_range(n)
if submissions[-1].is_bye:
# Every fourth submission should be a bye
# The bye submissions are all at the end, so adding n to a 0-based index
# will give a bye submission index
seeds = list(
itertools.chain.from_iterable(
trio + (i + n,) for i, trio in enumerate(chunk(seeds, 3))
)
)
badness = get_badness(seeds, submissions)
return badness, seeds
def get_shuffled_range(n):
"""
Utility function to generate a random ordering of the integers in [0, n)
:param n: The length of the returned list
:returns: A list of the numbers from 0 to n-1, inclusive, shuffled
"""
return random.sample(list(range(n)), k=n)
def get_hints(n, skip_byes=False):
hints = get_shuffled_range(n)
if skip_byes:
return [j for i, j in enumerate(hints) if i % 4]
else:
return hints
def get_seed_order(data):
"""
Given parsed CSV data, returns the seed slot of corresponding songs.
:param data: List of dicts from the parsed CSV data
:returns: List of seeding integers, indicating the final output order for
the corresponding row in data. The input data list should be traversed
in the order specified by the return value to obtain the match order
"""
submissions = [Submission(**row) for row in data]
has_byes = False
if len(data) in {48, 96}:
has_byes = True
# Make dummy submissions at the end until we have an even power of two
submissions += [
Submission.Bye() for i in range(2 ** (math.floor(math.log2(len(data))) - 1))
]
# Start completely randomly
badness, seeds = get_new_seeds(submissions)
n = len(submissions)
prev_total = sum(badness)
prev_i = 0
attempts = 0
best_badness = prev_total
best_seeds = seeds.copy()
hints = get_hints(n, has_byes)
for i in range(ITERATIONS):
if hints:
use_max = True
hint = hints.pop()
else:
use_max = False
hint = None
badness, seeds = swap(seeds, submissions, badness, use_max=use_max, hint=hint, has_byes=has_byes)
total_badness = sum(badness)
if i % 100 == 0:
print(f"Iteration {i} total badness {total_badness:.0f}")
if total_badness < prev_total:
prev_total = total_badness
prev_i = i
hints = get_hints(n, has_byes)
if i - prev_i > ATTEMPT_ITERATIONS:
attempts += 1
if total_badness < best_badness:
best_badness = total_badness
best_seeds = seeds.copy()
# We've tried enough for now
if attempts > ATTEMPTS:
print(f"Iteration {i} max attempts, quitting")
break
# Otherwise, start over
badness, seeds = get_new_seeds(submissions)
prev_total = sum(badness)
prev_i = i
hints = get_hints(n, has_byes)
print(f"Iteration {i} new attempt, new badness {prev_total:.0f}")
continue
else:
if total_badness < best_badness:
best_badness = total_badness
best_seeds = seeds.copy()
print(f"Done trying, best badness {best_badness:.0f}")
badness = get_badness(best_seeds, submissions)
[print(f"{i:2} {badness[i]:3.0f} {submissions[best_seeds[i]]}") for i in range(n)]
print_analysis(best_seeds, submissions)
if submissions[-1].is_bye:
# Now we need to drop the byes before returning
best_seeds = [i for i in best_seeds if i < len(data)]
return best_seeds
def get_parser():
"""
Creates and return the ArgumentParser for the script.
:returns: Instantiated and fully configured instance of ArgumentParser
"""
parser = argparse.ArgumentParser(
description="create a seeding order for input CSV",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
usage=f"{os.path.basename(__file__ or 'seed.py')} [OPTIONS] INPUT [OUTPUT]",
)
parser.add_argument(
"INPUT",
help=(
"path to the input CSV. If given the special value `-`, instead "
"reads input from STDIN"
),
)
parser.add_argument(
"OUTPUT",
help=(
"desired path to the output CSV file. If not given, defaults to "
"printing to STDOUT. If file exists or intermediate directories do "
"not, operation will fail (and output will be directed to STDOUT), "
"unless the `--force` flag is specified"
),
nargs="?",
)
parser.add_argument(
"--force",
"-f",
help=(
"force output to the given file path, overwriting contents if the "
"file already exists and creating any intermediate directories, if "
"necessary"
),
action="store_true",
default=False,
)
group = parser.add_argument_group(
title="output arguments",
description="Configure output CSV formatting",
)
group.add_argument(
"--output-csv-tabs",
help="use a tab delimiter when outputting the CSV data",
action="store_true",
default=False,
)
group.add_argument(
"--output-order",
help=(
"the order to sort and/or transform the output seeding by. For "
"`sorted`, submissions are sorted by the new seeding order. For "
"`original`, submissions retain their original positioning within "
"the input data. For `bracket`, the generated seeding order is "
"transformed so that when the spreadsheet creates matches it'll end "
"up with them in the proposed ordering. Output order is the same as "
"the input data."
),
choices=["sorted", "original", "bracket"],
default="sorted",
)
group.add_argument(
"--output-dropped",
help=(
"include dropped songs in the output CSV data. Dropped rows will "
"have an empty value for the new seed position. In `sorted` output "
"order, dropped submissions will be at the end."
),
action="store_true",
default=False,
)
group = parser.add_argument_group(
title="behavioral arguments",
description=(
"The following arguments configure how many iterations to try and "
"how to calculate the badness score"
),
)
group.add_argument(
"--badness-artist",
help="how much to weigh duplicate artists when calculating badness",
default=BADNESS_MAX_ARTIST,
type=int,
)
group.add_argument(
"--badness-submitter",
help="how much to weigh duplicate submitters when calculating badness",
default=BADNESS_MAX_SUBMITTER,
type=int,
)
group.add_argument(
"--badness-seed",
help="how much to weigh first round seed matchups when calculating badness",
default=BADNESS_MAX_SEED,
type=int,
)
group.add_argument(
"--iterations",
help="total number of iterations to run",
default=ITERATIONS,
type=int,
)
group.add_argument(
"--attempts",
help="maximum number of attempts to reshuffle",
default=ATTEMPTS,
type=int,
)
group.add_argument(
"--attempt-iterations",
help=(
"number of iterations without a decrease in badness before starting "
"a new attempt"
),
default=ATTEMPT_ITERATIONS,
type=int,
)
return parser
def read_csv_from_file(file):
"""
Reads the CSV data from the open file handle and returns a list of dicts.
Assumes the CSV data includes a header row and uses that header row as
fieldnames in the dict. The following fields are required and are
case-sensitive:
- ``artist``
- ``song``
- ``submitter``
- ``seed``
Other fields are ultimately preserved untouched in the output CSV.
If the CSV doesn't have a header row, uses the following hardcoded list:
- ``order``
- ``seed``
- ``submitter``
- ``year``
- ``song``
- ``artist``
- ``link``
If a tab character is present in the first row, assumes the data is
tab-delimited, otherwise assumes comma-delimited.
:returns: All parsed data from the already-opened CSV file given, as a list
of dicts as generated by `csv.DictReader`
"""
data = list(file)
delimiter = "\t" if "\t" in data[0] else ","
# Look for a header row
reader = csv.reader([data[0]], delimiter=delimiter)
row = next(reader)
for col in row:
try:
int(col)
# Found an integer, no headers present
headers = ["order", "seed", "submitter", "year", "song", "artist", "link"]
break
except ValueError:
pass
else:
# Unable to find an integer here, must be a header row
# Pop the header row off the data list and create a new reader just to
# parse that row
data.pop(0)
headers = row
return list(csv.DictReader(data, fieldnames=headers, delimiter=delimiter))
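# A minimal sketch of accepted input (hypothetical rows; comma- or tab-delimited
# data is handled, with or without the header line):
#   artist,song,submitter,seed
#   Prince,Kiss,somebody,1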
def get_csv_data(csv_path):
"""
Given a path to a CSV file (or ``"-"``), returns the parsed data.
:param csv_path: Path to the CSV input file, or ``"-"`` for STDIN
:returns: Parsed CSV data as a list of dicts
.. seealso:: `read_csv_from_file` for more details regarding the specific
output format and required columns
"""
if csv_path == '-':
data = read_csv_from_file(sys.stdin)
else:
with open(csv_path, newline='') as csv_file:
data = read_csv_from_file(csv_file)
return data
def choose_submissions(data):
"""
Creates a power of two bracket by randomly eliminating larger seed songs
Creates a copy of the input list and then, for the submissions with larger
seed numbers, randomly eliminates them until the length of the list is an
even power of two.
As a special case, if there are 96 or more songs but not enough for a 128
    bracket, it'll return 96 submissions. When creating the seeds, every fourth
    submission will need to be populated with a dummy bye submission. Similarly,
it will return 48 submissions if there are not enough for a 64 bracket.
Returns a tuple containing both the new data and a list of what was removed.
The dropped list contains tuple with the index of the dropped data in the
original list as well as the data itself.
:param data: List of dicts from the input CSV
:returns: A tuple with a new list with a number of elements that is a power
of two and another list containing tuples with the original index and
the dropped data for all removed rows
"""
dropped = []
if 96 <= len(data) < 128:
target_size = 96
elif 48 <= len(data) < 64:
target_size = 48
else:
target_size = 2 ** math.floor(math.log2(len(data)))
new_data = data.copy()
while len(new_data) > target_size:
target_seed = max(row["seed"] for row in new_data)
to_remove = random.choice(
[row for row in new_data if row["seed"] == target_seed]
)
print(f"Eliminating submission {Submission(**to_remove)}")
new_data.remove(to_remove)
dropped.append((data.index(to_remove), to_remove))
print(
f"Eliminated {len(data) - len(new_data)} submissions for a "
f"{len(new_data)} bracket"
)
# Sort `dropped` list by original index (the first element in each tuple)
return new_data, sorted(dropped)
def output_seeded_csv(file, seeds, data, use_tabs, order, dropped):
"""
Given an open file, seed list, and input CSV data, writes data as a CSV.
Any open file or file-like handle can be given, the input data will be
sorted according to the order specified in the seed list (as returned by
`get_seed_order`).
:param file: Open file or file-like handle
:param seeds: List of integers to sort data by
:param data: List of dicts parsed from the input CSV rows, to be written in
the order specified by ``seeds``. Column ordering should be preserved
from the input data
:returns: None
"""
delimiter = "\t" if use_tabs else ","
quoting = csv.QUOTE_NONE if use_tabs else csv.QUOTE_MINIMAL
if order == "bracket" and len(seeds) not in ORDERS:
print(
f"ERROR Bracket ordering requested but no ordering defined for a "
f"{len(seeds)} bracket, using `original`"
)
order = "original"
print(f"Writing CSV in {order} order")
writer = csv.writer(file, delimiter=delimiter, quoting=quoting)
# Write header row first
writer.writerow(["new_order"] + list(data[0].keys()))
if order == "sorted":
ordered_data = (
[i + 1] + list(data[j].values())
for i, j in enumerate(seeds)
)
elif order == "original":
# Invert the seeding list so we can iterate in original order
original_seeds = [None] * len(seeds)
for i, j in enumerate(seeds):
original_seeds[j] = i
ordered_data = (
[original_seeds[i] + 1] + list(row.values())
for i, row in enumerate(data)
)
elif order == "bracket":
# Invert the seeding list so we can iterate in original order
original_seeds = [None] * len(seeds)
for i, j in enumerate(seeds):
original_seeds[j] = i
# Now transform these values to fit the traversal order hardcoded into
# the spreadsheet
bracket_order = ORDERS[len(seeds)]
ordered_data = (
[bracket_order[original_seeds[i]]] + list(row.values())
for i, row in enumerate(data)
)
if dropped is not None:
if order == "sorted":
# Put the dropped stuff at the end, in original index order
ordered_data = itertools.chain(ordered_data, ([""] + list(row.values()) for i, row in dropped))
else:
# Interleave the dropped rows into the rest of the data
# Cast to a list so we can slice
seeded_data = list(ordered_data)
# Start this over and rebuild the list from scratch
ordered_data = []
prev_i = 0
for i, row in dropped:
ordered_data += seeded_data[prev_i:i] + [[""] + list(row.values())]
prev_i = i
else:
# `i` will still be whatever the last value was
ordered_data += seeded_data[i:]
writer.writerows(ordered_data)
def write_csv_data(csv_path, force, seeds, data, use_tabs, use_bracket_order, dropped):
"""
Given an output path and force flag, sorts data by seeds and writes it.
:param csv_path: Path to the desired output file, or ``None`` for STDOUT
:param force: Boolean flag indicated whether to overwrite existing files and
create intermediate directories in the path
:param seeds: List of integers to sort data by
:param data: List of dicts parsed from the input CSV rows
:returns: None
.. seealso:: `output_seeded_csv` for more details on the specific output
format
"""
if csv_path is None:
return output_seeded_csv(sys.stdout, seeds, data, use_tabs, use_bracket_order, dropped)
if force:
dirs = os.path.dirname(csv_path)
if dirs:
os.makedirs(dirs, exist_ok=True)
mode = "w" if force else "x"
with open(csv_path, mode, newline="") as csv_file:
return output_seeded_csv(csv_file, seeds, data, use_tabs, use_bracket_order, dropped)
def main(
input_csv_path,
output_csv_path,
force_output,
output_csv_tabs,
output_bracket_order,
output_dropped,
):
"""
Main entry point for the script.
Given the arguments parsed from the command line, performs the requested
operations, including writing output to the specified file.
:param input_csv_path: Path to input CSV file, or ``"-"`` for STDIN
:param output_csv_path: Path to output CSV file, or ``None`` for STDOUT
:param force_output: If output file already exists overwrite it, if
intermediate directories on the path do not exist, create them
:returns: None
"""
data = get_csv_data(input_csv_path)
data, dropped = choose_submissions(data)
seeds = get_seed_order(data)
write_csv_data(
output_csv_path,
force_output,
seeds,
data,
output_csv_tabs,
output_bracket_order,
dropped if output_dropped else None,
)
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args()
input_csv_path = args.INPUT
output_csv_path = args.OUTPUT
force_output = args.force
output_csv_tabs = args.output_csv_tabs
output_order = args.output_order
output_dropped = args.output_dropped
# Reset variables with anything passed in on the command line
BADNESS_MAX_ARTIST = args.badness_artist
BADNESS_MAX_SUBMITTER = args.badness_submitter
BADNESS_MAX_SEED = args.badness_seed
ITERATIONS = args.iterations
ATTEMPTS = args.attempts
ATTEMPT_ITERATIONS = args.attempt_iterations
main(
input_csv_path,
output_csv_path,
force_output,
output_csv_tabs,
output_order,
output_dropped,
)
| 1.59375
| 2
|
src/ising_animate/examples/__init__.py
|
davifeliciano/ising_model
| 2
|
12777312
|
<reponame>davifeliciano/ising_model
"""
A set of examples written with the ising_animate package.
"""
| 1.4375
| 1
|
build/lib/jhu_primitives/dimselect/profile_likelihood_maximization.py
|
hhelm10/primitives-interfaces
| 0
|
12777313
|
import numpy as np
from scipy.stats import norm


def profile_likelihood_maximization(U, n_elbows, threshold):
    """
    Inputs
        U - An ordered or unordered list of eigenvalues
        n_elbows - The number of elbows to return
        threshold - Only eigenvalues greater than this value are considered
    Return
        elbows - A numpy array containing elbows
    """
if type(U) == list: # cast to array for functionality later
U = np.array(U)
if type(U) is not np.ndarray: # only support arrays, lists
return np.array([])
if n_elbows == 0: # nothing to do..
return np.array([])
if U.ndim == 2:
U = np.std(U, axis = 0)
    # select values greater than the threshold
    U = U[U > threshold]
    if len(U) == 0:
        return np.array([])
    elbows = []
    if len(U) == 1:
        return np.array([U[0]])
    U.sort()  # sort
    U = U[::-1]  # reverse array so that it is sorted in descending order
while len(elbows) < n_elbows and len(U) > 1:
d = 1
sample_var = np.var(U, ddof = 1)
sample_scale = sample_var**(1/2)
elbow = 0
likelihood_elbow = 0
while d < len(U):
mean_sig = np.mean(U[:d])
mean_noise = np.mean(U[d:])
sig_likelihood = 0
noise_likelihood = 0
for i in range(d):
sig_likelihood += norm.pdf(U[i], mean_sig, sample_scale)
for i in range(d, len(U)):
noise_likelihood += norm.pdf(U[i], mean_noise, sample_scale)
likelihood = noise_likelihood + sig_likelihood
if likelihood > likelihood_elbow:
likelihood_elbow = likelihood
elbow = d
d += 1
elbows.append(U[elbow - 1])
U = U[elbow:]
if len(elbows) == n_elbows:
return np.array(elbows)
if len(U) == 0:
return np.array(elbows)
else:
elbows.append(U[0])
return np.array(elbows)
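# Hedged usage sketch (assumes numpy as np and scipy.stats.norm are imported,
# and that the eigenvalues below are purely illustrative):
#   eigenvalues = np.array([10.0, 9.5, 4.0, 3.8, 0.5, 0.4, 0.3])
#   elbows = profile_likelihood_maximization(eigenvalues, n_elbows=2, threshold=0.0)
#   # elbows is an array of (up to) 2 eigenvalues marking the detected elbows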
| 3.171875
| 3
|
eflow/_hidden/general_objects/enum.py
|
EricCacciavillani/eFlow
| 1
|
12777314
|
<filename>eflow/_hidden/general_objects/enum.py
__author__ = "<NAME>"
__copyright__ = "Copyright 2019, eFlow"
__credits__ = ["<NAME>"]
__license__ = "MIT"
__maintainer__ = "EricCacciavillani"
__email__ = "<EMAIL>"
def enum(**enums):
"""
    Allows for constant-like variables.
"""
return type('Enum', (), enums)
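# Illustrative usage (a sketch):
#   Colors = enum(RED=0, GREEN=1, BLUE=2)
#   Colors.GREEN  # -> 1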
| 1.679688
| 2
|
tokens/urls.py
|
Shelo/cmdoc
| 0
|
12777315
|
<reponame>Shelo/cmdoc
from django.conf.urls import url
from tokens import views
urlpatterns = [
url(
r'^(?P<document_id>[0-9]+)/create/',
views.create,
name='create'
),
url(
r'^(?P<document_id>[0-9]+)/remove/(?P<token_key>[a-zA-Z_-]+)/',
views.remove,
name='remove'
),
url(
r'^(?P<document_id>[0-9]+)/update/(?P<token_key>[0-9]+)/',
views.update,
name='update'
),
]
| 1.820313
| 2
|
stepik/python575/03_02_13.py
|
ornichola/learning-new
| 2
|
12777316
|
"""
Take the tests from the step: https://stepik.org/lesson/138920/step/11?unit=196194
Create a new file
In it, create a class with tests that must inherit from unittest.TestCase, by analogy with the previous step
Rewrite the test for the page http://suninjuly.github.io/registration1.html in unittest style
Rewrite the second test for the page http://suninjuly.github.io/registration2.html in unittest style
Write the final checks in the tests in unittest style, for example using the assertEqual assertion method
Run the resulting tests from the file
Look through the run report and find the last line
Submit that line as the answer to this task
"""
import time
import unittest
from selenium import webdriver
LINK_TO_REG_FORM_V1 = 'http://suninjuly.github.io/registration1.html'
LINK_TO_REG_FORM_V2 = 'http://suninjuly.github.io/registration2.html'
REG_DATA = {
'first_name': 'John',
'last_name': 'Doe',
'email': '<EMAIL>',
}
class TestABC(unittest.TestCase):
    def check_form(self, link_to_form):
        # Create a fresh driver per check so that quitting it in the `finally`
        # block does not break the following test
        driver = webdriver.Chrome()
        try:
            driver.get(link_to_form)
required_elements = [
driver.find_element_by_xpath('//*[.="First name*"]/following-sibling::input'),
driver.find_element_by_xpath('//*[.="Last name*"]/following-sibling::input'),
driver.find_element_by_xpath('//*[.="Email*"]/following-sibling::input')
]
for element, value in zip(required_elements, REG_DATA.values()):
element.send_keys(value)
driver.find_element_by_css_selector("button.btn").click()
time.sleep(1)
self.assertEqual(
'Congratulations! You have successfully registered!',
driver.find_element_by_tag_name("h1").text
)
finally:
driver.quit()
def test_reg_form_v1(self):
self.check_form(LINK_TO_REG_FORM_V1)
def test_reg_form_v2(self):
self.check_form(LINK_TO_REG_FORM_V2)
if __name__ == '__main__':
unittest.main()
| 3.203125
| 3
|
IMDB/serializers.py
|
MrRobot100/api-Auth
| 1
|
12777317
|
from rest_framework import serializers
from .models import Pelicula
from django.contrib.auth.models import User
from . import models
class PeliculaSerializer(serializers.ModelSerializer):
class Meta:
fields = ('id', 'titulo', 'descripcion', 'puntaje')
model = models.Pelicula
| 1.804688
| 2
|
Parte 1/lista 02/12 -pi.py
|
Raiane-nepomuceno/Python
| 0
|
12777318
|
<filename>Parte 1/lista 02/12 -pi.py
pi = 0
a = int(input('Num:'))
i = 0  # control variable
b = a - 1  # exponent control
while i < a:
if i%2!=0:
pi = pi+ (a/b)
print('1:', pi)
else:
pi = (a - a/b)
print('2:',pi)
b = b + 2
i = i + 1
print(pi)
| 3.90625
| 4
|
dataset/disp_fig.py
|
jajatikr/Video-Inpainting
| 16
|
12777319
|
<reponame>jajatikr/Video-Inpainting
import matplotlib.pyplot as plt
import numpy as np
class disp_fig(object):
"""
Class to display video frames
Args:
        Accepts a numpy array of video frames to display using matplotlib
Output:
Displays 8 video frames using matplotlib window
"""
def __init__(self, arr):
self.array = arr
# Reshape numpy video array
self.video = arr.reshape(1,8,152,152)
# Create new matplotlib figure
plt.figure(figsize=(20, 2))
def figure(self):
# Display video frames using 8 subplot
for i in range(8):
ax = plt.subplot(1, 8, i+1)
image = self.video[0][i]
plt.imshow(image)
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
if __name__ == '__main__':
pass
| 3.390625
| 3
|
proudcatowner/utils/helpers.py
|
cyanideph/proudcatowner
| 0
|
12777320
|
import json
from functools import reduce
from base64 import b64decode
from typing import Union
import requests
def generate_device_info() -> dict:
return {
"device_id": device.deviceGenerator(),
"user_agent": "Dalvik/2.1.0 (Linux; U; Android 7.1.2; SM-G965N Build/star2ltexx-user 7.1.; com.narvii.amino.master/3.4.33592)"
}
def signature(data: Union[str, dict]) -> str:
if isinstance(data, dict): data = json.dumps(data)
return requests.get(f"http://forevercynical.com/generate/signature?data={str(data)}").json()['signature']
def decode_sid(sid: str) -> dict:
return json.loads(b64decode(reduce(lambda a, e: a.replace(*e), ("-+", "_/"), sid + "=" * (-len(sid) % 4)).encode())[1:-20].decode())
def sid_to_uid(SID: str) -> str: return decode_sid(SID)["2"]
def sid_to_ip_address(SID: str) -> str: return decode_sid(SID)["4"]
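# Sketch of what decode_sid does: the session id is treated as URL-safe base64
# ("-" -> "+", "_" -> "/", padded with "=" to a multiple of 4), decoded, and the
# JSON payload between the 1-byte prefix and the 20-byte suffix is parsed.
# Keys "2" and "4" are assumed to hold the user id and the IP address.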
| 2.65625
| 3
|
bear/uncertainty_modeling/d4rl/test_rapp.py
|
junmokane/AI602_Project
| 1
|
12777321
|
<gh_stars>1-10
import torch
import argparse
import numpy as np
import matplotlib.pyplot as plt
from uncertainty_modeling.rapp.calc_uncertainty import get_diffs
def test_rapp_lunarlander(args):
fig = plt.figure(figsize=(10, 7))
for i in range(4):
path = f"{args.p}_{i}.pt"
model = torch.load(path)
extent = [-0.4, 0.4, 0.0, 1.5]
x = np.linspace(-0.4, 0.4, 101)
y = np.linspace(0.0, 1.5, 101)
xv, yv = np.meshgrid(x, y)
meshgrid_data = torch.from_numpy(np.dstack([xv, yv]))
meshgrid_data_lin = meshgrid_data.reshape((101*101, 2)).cuda()
dif = get_diffs(meshgrid_data_lin, model)
difs = torch.cat([torch.from_numpy(i) for i in dif], dim=-1).numpy()
dif = (difs**2).mean(axis=1)
dif = dif.reshape(101, 101)
fig.add_subplot(2, 2, i+1)
im2 = plt.imshow(dif, extent=extent, origin="lower", cmap=plt.cm.jet, aspect='auto')
plt.colorbar()
plt.xlabel('horizontal displacement')
plt.ylabel('vertical displacement')
if i == 0:
plt.title('action 0: do nothing')
elif i == 1:
plt.title('action 1: fire left engine')
elif i == 2:
plt.title('action 2: fire main engine')
else:
plt.title('action 3: fire right engine')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
# python -m uncertainty_modeling.d4rl.test_rapp --p "/home/seungjae/Desktop/AI602/AI602_Project/bear/trained_LunarLander-v2_seungjae_horizontal"
parser = argparse.ArgumentParser()
parser.add_argument('--p', type=str,
help='path')
args = parser.parse_args()
test_rapp_lunarlander(args)
# "/home/seungjae/Desktop/AI602/AI602_Project/bear/trained_LunarLander-v2_seungjae.pt"
| 2.0625
| 2
|
ABC_A/ABC021_A.py
|
ryosuke0825/atcoder_python
| 0
|
12777322
|
n = int(input())
ret_list = []
if n % 2 == 1:
n -= 1
ret_list.append(1)
for _ in range(n//2):
ret_list.append(2)
print(len(ret_list))
for i in ret_list:
print(i)
| 3.421875
| 3
|
specs/egg_spec.py
|
jaimegildesagredo/server-expects
| 4
|
12777323
|
<reponame>jaimegildesagredo/server-expects<filename>specs/egg_spec.py
# -*- coding: utf-8 -*-
import os.path
from expects import expect
from expects.testing import failure
from server_expects import *
from .constants import c
with describe('egg'):
with describe('be_installed'):
with it('passes if package is installed'):
expect(egg(c.AN_INSTALLED_EGG_NAME)).to(be_installed)
with it('passes if package version is installed'):
expect(egg(c.AN_INSTALLED_EGG_NAME,
c.AN_INSTALLED_EGG_VERSION)).to(be_installed)
with it('passes if editable package is installed'):
expect(egg(c.AN_INSTALLED_EDITABLE_EGG_NAME)).to(be_installed)
with it('passes if package is installed in virtualenv'):
expect(egg(c.A_VIRTUALENV_INSTALLED_EGG,
virtualenv=c.A_VIRTUALENV_PATH)).to(be_installed)
with it('fails if package is not installed'):
with failure:
expect(egg(c.AN_UNINSTALLED_EGG)).to(be_installed)
with it('fails if package with a different version is installed'):
with failure('{!r} version is installed'.format(c.AN_INSTALLED_EGG_VERSION)):
expect(egg(c.AN_INSTALLED_EGG_NAME,
c.AN_UNINSTALLED_EGG_VERSION)).to(be_installed)
with it('fails if package is not installed in virtualenv'):
with failure:
expect(egg(c.A_VIRTUALENV_UNINSTALLED_EGG,
virtualenv=c.A_VIRTUALENV_PATH)).to(be_installed)
with it('fails if virtualenv does not exist'):
with failure('{} not found'.format(_pip_path(c.A_NONEXISTENT_VIRTUALENV_PATH))):
expect(egg(c.A_VIRTUALENV_INSTALLED_EGG,
virtualenv=c.A_NONEXISTENT_VIRTUALENV_PATH)).to(be_installed)
def _pip_path(prefix):
return os.path.join(prefix, 'bin', 'pip')
| 2.546875
| 3
|
home/migrations/0012_homepage_sector_button_text.py
|
uktrade/invest
| 1
|
12777324
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-05-09 10:05
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('home', '0011_auto_20180327_1341'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='sector_button_text',
field=models.TextField(default='Search more industries', max_length=255),
),
]
| 1.492188
| 1
|
fiscales/admin.py
|
fastslack/escrutinio-social
| 10
|
12777325
|
from django.db.models import Q
from django.urls import reverse
from django.contrib import admin
from django.contrib.contenttypes.admin import GenericTabularInline
from .models import Voluntario, AsignacionVoluntario, DatoDeContacto
from .forms import VoluntarioForm, DatoDeContactoModelForm
from django_admin_row_actions import AdminRowActionsMixin
from django.contrib.admin.filters import DateFieldListFilter
class FechaIsNull(DateFieldListFilter):
def __init__(self, field, request, params, model, model_admin, field_path):
super().__init__(field, request, params, model, model_admin, field_path)
self.links = self.links[-2:]
class ContactoAdminInline(GenericTabularInline):
model = DatoDeContacto
form = DatoDeContactoModelForm
class AsignadoFilter(admin.SimpleListFilter):
title = 'Asignación'
parameter_name = 'asignado'
def lookups(self, request, model_admin):
return (
('sí', 'sí'),
('no', 'no'),
)
def queryset(self, request, queryset):
value = self.value()
if value:
isnull = value == 'no'
general = Q(
tipo='general',
asignacion_escuela__isnull=isnull,
asignacion_escuela__eleccion__slug='generales2017'
)
de_mesa = Q(
tipo='de_mesa',
asignacion_mesa__isnull=isnull,
asignacion_mesa__mesa__eleccion__slug='generales2017'
)
queryset = queryset.filter(general | de_mesa)
return queryset
class ReferenteFilter(admin.SimpleListFilter):
title = 'Referente'
parameter_name = 'referente'
def lookups(self, request, model_admin):
return (
('sí', 'sí'),
('no', 'no'),
)
def queryset(self, request, queryset):
value = self.value()
if value:
isnull = value == 'no'
queryset = queryset.filter(es_referente_de_circuito__isnull=isnull).distinct()
return queryset
class VoluntarioAdmin(AdminRowActionsMixin, admin.ModelAdmin):
def get_row_actions(self, obj):
row_actions = []
if obj.user:
row_actions.append(
{
'label': f'Loguearse como {obj.nombre}',
'url': f'/hijack/{obj.user.id}/',
'enabled': True,
}
)
row_actions += super().get_row_actions(obj)
return row_actions
def telefonos(o):
return ' / '.join(o.telefonos)
form = VoluntarioForm
list_display = ('__str__', 'dni', telefonos)
search_fields = (
'apellido', 'nombre', 'dni',
'asignacion_escuela__lugar_votacion__nombre',
'asignacion_mesa__mesa__lugar_votacion__nombre'
)
list_display_links = ('__str__',)
list_filter = ('estado', 'email_confirmado', AsignadoFilter)
# readonly_fields = ('mesas_desde_hasta',)
inlines = [
ContactoAdminInline,
]
class AsignacionVoluntarioAdmin(AdminRowActionsMixin, admin.ModelAdmin):
list_filter = ('mesa__eleccion', 'mesa__lugar_votacion__circuito')
raw_id_fields = ("mesa", "voluntario")
search_fields = (
'voluntario__apellido', 'voluntario__nombre', 'voluntario__dni',
'mesa__numero',
'mesa__lugar_votacion__nombre',
'mesa__lugar_votacion__direccion',
'mesa__lugar_votacion__barrio',
'mesa__lugar_votacion__ciudad',
)
admin.site.register(AsignacionVoluntario, AsignacionVoluntarioAdmin)
admin.site.register(Voluntario, VoluntarioAdmin)
| 1.914063
| 2
|
auto_learn.py
|
Dlut-lab-zmn/GRAA-for-data-protection
| 0
|
12777326
|
<reponame>Dlut-lab-zmn/GRAA-for-data-protection
#Author <NAME>
from models import *
import torch
import torch.nn as nn
__all__ = [
'auto_learn',
]
class Auto_learn(nn.Module):
def __init__(self,resume,attribute,joint,joint2,args):
super(Auto_learn, self).__init__()
self.net =ResNet18(args.in_channel,args.nclass).cuda()
self.net = torch.nn.DataParallel(self.net)
checkpoint = torch.load(resume, map_location='cpu')
self.net.load_state_dict(checkpoint['net'])
for k,v in self.net.named_parameters():
v.requires_grad=False
self.attribute = attribute
self.joint = joint
self.joint2 = joint2
def forward(self, input1,input2):
att_1 = self.attribute(input1)
att_2 = self.attribute(input2)
fea = torch.cat((att_1,att_2),1)
att_1 = self.joint(fea)
att_2 = self.joint2(fea)
self.output1 = torch.clamp(input1 +att_1+128.,0,255)
self.output2 = torch.clamp(input2 +att_2+128.,0,255)
return self.output1,self.output2,att_1,att_2
def attribute(in_channels,batch_norm=False):
res = [64, 64, 64]
layers = []
for v in res:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
def joint(out_channels,batch_norm=False):
res = [128, 128, int(out_channels)]
layers = []
in_channels = 128
for v in res:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
def joint2(out_channels,batch_norm=False):
res = [128, 128, int(out_channels)]
layers = []
in_channels = 128
for v in res:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
def auto_learn(resume,args):
"""VGG 11-layer model (configuration "A")"""
return Auto_learn(resume,attribute(args.in_channel),joint(args.in_channel),joint2(args.in_channel),args)
| 2.3125
| 2
|
infrastructure/gunicorn-config.py
|
bitraf/p2k16
| 5
|
12777327
|
<gh_stars>1-10
import yaml
import logging.config
import os
with open(os.getenv("P2K16_LOGGING")) as f:
cfg = yaml.safe_load(f)
logging.config.dictConfig(cfg)
if not os.path.isdir("log"):
os.mkdir("log")
accesslog = "log/access.log"
bind = os.getenv("P2K16_BIND", "127.0.0.1:5000")
pidfile = "p2k16.pid"
timeout = 5 * 60
| 1.960938
| 2
|
relancer-exp/original_notebooks/camnugent_california-housing-prices/california-housing-tutorial.py
|
Chenguang-Zhu/relancer
| 1
|
12777328
|
<reponame>Chenguang-Zhu/relancer
#!/usr/bin/env python
# coding: utf-8
# In[ ]:
import os
import tarfile
from six.moves import urllib
import pandas as pd
DOWNLOAD_ROOT = "https://github.com/ageron/handson-ml/tree/master/"
HOUSING_PATH = "datasets/housing"
HOUSING_URL = DOWNLOAD_ROOT + HOUSING_PATH + "/housing.tgz"
def fetch_housing_data(housing_url=HOUSING_URL, housing_path=HOUSING_PATH):
if not os.path.isdir(housing_path):
os.makedirs(housing_path)
tgz_path = os.path.join(housing_path, "housing.tgz")
urllib.request.urlretrieve(housing_url, tgz_path)
#print(tgz_path)
housing_tgz = tarfile.open(tgz_path, 'r:gz')
housing_tgz.extractall(path=housing_path)
housing_tgz.close()
def load_housing_data(housing_path=HOUSING_PATH):
#csv_path=os.path.join(housing_path, "../../../input/camnugent_california-housing-prices/housing.csv")
return pd.read_csv("../../../input/camnugent_california-housing-prices/housing.csv")
# # Fetch the Housing Data
# In[ ]:
#fetch_housing_data()
housing = load_housing_data()
housing.head()
# # Explore the Data
# In[ ]:
housing.info()
# In[ ]:
housing.ocean_proximity.value_counts()
# In[ ]:
housing.describe()
# In[ ]:
import matplotlib.pyplot as plt
housing.hist(bins=50, figsize=(20,15))
print()
# # Create a Test Set
# In[ ]:
import numpy as np
def split_train_test(data, test_ratio):
shuffled_indices = np.random.permutation(len(data))
test_set_size = int(len(data) * test_ratio)
test_indices = shuffled_indices[:test_set_size]
train_indices = shuffled_indices[test_set_size:]
return data.iloc[train_indices], data.iloc[test_indices]
# In[ ]:
train_set, test_set = split_train_test(housing, 0.2)
print(len(train_set), "train +", len(test_set), "test")
# In[ ]:
import hashlib
hash = hashlib.md5
hash(np.int64(4)).digest()[-1]
# In[ ]:
def test_set_check(identifier, test_ratio, hash):
return hash(np.int64(identifier)).digest()[-1] < 256 * test_ratio
def split_train_test_by_id(data, test_ratio, id_column, hash=hashlib.md5):
ids = data[id_column]
in_test_set = ids.apply(lambda id_: test_set_check(id_, test_ratio, hash))
return data.loc[~in_test_set], data.loc[in_test_set]
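# Note: hashing the identifier keeps the split stable across re-runs: a row
# stays in the test set as long as its identifier never changes and new rows
# are only ever appended.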
# In[ ]:
# we can easily add an identifier column using reset_index
housing_with_id = housing.reset_index()
train_set, test_set = split_train_test_by_id(housing_with_id, 0.2, "index")
# In[ ]:
print(len(train_set), "train + ", len(test_set), "test")
# ## Stratified Sampling
# In[ ]:
# Trim down the number of income categories by scaling and make a greater-than-5 category
housing["income_cat"] = np.ceil(housing["median_income"] / 1.5)
housing["income_cat"].where(housing["income_cat"] < 5, 5.0, inplace=True)
housing["income_cat"].hist()
# In[ ]:
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing["income_cat"]):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
# drop the temporarily created income_cat column from both sets
for set_ in (strat_train_set, strat_test_set):
set_.drop(["income_cat"], axis=1, inplace=True)
# # Visualize the Data
# In[ ]:
# s is the size and c is the color
#train_set.plot(kind="scatter", x="longitude", y="latitude", alpha=0.4, s=housing["population"]/100, label="population", c="median_house_value", figsize=(12,8), cmap=plt.get_cmap("jet"), colorbar=True)
# In[ ]:
from pandas.plotting import scatter_matrix
attributes = ["median_house_value", "median_income", "total_rooms", "housing_median_age"]
scatter_matrix(housing[attributes], figsize=(12, 8))
# # Data Cleaning
# ## Separate Labels from Predictors
# In[ ]:
housing = strat_train_set.drop(["median_house_value"], axis=1)
housing_num = housing.drop(["ocean_proximity"], axis=1)
housing_labels = strat_train_set["median_house_value"].copy()
# ## Convert Text Categories into a One Hot Vector
# This is great for unordered categories. Another way of doing it is to first apply a LabelEncoder (which yields ordered integers) and then convert the result with a OneHotEncoder(). This LabelBinarizer does it in one shot.
# In[ ]:
from sklearn.preprocessing import LabelBinarizer
encoder = LabelBinarizer()
housing_cat_1hot = encoder.fit_transform(housing["ocean_proximity"])
housing_cat_1hot
# I couldn't figure out how to set this array as a column of the housing dataframe because it provides too many values.
# ## Handle Missing Data with Impute
# In[ ]:
# Take a look at some rows with missing data
sample_incomplete_rows = housing[housing.isnull().any(axis=1)].head()
sample_incomplete_rows
# In[ ]:
from sklearn.preprocessing import Imputer
imputer = Imputer(strategy="median")
imputer.fit(housing_num)
# # Create a Transformer Class
# In[ ]:
from sklearn.base import BaseEstimator, TransformerMixin
rooms_ix, bedrooms_ix, population_ix, household_ix = 3, 4, 5, 6
# Create a class, which inherits from TransformerMixin
class CombinedAttributesAdder(BaseEstimator, TransformerMixin):
def __init__(self, add_bedrooms_per_room = True): # no *args or **kargs
self.add_bedrooms_per_room = add_bedrooms_per_room
def fit(self, X, y=None):
return self # Nothing else to do
def transform(self, X, y=None):
rooms_per_household = X[:, rooms_ix] / X[:, household_ix]
population_per_household = X[:, population_ix] / X[:, household_ix]
if self.add_bedrooms_per_room:
bedrooms_per_room = X[:, bedrooms_ix] / X[:, rooms_ix]
return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room]
else:
return np.c_[X, rooms_per_household, population_per_household]
# In[ ]:
attr_adder = CombinedAttributesAdder(add_bedrooms_per_room=False)
housing_extra_attribs = attr_adder.transform(housing_num.values)
# In[ ]:
housing_extra_attribs
# ## Create a Pipeline
# In[ ]:
from sklearn.pipeline import Pipeline
from sklearn.pipeline import FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.base import BaseEstimator, TransformerMixin
# Our DataFrameSelector will select the columns from a pandas dataframe
class DataFrameSelector(BaseEstimator, TransformerMixin):
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def fit(self, X, y=None):
return self
def transform(self, X):
return X[self.attribute_names].values
# LabelBinarizer changed to support only encoding labels (y)
# not datasets (X) so these days it takes only X not
# X AND y so this custom class is a hack for that.
class CustomLabelBinarizer(BaseEstimator, TransformerMixin):
def __init__(self, sparse_output=False):
self.sparse_output = sparse_output
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
enc = LabelBinarizer(sparse_output=self.sparse_output)
return enc.fit_transform(X)
cat_attribs = ["ocean_proximity"]
num_attribs = list(housing.drop(cat_attribs, axis=1))
num_pipeline = Pipeline([ ('selector', DataFrameSelector(num_attribs)), ('imputer', Imputer(strategy="median")), ('attribs_adder', CombinedAttributesAdder()), ('std_scaler', StandardScaler()), ])
# we have a separate pipeline for the categorical columns
cat_pipeline = Pipeline([ ('selector', DataFrameSelector(cat_attribs)), ('label_binarizer', CustomLabelBinarizer()), ])
full_pipeline = FeatureUnion(transformer_list=[ ("num_pipeline", num_pipeline), ("cat_pipeline", cat_pipeline) ])
# In[ ]:
housing_prepared = full_pipeline.fit_transform(housing)
# In[ ]:
housing_prepared.shape
# # Select and Train Your Model
# ## A Linear Regression Model
# In[ ]:
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(housing_prepared, housing_labels)
# In[ ]:
# let's try the full pipeline on a few training instances
some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
some_data_prepared = housing_prepared[:5]
some_data_prepared
# In[ ]:
print("Predictions:", lin_reg.predict(some_data_prepared))
print("Labels:", list(some_labels))
# In[ ]:
from sklearn.metrics import mean_squared_error
housing_predictions = lin_reg.predict(housing_prepared)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
lin_rmse
# ## Cross Validated Regression Tree
# In[ ]:
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import cross_val_score
tree_reg = DecisionTreeRegressor()
tree_reg.fit(housing_prepared, housing_labels)
# scikit-learn cross-validation expects a utility function
# (greater is better) rather than a cost function (lower is better)
scores = cross_val_score(tree_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
tree_rmse_scores = np.sqrt(-scores)
def display_scores(scores):
print("Scores:", scores)
print("Mean:", scores.mean())
print("Standard deviation:", scores.std())
display_scores(tree_rmse_scores)
# ### Compare to Lin Reg Scores
# In[ ]:
lin_scores = cross_val_score(lin_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
lin_rmse_scores = np.sqrt(-lin_scores)
display_scores(lin_rmse_scores)
# **The linear regression model wins!**
# ## Random Forest
# In[ ]:
from sklearn.ensemble import RandomForestRegressor
forest_reg = RandomForestRegressor()
forest_reg.fit(housing_prepared, housing_labels)
forest_scores = cross_val_score(forest_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
forest_rmse_scores = np.sqrt(-forest_scores)
display_scores(forest_rmse_scores)
# **wow, random forest is even better**
# ## Support Vector Machine
# In[ ]:
from sklearn.svm import SVR
svm_reg = SVR()
svm_reg.fit(housing_prepared, housing_labels)
svm_scores = cross_val_score(svm_reg, housing_prepared, housing_labels, scoring="neg_mean_squared_error", cv=10)
svm_rmse_scores = np.sqrt(-svm_scores)
display_scores(svm_rmse_scores)
# **The support vector machine takes a long time to learn! But OK, it is an amazing model!**
# ## Grid Search to Identify the Best Hyperparameters
# In[ ]:
from sklearn.model_selection import GridSearchCV
param_grid = [ {'n_estimators': [3, 10, 30], 'max_features': [2, 4, 6, 8]}, {'bootstrap': [False], 'n_estimators': [3, 10], 'max_features': [2, 3, 4]}, ]
forest_reg = RandomForestRegressor(random_state=42)
# train across 5 folds, that's a total of (12+6)*5=90 rounds of training
grid_search = GridSearchCV(forest_reg, param_grid, cv=5, scoring='neg_mean_squared_error')
grid_search.fit(housing_prepared, housing_labels)
# In[ ]:
# The best hyperparameter combination found:
grid_search.best_params_
# In[ ]:
grid_search.best_estimator_
# In[ ]:
# Let's look at the score of each hyperparameter combination tested during the grid search:
cvres = grid_search.cv_results_
for mean_score, params in zip(cvres["mean_test_score"], cvres["params"]):
print(np.sqrt(-mean_score), params)
# In[ ]:
pd.DataFrame(grid_search.cv_results_)
| 2.578125
| 3
|
vecLib/2tool.py
|
chenjl0710/arcpyTools
| 1
|
12777329
|
# -*- coding: utf8 -*-
import arcpy
import os
import setting
class ToolValidator(object):
"""Class for validating a tool's parameter values and controlling
the behavior of the tool's dialog."""
def __init__(self):
"""Setup arcpy and the list of tool parameters."""
self.params = arcpy.GetParameterInfo()
self.current_path = setting.env[0]
self.sdefile = os.path.join(self.current_path,"vector.sde")
self.boundary = os.path.join(self.sdefile, 'SDE.Boundary')
self.province = os.path.join(self.boundary,"SDE.全国省界")
self.city = os.path.join(self.boundary,"SDE.全国市界")
self.country = os.path.join(self.boundary,"SDE.全国区县界")
self.project = os.path.join(self.sdefile, 'SDE.PROJECT')
self.fields = ['NAME',"ADMINCODE",'SHAPE@']
self.prj_fields = ['PRODUCT_TY','LOCATION','PRJ_ID','PRO_YEAR','RESOLUTION','PRJ_NAME','SHAPE@']
def initializeParameters(self):
"""Refine the properties of a tool's parameters. This method is
called when the tool is opened."""
cur = arcpy.da.SearchCursor(self.province, self.fields)
self.province_list = []
for row in cur:
self.province_name = row[0]+"-"+row[1]
self.province_list.append(self.province_name)
self.params[0].filter.list = self.province_list
cur = arcpy.da.SearchCursor(self.city, self.fields)
self.city_list = []
for row in cur:
self.city_name = row[0] + "-" + row[1]
self.city_list.append(self.city_name)
self.params[1].filter.list = self.city_list
cur = arcpy.da.SearchCursor(self.country, self.fields)
self.country_list = []
for row in cur:
self.country_name = row[0] + "-" + row[1]
self.country_list.append(self.country_name)
self.params[2].filter.list = self.country_list
# cur = arcpy.da.SearchCursor(self.project, self.prj_fields)
# self.project_list = []
# for row in cur:
# self.project_name = row[2] + "-" + row[5]
# self.project_list.append(self.project_name)
# self.params[3].filter.list = self.project_list
return
def updateParameters(self):
"""Modify the values and properties of parameters before internal
validation is performed. This method is called whenever a parameter
has been changed."""
self.city_list = []
self.country_list = []
if self.params[0].value:
pro_code = self.params[0].value.split('-')[1][:2]
self.expresscity = "ADMINCODE LIKE '{0}%'".format(pro_code)
cur = arcpy.da.SearchCursor(self.city, self.fields,self.expresscity)
for row in cur:
self.city_name = row[0]+"-"+row[1]
self.city_list.append(self.city_name)
self.params[1].filter.list = self.city_list
if self.params[1].value:
city_code = self.params[1].value.split('-')[1][:4]
self.expresscountry = "ADMINCODE LIKE '{0}%'".format(city_code)
cur = arcpy.da.SearchCursor(self.country, self.fields,self.expresscountry)
for row in cur:
self.country_name = row[0]+"-"+row[1]
self.country_list.append(self.country_name)
self.params[2].filter.list = self.country_list
return
def updateMessages(self):
"""Modify the messages created by internal validation for each tool
parameter. This method is called after internal validation."""
return
| 2.53125
| 3
|
algs4/symbol_graph.py
|
dumpmemory/algs4-py
| 230
|
12777330
|
<reponame>dumpmemory/algs4-py
"""
Execution: python symbol_graph.py filename.txt delimiter
Data files: https://algs4.cs.princeton.edu/41graph/routes.txt
https://algs4.cs.princeton.edu/41graph/movies.txt
https://algs4.cs.princeton.edu/41graph/moviestiny.txt
https://algs4.cs.princeton.edu/41graph/moviesG.txt
https://algs4.cs.princeton.edu/41graph/moviestopGrossing.txt
% python symbol_graph.py routes.txt " "
JFK
MCO
ATL
ORD
LAX
PHX
LAS
% python symbol_graph.py movies.txt "/"
Tin Men (1987)
Hershey, Barbara
Geppi, Cindy
<NAME> (II)
Herr, Marcia
...
Blumenfeld, Alan
DeBoy, David
Bacon, Kevin
Woodsman, The (2004)
Wild Things (1998)
Where the Truth Lies (2005)
Tremors (1990)
...
Apollo 13 (1995)
Animal House (1978)
Assumes that input file is encoded using UTF-8.
% iconv -f ISO-8859-1 -t UTF-8 movies-iso8859.txt > movies.txt
"""
from algs4.st import ST
from algs4.graph import Graph
class SymbolGraph:
def __init__(self, stream, sp):
self.st = ST()
for line in open(stream):
a = line.strip().split(sp)
for i in range(len(a)):
if not self.st.contains(a[i]):
self.st.put(a[i], self.st.size())
self.keys = ["" for _ in range(self.st.size())]
for key in self.st.keys():
self.keys[self.st.get(key)] = key
self.G = Graph(self.st.size())
for line in open(stream):
a = line.strip().split(sp)
v = self.st.get(a[0])
for i in range(1, len(a)):
self.G.add_edge(v, self.st.get(a[i]))
def contains(self, s):
return self.st.contains(s)
def index(self, s):
return self.st.get(s)
def name(self, v):
return self.keys[v]
def graph(self):
return self.G
if __name__ == "__main__":
import sys
filename, delimiter = sys.argv[1], sys.argv[2]
sg = SymbolGraph(filename, delimiter)
graph = sg.graph()
for line in sys.stdin:
source = line.strip()
if sg.contains(source):
s = sg.index(source)
for v in graph.adj[s]:
print(" ", sg.name(v), end='')
else:
print("input not contains source: ", source)
| 2.75
| 3
|
basicsr/models/losses/__init__.py
|
Salah856/BasicSR
| 20
|
12777331
|
<gh_stars>10-100
from .losses import (CharbonnierLoss, GANLoss, GradientPenaltyLoss, L1Loss,
MSELoss, PerceptualLoss, WeightedTVLoss)
__all__ = [
'L1Loss', 'MSELoss', 'CharbonnierLoss', 'WeightedTVLoss', 'PerceptualLoss',
'GANLoss', 'GradientPenaltyLoss'
]
| 1.101563
| 1
|
nuggetemoji/plugins/guild_management.py
|
LimeProgramming/NuggetEmoji
| 0
|
12777332
|
<reponame>LimeProgramming/NuggetEmoji<filename>nuggetemoji/plugins/guild_management.py
import sys
import json
import discord
import asyncio
import datetime
from discord.ext import commands
from nuggetemoji.util import dataclasses
from .util import checks
from .util.misc import RANDOM_DISCORD_COLOUR, AVATAR_URL_AS, GUILD_URL_AS
from nuggetemoji.util.allowed_mentions import AllowedMentions
class GuildManagement(commands.Cog):
def __init__(self, bot):
self.bot = bot
# -------------------- local Cog Events --------------------
@asyncio.coroutine
async def cog_before_invoke(self, ctx):
#if "sends webhook" in (ctx.command.help).lower():
# return
await ctx.channel.trigger_typing()
@asyncio.coroutine
async def cog_after_invoke(self, ctx):
if self.bot.config.delete_invoking:
try:
await ctx.message.delete()
except discord.errors.NotFound:
pass
@asyncio.coroutine
async def cog_command_error(self, ctx, error):
if self.bot.config.delete_invoking:
try:
await ctx.message.delete()
except discord.errors.NotFound:
pass
# -------------------- Listeners --------------------
@commands.Cog.listener()
async def on_guild_join(self, guild):
# ---------- If using SQLite database ----------
if self.bot.config.use_sqlite and len(self.bot.guilds) >= self.bot.config.sqlitelimit:
owner = guild.owner
try:
await owner.send("Unfortunately I cannot join your awesome Discord server because of limitations of my database. \n I'll be leaving now. 👋")
except Exception:
pass
            bot_owner = (await self.bot.application_info()).owner
            try:
                await bot_owner.send(f"I failed to join another guild. I could not join {guild.name} because I am at my max limit of guilds while using SQLite. If you want me to be able to join more servers, consider switching to a PostgreSQL database, which has no guild limit.")
except Exception:
pass
await guild.leave()
return
# ---------- Setup variables ----------
bot_member = guild.get_member(self.bot.user.id)
bot_ava = await self.bot.user.avatar_url_as(format="png", size=128).read()
guild_issues = []
gsguild = dataclasses.Guild(
name= guild.name,
id= guild.id,
prefix = self.bot.config.command_prefix,
allowed_roles = [],
allow_mentions = False,
allow_everyone = False
)
await self.bot.db.add_guild_settings(guild, prefix=self.bot.config.command_prefix)
# ---------- Sort out the webhooks ----------
# ===== Iter guilds text channels to sort out the guilds webhooks.
for channel in guild.text_channels:
# === If bot cannot manage_webhooks.
if not channel.permissions_for(bot_member).manage_webhooks:
guild_issues.append(f"Missing Manage Webhooks permissions in <#{channel.id}>.")
continue
# === Get the channels existing webhooks
webhooks = await channel.webhooks()
# --------------------------------------------------
# === If channel has no webhooks it's safe to assume that we have to make one.
if len(webhooks) == 0:
# - Create webhook
hook = await self.bot._create_webhook(channel, bot_ava)
# - Write webhook to database
await self.bot.db.set_webhook(id = hook.id, token = hook.token, ch_id=channel)
# - Write webhook to guild dataclass
gsguild.set_webhook2(dataclasses.Webhook(id = hook.id, token = hook.token, ch_id = channel.id))
# - Move onto the next channel
continue
# --------------------------------------------------
# === Try to find a webhook the bot made and pick one.
try:
hook = [webhook for webhook in webhooks if webhook.user == self.bot.user].pop()
except IndexError:
# - Create webhook if bot didn't make any of the channels webhooks.
hook = await self.bot._create_webhook(channel, bot_ava)
finally:
# - Store the webhook in database
await self.bot.db.set_webhook(hook.id, hook.token, channel)
gsguild.set_webhook(channel.id, hook.id, hook.token)
# ===== Add guild to guild_settings.
self.bot.guild_settings.add_guild(gsguild)
# ---------- Deal with guilds emotes ----------
# The only way for this to be true is if the guild has multiple emojis with the same name.
if not len(guild.emojis) == len(set([emji.name for emji in guild.emojis])):
edict = {}
for emji in guild.emojis:
if emji.name in edict:
edict[emji.name].append(emji)
else:
edict[emji.name] = [emji]
for name, problemEmojis in [item for item in edict.items() if len(item[1]) > 1]:
dupeemji = f"More than one emoji with the name {name} "
for e in problemEmojis:
em = f'<{"a" if e.animated else ""}:{e.name}:{e.id}> '
dupeemji += em
guild_issues.append(dupeemji)
# ---------- DM Guild Owner ----------
owner = guild.owner
msg_content = ""
msg_content = "Thank you for adding me to your server. "
if self.bot.config.support_invite is not None:
msg_content += f"If you want help with the bot join the support discord {self.bot.config.support_invite}"
if guild_issues:
msg_content += "I have some one or more issues with your guild.\n```\n\t>"
msg_content += '\n\t>'.join(guild_issues)
msg_content += "\n```\nIt is recommended to correct these issue(s). Bot will try to compensate for them none the less."
await owner.send(msg_content)
return
@commands.Cog.listener()
async def on_guild_remove(self, guild):
await self.bot.db.remove_guild(guild)
pass
def setup(bot):
bot.add_cog(GuildManagement(bot))
| 1.945313
| 2
|
python/pmercury/protocols/dhcp.py
|
raj-apoorv/mercury
| 299
|
12777333
|
"""
Copyright (c) 2019 Cisco Systems, Inc. All rights reserved.
License at https://github.com/cisco/mercury/blob/master/LICENSE
"""
import os
import sys
import functools
from socket import AF_INET, AF_INET6, inet_ntop
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.dirname(os.path.abspath(__file__))+'/../')
from pmercury.protocols.protocol import Protocol
MAX_CACHED_RESULTS = 2**24
class DHCP(Protocol):
def __init__(self, fp_database=None, config=None):
# populate fingerprint databases
self.fp_db = None
DHCP.static_data = set([0x35, 0x37])
DHCP.contextual_data = {0x03: ('router',lambda x: inet_ntop(AF_INET, x)),
0x06: ('domain_name_server',lambda x: inet_ntop(AF_INET, x)),
0x0c: ('hostname',lambda x: x.decode()),
0x0f: ('domain_name',lambda x: x.decode()),
0x32: ('requested_ip',lambda x: inet_ntop(AF_INET, x)),
0x3c: ('vendor_class_id',lambda x: x.decode())}
@staticmethod
def proto_identify(data, offset, data_len):
if data_len < 230:
return False
if (data[offset] != 0x01 or
data[offset+236] != 0x63 or
data[offset+237] != 0x82 or
data[offset+238] != 0x53 or
data[offset+239] != 0x63):
return False
return True
@staticmethod
def fingerprint(data, offset, data_len):
hardware_address_length = data[offset + 2]
cmac = data[offset+28:offset+28+hardware_address_length].hex()
context = [{'name': 'client_mac_address', 'data': '%s' % ':'.join(a+b for a,b in zip(cmac[::2], cmac[1::2]))}]
offset += 240
fp_ = '('
while offset < data_len:
kind = data[offset]
if kind == 0xff or kind == 0x00: # End / Padding
fp_ += '(%02x)' % kind
break
length = data[offset+1]
if kind in DHCP.contextual_data:
name_, transform_ = DHCP.contextual_data[kind]
context.append({'name':name_,
'data':transform_(data[offset+2:offset+2+length])})
if offset+length+2 >= data_len:
return None
if kind not in DHCP.static_data:
fp_ += '(%02x)' % kind
offset += length+2
continue
fp_ += '(%s)' % data[offset:offset+2+length].hex()
offset += length+2
fp_ += ')'
return fp_, context
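# Minimal usage sketch (added for illustration): a hand-built, hypothetical DHCP
# Discover payload used to exercise proto_identify() and fingerprint(). The MAC
# address and option values below are made up and not taken from a real capture.
if __name__ == '__main__':
    payload = bytearray(240)
    payload[0] = 0x01                                    # op: BOOTREQUEST
    payload[2] = 0x06                                    # hlen: MAC address length
    payload[28:34] = bytes.fromhex('aabbccddeeff')       # chaddr (client MAC)
    payload[236:240] = bytes([0x63, 0x82, 0x53, 0x63])   # DHCP magic cookie
    payload += bytes([0x35, 0x01, 0x01])                 # option 53: DHCP Discover
    payload += bytes([0x0c, 0x04]) + b'host'             # option 12: hostname
    payload += bytes([0xff])                             # end option
    DHCP()  # instantiating populates DHCP.static_data and DHCP.contextual_data
    if DHCP.proto_identify(payload, 0, len(payload)):
        fp, context = DHCP.fingerprint(payload, 0, len(payload))
        print(fp)       # '((350101)(0c)(ff))'
        print(context)  # client MAC plus the decoded hostname option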
| 1.9375
| 2
|
setup.py
|
ATLASControlTower/aCT
| 0
|
12777334
|
from setuptools import setup, find_packages
setup(name='aCT',
version='0.1',
description='ARC Control Tower',
url='http://github.com/ARCControlTower/aCT',
python_requires='>=3.6',
author='aCT team',
author_email='<EMAIL>',
license='Apache 2.0',
package_dir = {'': 'src'},
packages=find_packages('src'),
install_requires=[
'mysql-connector-python', # connection to MySQL database
'htcondor', # bindings to use HTCondor to submit jobs
'pylint', # for travis automatic tests
'requests', # for APF mon calls
'prometheus_client', # Prometheus monitoring
'selinux', # SELinux context handling
'psutil', # Reports of process kills
'pyopenssl',
'flask',
'gunicorn',
'sqlalchemy'
],
entry_points={
'console_scripts': [
'actbootstrap = act.common.aCTBootstrap:main',
'actmain = act.common.aCTMain:main',
'actreport = act.common.aCTReport:main',
'actcriticalmonitor = act.common.aCTCriticalMonitor:main',
'actheartbeatwatchdog = act.atlas.aCTHeartbeatWatchdog:main',
'actldmxadmin = act.ldmx.aCTLDMXAdmin:main',
'actbulksub = act.client.actbulksub:main',
'actcat = act.client.actcat:main',
'actclean = act.client.actclean:main',
'actfetch = act.client.actfetch:main',
'actget = act.client.actget:main',
'actkill = act.client.actkill:main',
'actproxy = act.client.actproxy:main',
'actresub = act.client.actresub:main',
'actstat = act.client.actstat:main',
'actsub = act.client.actsub:main'
]
},
data_files=[
('etc/act', ['doc/aCTConfigARC.xml.template',
'doc/aCTConfigATLAS.xml.template'])
]
)
| 1.625
| 2
|
philoseismos/segy/components/TextualFileHeader.py
|
sir-dio/old-philoseismos
| 1
|
12777335
|
""" philoseismos: with passion for the seismic method.
This file defines the TextualFileHeader object that represents
a Textual File Header of a SEG-Y file.
@author: <NAME>
e-mail: <EMAIL> """
class TextualFileHeader:
""" Textual File Header for the SEG-Y file.
Textual description of the file. Exactly 3200 characters.
40 lines, 80 characters each. No strict format.
"""
def __init__(self, file=None):
""" Create an empty Textual File Header object. """
# standard encoding specified in SEG-Y file format description
self.encoding = 'cp500'
self.text = ' ' * 3200
self.lines = [self.text[i * 80: (i + 1) * 80] for i in range(40)]
self._bytes = None
if file:
self.load_from_file(file)
# ----- Loading, decoding, writing ----- #
def load_from_file(self, file):
""" Loads the bytes representing a Textual File Header.
Parameters
----------
file : str
Path to the file to load the Textual File Header from.
"""
with open(file, 'br') as f:
bytes = f.read(3200)
self.load_from_bytes(bytes)
def replace_in_file(self, file):
""" Replaces the Textual File Header in the file with self.
Parameters
----------
file : str
Path to the file to replace Textual File Header in.
"""
self._bytes = self.text.encode(self.encoding)
with open(file, 'br') as f:
file_content = bytearray(f.read())
file_content[:3200] = self._bytes
with open(file, 'bw') as f:
f.write(file_content)
def load_from_bytes(self, bytes):
""" Unpacks given bytes into self.
Parameters
----------
bytes : bytes
Bytes to decode.
"""
self._bytes = bytes
self.text = bytes.decode(self.encoding)
self.lines = [self.text[i * 80: (i + 1) * 80] for i in range(40)]
def redecode(self):
""" Decodes bytes, loaded from file, using the endoding..
Can be useful if the encoding was changed, to reset the .text attribute. """
self.text = self._bytes.decode(self.encoding)
# ----- Modifying content ----- #
def set_content(self, content):
""" Set the content for the Textual File Header.
Parameters
----------
content : str
New content for the Textual File Header.
Notes
-----
Textual File Header has to contain exactly 3200 characters: 40 lines, 80 symbols each.
The given content is split into lines by the newline character. If there are more than 40,
only the first 40 are taken. Each line is then set to be exactly 80 characters long.
If there are fewer than 40 lines, empty lines are added.
"""
lines = content.split('\n')[:40]
self.lines = [line[:80].ljust(80) for line in lines]
while len(self.lines) < 40:
self.lines.append(' ' * 80)
self.text = ''.join(self.lines)
self._bytes = self.text.encode(self.encoding)
def set_line(self, line_no, content):
""" Set the content for a specific line.
Parameters
----------
line_no : int
Number of the line to change (starting from 1).
content : str
New content for the line.
Notes
-----
Since each line in Textual File Header is exactly 80 characters, the content is cropped
or padded with spaces.
"""
line = content[:80].ljust(80)
self.lines[line_no - 1] = line
self.text = ''.join(self.lines)
self._bytes = self.text.encode(self.encoding)
# ----- Working with other files ----- #
def export_to_txt(self, file):
""" Saves the content of the Textual File Header in .txt format.
Parameters
----------
file : str
Path and name of the file to export Textual File Header to.
Notes
-----
Each line is written on its own row, separated by newline characters.
"""
with open(file, 'w') as f:
for line in self.lines:
f.write(line + '\n')
def import_from_txt(self, file):
""" Loads the content from the .txt file.
Parameters
----------
file : str
Path to the file to import Textual File Header from.
Notes
-----
Reads 40 lines, 80 characters each, and combines them.
"""
with open(file, 'r') as f:
self.lines = [f.readline().strip()[:80].ljust(80) for i in range(40)]
self.text = ''.join(self.lines)
# ----- Dunder methods ----- #
def __repr__(self):
return '\n'.join(self.lines)
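# Minimal usage sketch (added for illustration; 'header.txt' and 'survey.sgy'
# are hypothetical paths, not files shipped with philoseismos).
if __name__ == '__main__':
    tfh = TextualFileHeader()
    tfh.set_line(1, 'C 1 CLIENT: EXAMPLE SURVEY')
    tfh.set_line(2, 'C 2 ACQUIRED WITH A 48-CHANNEL SPREAD')
    assert len(tfh.text) == 3200           # always 40 lines x 80 characters
    tfh.export_to_txt('header.txt')        # human-readable copy, one line per row
    # tfh.replace_in_file('survey.sgy')    # would overwrite the first 3200 bytes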
| 3.328125
| 3
|
utest/namespace/test_retrievercontextfactory.py
|
veryl-technologies/t24-tests-ide
| 1
|
12777336
|
<reponame>veryl-technologies/t24-tests-ide
import unittest
from robotide.namespace.namespace import _RetrieverContextFactory
from robot.parsing.model import ResourceFile
from robot.utils.asserts import assert_equals
def datafileWithVariables(vars):
data = ResourceFile()
for var in vars:
data.variable_table.add(var, vars[var])
return data
class RetrieverContextFactoryTest(unittest.TestCase):
def test_created_context_has_variable_table_variables(self):
factory = _RetrieverContextFactory()
ctx = factory.ctx_for_datafile(datafileWithVariables({'${foo}':'moi',
'${bar}': 'hoi',
'@{zoo}': 'koi'}))
result = ctx.vars.replace_variables('!${foo}!${bar}!@{zoo}!')
assert_equals(result, "!moi!hoi!['koi']!")
if __name__ == '__main__':
unittest.main()
| 2.5625
| 3
|
contextual-repr-analysis/contexteval/data/__init__.py
|
Albert-Ma/bert-fine-tuned-gain
| 2
|
12777337
|
<gh_stars>1-10
from contexteval.data.dataset_readers import * # noqa: F401,F403
from contexteval.data.fields import * # noqa: F401,F403
| 1.203125
| 1
|
src/api/posts_api/serializers.py
|
DevHub-Azerbaycan/python_web_site
| 25
|
12777338
|
from rest_framework import serializers
from blog.models import Post
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
fullName = serializers.SerializerMethodField()
class Meta:
model = User
fields = ['id','username','first_name','last_name','fullName']
def get_fullName(self,obj):
return obj.get_full_name()
post_detail_url = serializers.HyperlinkedIdentityField(
view_name = 'api:post-detail',
lookup_field = 'pk'
)
class PostSerializer(serializers.ModelSerializer):
detail_url = post_detail_url
author = UserSerializer()
likes_count = serializers.SerializerMethodField()
dislikes_count = serializers.SerializerMethodField()
class Meta:
model = Post
fields = '__all__'
def get_likes_count(self,obj):
return obj.total_likes()
def get_dislikes_count(self,obj):
return obj.total_dislikes()
class PostCreateSerializer(serializers.ModelSerializer):
detail_url = post_detail_url
class Meta:
model = Post
fields = ['name','body','image','detail_url']
| 2.328125
| 2
|
src/checkvist/__main__.py
|
nuno-andre/checkvist
| 0
|
12777339
|
from checkvist.app import cli
import sys
sys.exit(cli.cli(prog_name='checkvist'))
| 1.320313
| 1
|
MinimalTriangle/Python/hello_triangle.py
|
vladiant/OpenGLsamples
| 0
|
12777340
|
<filename>MinimalTriangle/Python/hello_triangle.py
# https://stackabuse.com/brief-introduction-to-opengl-in-python-with-pyopengl
# pip3 install PyOpenGL PyOpenGL_accelerate
# https://pythonprogramming.net/opengl-rotating-cube-example-pyopengl-tutorial/
# https://gist.github.com/deepankarsharma/3494203
import OpenGL
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
w, h = 500, 500
def triangle():
glClearColor(0.5, 0.5, 0.5, 1.0)
glBegin(GL_TRIANGLES)
glColor3f(0.0, 0.0, 1.0)
glVertex2f(w / 2, h / 3)
glColor3f(0.0, 1.0, 0.0)
glVertex2f(w / 4, h * 2 / 3)
glColor3f(1.0, 0.0, 0.0)
glVertex2f(w * 3 / 4, h * 2 / 3)
glEnd()
def iterate():
glViewport(0, 0, w, h)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0, w, 0, h, -1, 1)
glScalef(1, -1, 1)
glTranslatef(0, -h, 0)
def showScreen():
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glLoadIdentity()
iterate()
triangle()
glutSwapBuffers()
glutInit()
glutInitDisplayMode(GLUT_RGBA)
glutInitWindowSize(w, h)
glutInitWindowPosition(0, 0)
wind = glutCreateWindow("Single Triangle")
glutDisplayFunc(showScreen)
glutIdleFunc(showScreen)
glutMainLoop()
| 3.203125
| 3
|
test/torch_optimizer_test.py
|
ymchen7/bluefog
| 1
|
12777341
|
<reponame>ymchen7/bluefog
# Copyright 2020 Bluefog Team. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Note: this end-to-end test only covers the BlueFog optimizers on a single machine;
# the hierarchical cases are not fully under test yet.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
import bluefog.torch as bf
from bluefog.common import topology_util
TEST_ON_GPU = torch.cuda.is_available()
# A linear model for testing
class LinearNet(nn.Module):
def __init__(self, input_dim, output_dim):
super(LinearNet, self).__init__()
self.fc = nn.Linear(input_dim, output_dim)
self._num_parameters = input_dim*output_dim
def forward(self, x):
return self.fc(x)
@property
def num_parameters(self):
return self._num_parameters
# A deep linear model for testing
class DuplicatedLinearNet(nn.Module):
def __init__(self, input_dim, output_dim):
super(DuplicatedLinearNet, self).__init__()
self.fc1 = nn.Linear(input_dim, output_dim)
self.fc2 = nn.Linear(output_dim, output_dim)
self._num_parameters = input_dim*output_dim+output_dim**2
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
x = self.fc2(x)
return x
@property
def num_parameters(self):
return self._num_parameters
class HierarchicalLinearNet(nn.Module):
def __init__(self, input_dim, output_dim):
super(HierarchicalLinearNet, self).__init__()
self.fc1 = LinearNet(input_dim, output_dim)
self.fc2 = LinearNet(output_dim, output_dim)
self._num_parameters = self.fc1.num_parameters+self.fc2.num_parameters
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
return x
@property
def num_parameters(self):
return self._num_parameters
# A Simple dataset for testing.
class SimpleDataset:
def __init__(self, x, y):
self._x = x
self._y = y
def __len__(self):
return len(self._y)
def __getitem__(self, idx):
return (torch.tensor(self._x[idx], dtype=torch.float32),
torch.tensor(self._y[idx], dtype=torch.float32))
# A ProblemBuilder for the linear problem with a specified input and output dimension.
# The matrix A are randomly generated now.
# Y = XA + E, E ~ N(0, noise_level^2)
class LinearProblemBuilder:
def __init__(self, input_dim=16, output_dim=3, noise_level=1e-5):
self._input_dim = input_dim
self._output_dim = output_dim
self._noise_level = noise_level
self._matrix_gen_seed = 0
self._generate_matrices()
def _generate_matrices(self):
state = np.random.get_state()
np.random.seed(self._matrix_gen_seed)
self._A = np.random.randn(self._input_dim, self._output_dim)
np.random.set_state(state)
@property
def input_dim(self):
return self._input_dim
@input_dim.setter
def input_dim(self, value):
if not isinstance(value, int) or value <= 0:
raise ValueError(
"Input dimension should be an integer larger than 0.")
self._input_dim = value
self._generate_matrices()
@property
def output_dim(self):
return self._output_dim
@output_dim.setter
def output_dim(self, value):
if not isinstance(value, int) or value <= 0:
raise ValueError(
"Output dimension should be an integer larger than 0.")
self._output_dim = value
self._generate_matrices()
@property
def noise_level(self):
return self._noise_level
@noise_level.setter
def noise_level(self, value):
if value < 0:
raise ValueError(
"Noise level should be larger than or equal to 0.")
self._noise_level = value
def get_dataset(self, num_sample):
X = np.random.randn(num_sample, self.input_dim)
E = np.random.randn(num_sample, self.output_dim) * self.noise_level
Y = np.matmul(X, self._A) + E
return SimpleDataset(X, Y)
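# Standalone sketch (added for illustration, not one of the tests below): the
# synthetic data from LinearProblemBuilder can be recovered with an ordinary
# least-squares solve, which is what the distributed optimizers approximate.
def _lstsq_sanity_check():
    builder = LinearProblemBuilder(input_dim=4, output_dim=2, noise_level=1e-5)
    dataset = builder.get_dataset(num_sample=256)
    X, Y = np.asarray(dataset._x), np.asarray(dataset._y)
    A_hat, *_ = np.linalg.lstsq(X, Y, rcond=None)
    # The recovery error is on the order of noise_level for this well-posed setup.
    return np.linalg.norm(A_hat - builder._A)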
# Prepare the problem to be solved
def problem_setup(net=LinearNet):
bf.init()
num_epochs = 50
batch_size = 128
num_train_per_node = 1024
num_test_per_node = 128
lr = 0.01
# Setup Problem
problem_builder = LinearProblemBuilder()
train_dataset = problem_builder.get_dataset(num_train_per_node)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size)
test_dataset = problem_builder.get_dataset(num_test_per_node)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size)
# Setup Model
model = net(problem_builder.input_dim, problem_builder.output_dim)
assert (
num_train_per_node*bf.size() >= model.num_parameters
), "The number of samples is too small making it an underdetermined system."
# Setup Optimizer
optimizer = optim.Adam(model.parameters(), lr=lr*bf.size())
bf.broadcast_parameters(model.state_dict(), root_rank=0)
bf.broadcast_optimizer_state(optimizer, root_rank=0)
return problem_builder, train_dataloader, test_dataloader, model, optimizer, num_epochs
def pin_model_to_device(device, model):
isCUDA = device == "GPU"
if isCUDA:
# Bluefog: pin GPU to local rank.
device_id = (bf.local_rank() if bf.nccl_built() else
bf.local_rank() % torch.cuda.device_count())
torch.cuda.set_device(device_id)
model.cuda()
return isCUDA
# Standard training process
def standard_train(model, optimizer, dataloader, isCUDA):
mseloss = nn.MSELoss()
model.train()
for data, target in dataloader:
if isCUDA:
data, target = data.cuda(), target.cuda()
y = model(data)
loss = mseloss(y, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Dynamic topology training process for win_put
def dynamic_win_put_train(model, optimizer, dataloader, isCUDA, epoch):
mseloss = nn.MSELoss()
model.train()
for batch_idx, (data, target) in enumerate(dataloader):
if epoch < 3:
return
num_out_neighbors = len(bf.out_neighbor_ranks())
sent_neighbor = bf.out_neighbor_ranks()[batch_idx % num_out_neighbors]
optimizer.dst_weights = {sent_neighbor: 1.0}
if isCUDA:
data, target = data.cuda(), target.cuda()
y = model(data)
loss = mseloss(y, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Dynamic topology training process for neighbor_allreduce
def dynamic_neighbor_allreduce_train(model, optimizer, dataloader, isCUDA, dynamic_topo_gen):
mseloss = nn.MSELoss()
model.train()
for data, target in dataloader:
send_neighbors, recv_neighbors = next(dynamic_topo_gen)
optimizer.dst_weights = send_neighbors
optimizer.src_weights = {
r: 1/(len(recv_neighbors) + 1) for r in recv_neighbors}
optimizer.self_weight = 1 / (len(recv_neighbors) + 1)
if isCUDA:
data, target = data.cuda(), target.cuda()
y = model(data)
loss = mseloss(y, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Training process with mini_batch, expecting optimizer with a preset num_step_per_communication
# TODO(ybc,hanbinhu): left-over data if batch_size cannot be divided by num_step_per_communication.
def local_aggregation_train(model, optimizer, dataloader, isCUDA, mini_batch_size):
mseloss = nn.MSELoss()
model.train()
for data, target in dataloader:
if isCUDA:
data, target = data.cuda(), target.cuda()
optimizer.zero_grad()
for i in range(0, len(data), mini_batch_size):
data_batch = data[i: i + mini_batch_size]
target_batch = target[i: i + mini_batch_size]
y = model(data_batch)
loss = mseloss(y, target_batch)
loss.div_(len(data) / mini_batch_size)
loss.backward()
optimizer.step()
def evaluation(model, dataloader, isCUDA):
mseloss = nn.MSELoss()
model.eval()
total_loss = 0
with torch.no_grad():
for data, target in dataloader:
if isCUDA:
data, target = data.cuda(), target.cuda()
y = model(data)
loss = mseloss(y, target)
total_loss += loss * len(target)
total_loss /= len(dataloader.dataset)
avg_total_loss = bf.allreduce(total_loss)
return avg_total_loss.item()
static_topo_scenarios = []
static_topo_scenarios.append(
pytest.param("CPU", bf.CommunicationType.empty, {"ATC": False, "error_threshold": 2},
id="AWC Empty on CPU"))
static_topo_scenarios.append(
pytest.param("CPU", bf.CommunicationType.empty, {"ATC": True, "error_threshold": 2},
id="ATC Empty on CPU"))
static_topo_scenarios.append(
pytest.param("CPU", bf.CommunicationType.allreduce, {"ATC": False},
id="AWC Allreduce on CPU"))
static_topo_scenarios.append(
pytest.param("CPU", bf.CommunicationType.allreduce, {"ATC": True},
id="ATC Allreduce on CPU"))
static_topo_scenarios.append(
pytest.param("CPU", bf.CommunicationType.neighbor_allreduce, {"ATC": False},
id="AWC Neighbor Allreduce on CPU"))
static_topo_scenarios.append(
pytest.param("CPU", bf.CommunicationType.neighbor_allreduce, {"ATC": True},
id="ATC Neighbor Allreduce on CPU"))
static_topo_scenarios.append(
pytest.param("CPU", "gradient.allreduce", {}, id="Gradient Allreduce on CPU"))
static_topo_scenarios.append(
pytest.param("CPU", "win.put", {'window_prefix': 'CPU'}, id="Window put on CPU"))
if TEST_ON_GPU:
static_topo_scenarios.append(
pytest.param("GPU", bf.CommunicationType.empty, {"ATC": False, "error_threshold": 2},
id="AWC Empty on GPU"))
static_topo_scenarios.append(
pytest.param("GPU", bf.CommunicationType.empty, {"ATC": True, "error_threshold": 2},
id="ATC Empty on GPU"))
static_topo_scenarios.append(
pytest.param("GPU", bf.CommunicationType.allreduce, {"ATC": False},
id="AWC Allreduce on GPU"))
static_topo_scenarios.append(
pytest.param("GPU", bf.CommunicationType.allreduce, {"ATC": True},
id="ATC Allreduce on GPU"))
static_topo_scenarios.append(
pytest.param("GPU", bf.CommunicationType.neighbor_allreduce, {"ATC": False},
id="AWC Neighbor Allreduce on GPU"))
static_topo_scenarios.append(
pytest.param("GPU", bf.CommunicationType.neighbor_allreduce, {"ATC": True},
id="ATC Neighbor Allreduce on GPU"))
static_topo_scenarios.append(
pytest.param("GPU", "gradient.allreduce", {}, id="Gradient Allreduce on GPU"))
static_topo_scenarios.append(
pytest.param("GPU", "win.put", {'window_prefix': 'GPU'}, id="Window put on GPU"))
# device can be set to "GPU" or "CPU".
# communication_type can be selected from bf.CommunicationType, "gradient.allreduce" or "win.put".
# kwargs is some optional parameters related to certain communication types.
@pytest.mark.parametrize("device,communication_type,kwargs", static_topo_scenarios)
def test_standard_optimizer(device, communication_type, kwargs):
atc_style = kwargs.get("ATC", False)
error_threshold = kwargs.get("error_threshold", 1.5)
window_prefix = kwargs.get("window_prefix", None)
problem_builder, train_dataloader, test_dataloader, model, optimizer, num_epochs = \
problem_setup()
isCUDA = pin_model_to_device(device, model)
if isinstance(communication_type, bf.CommunicationType):
base_dist_optimizer = (bf.DistributedAdaptThenCombineOptimizer if atc_style else
bf.DistributedAdaptWithCombineOptimizer)
optimizer = base_dist_optimizer(optimizer, model=model,
communication_type=communication_type)
elif communication_type == "win.put":
optimizer = bf.DistributedWinPutOptimizer(optimizer, model=model,
window_prefix=window_prefix)
elif communication_type == "gradient.allreduce":
optimizer = bf.DistributedGradientAllreduceOptimizer(
optimizer, model=model)
else:
raise ValueError("Communication_type under test is not expected.")
# Train and test
train_mse = []
test_mse = []
for _ in range(num_epochs):
standard_train(model, optimizer, train_dataloader, isCUDA)
train_mse.append(evaluation(model, train_dataloader, isCUDA))
test_mse.append(evaluation(model, test_dataloader, isCUDA))
train_mse = np.array(train_mse)
test_mse = np.array(test_mse)
# Check if the MSEs in the last three epochs are small enough
assert (
train_mse[-3:].max() < error_threshold*problem_builder.noise_level**2
), "Train MSE in the last three epochs doesn't coverge."
assert (
test_mse[-3:].max() < error_threshold*problem_builder.noise_level**2
), "Train MSE in the last three epochs doesn't coverge."
if communication_type == "win.put":
optimizer.unregister_window()
hierarchical_model_scenarios = []
hierarchical_model_scenarios.append(
pytest.param("CPU", bf.CommunicationType.neighbor_allreduce, {"ATC": False},
id="AWC Neighbor Allreduce on CPU",
marks=pytest.mark.skip(reason="AWC doesn't converge for hierarchical model.")))
hierarchical_model_scenarios.append(
pytest.param("CPU", bf.CommunicationType.neighbor_allreduce, {"ATC": True},
id="ATC Neighbor Allreduce on CPU",
marks=pytest.mark.skip(reason="ATC doesn't converge for hierarchical model.")))
hierarchical_model_scenarios.append(
pytest.param("CPU", "gradient.allreduce", {}, id="Gradient Allreduce on CPU",
marks=pytest.mark.skip(reason="GA may not converge for hierarchical model.")))
hierarchical_model_scenarios.append(
pytest.param("CPU", "win.put", {'window_prefix': 'CPU'}, id="Window put on CPU",
marks=pytest.mark.skip(reason="Win put may not converge for hierarchical model.")))
if TEST_ON_GPU:
hierarchical_model_scenarios.append(
pytest.param("GPU", bf.CommunicationType.neighbor_allreduce, {"ATC": False},
id="AWC Neighbor Allreduce on GPU",
marks=pytest.mark.skip(reason="AWC doesn't converge for hierarchical model.")))
hierarchical_model_scenarios.append(
pytest.param("GPU", bf.CommunicationType.neighbor_allreduce, {"ATC": True},
id="ATC Neighbor Allreduce on GPU",
marks=pytest.mark.skip(reason="ATC doesn't converge for hierarchical model.")))
hierarchical_model_scenarios.append(
pytest.param("GPU", "gradient.allreduce", {}, id="Gradient Allreduce on GPU",
marks=pytest.mark.skip(reason="GA may not converge for hierarchical model.")))
hierarchical_model_scenarios.append(
pytest.param("GPU", "win.put", {'window_prefix', 'GPU'}, id="Window put on GPU",
marks=pytest.mark.skip(
reason="Win put may not converge for hierarchical model.")))
@pytest.mark.parametrize("device,communication_type,kwargs", hierarchical_model_scenarios)
def test_optimizer_for_hierarchical_model(device, communication_type, kwargs):
atc_style = kwargs.get("ATC", False)
error_threshold = kwargs.get("error_threshold", 1.5)
window_prefix = kwargs.get("window_prefix", None)
problem_builder, train_dataloader, test_dataloader, model, optimizer, num_epochs = \
problem_setup(HierarchicalLinearNet)
isCUDA = pin_model_to_device(device, model)
if isinstance(communication_type, bf.CommunicationType):
base_dist_optimizer = (bf.DistributedAdaptThenCombineOptimizer if atc_style else
bf.DistributedAdaptWithCombineOptimizer)
optimizer = base_dist_optimizer(optimizer, model=model,
communication_type=communication_type)
elif communication_type == "win.put":
optimizer = bf.DistributedWinPutOptimizer(optimizer, model=model,
window_prefix=window_prefix)
elif communication_type == "gradient.allreduce":
optimizer = bf.DistributedGradientAllreduceOptimizer(
optimizer, model=model)
else:
raise ValueError("Communication_type under test is not expected.")
# Train and test
train_mse = []
test_mse = []
for _ in range(num_epochs):
standard_train(model, optimizer, train_dataloader, isCUDA)
train_mse.append(evaluation(model, train_dataloader, isCUDA))
test_mse.append(evaluation(model, test_dataloader, isCUDA))
train_mse = np.array(train_mse)
test_mse = np.array(test_mse)
# Check if the MSEs in the last three epochs are small enough
assert (
train_mse[-3:].max() < error_threshold*problem_builder.noise_level**2
), "Train MSE in the last three epochs doesn't coverge."
assert (
test_mse[-3:].max() < error_threshold*problem_builder.noise_level**2
), "Train MSE in the last three epochs doesn't coverge."
if communication_type == "win.put":
optimizer.unregister_window()
# Neighbor allreduce dynamic tests
dynamic_neighbor_allreduce_scenarios = []
dynamic_neighbor_allreduce_scenarios.append(
pytest.param("CPU", False, {}, id="Dynamic AWC Neighbor Allreduce on CPU"))
dynamic_neighbor_allreduce_scenarios.append(
pytest.param("CPU", True, {}, id="Dynamic ATC Neighbor Allreduce on CPU"))
if TEST_ON_GPU:
dynamic_neighbor_allreduce_scenarios.append(
pytest.param("GPU", False, {}, id="Dynamic AWC Neighbor Allreduce on GPU"))
dynamic_neighbor_allreduce_scenarios.append(
pytest.param("GPU", True, {}, id="Dynamic ATC Neighbor Allreduce on GPU"))
@pytest.mark.parametrize("device,atc_style,kwargs", dynamic_neighbor_allreduce_scenarios)
def test_dynamic_neighbor_allreduce_optimizer(device, atc_style, kwargs):
error_threshold = kwargs.get("error_threshold", 1.5)
problem_builder, train_dataloader, test_dataloader, model, optimizer, num_epochs = \
problem_setup()
isCUDA = pin_model_to_device(device, model)
base_dist_optimizer = (bf.DistributedAdaptThenCombineOptimizer if atc_style else
bf.DistributedAdaptWithCombineOptimizer)
optimizer = base_dist_optimizer(optimizer, model=model,
communication_type=bf.CommunicationType.neighbor_allreduce)
dynamic_topo_gen = topology_util.GetDynamicOnePeerSendRecvRanks(
bf.load_topology(), bf.rank())
# Train and test
train_mse = []
test_mse = []
for _ in range(num_epochs):
dynamic_neighbor_allreduce_train(model, optimizer, train_dataloader, isCUDA,
dynamic_topo_gen)
train_mse.append(evaluation(model, train_dataloader, isCUDA))
test_mse.append(evaluation(model, test_dataloader, isCUDA))
train_mse = np.array(train_mse)
test_mse = np.array(test_mse)
# Check if the MSEs in the last three epochs are small enough
assert (
train_mse[-3:].max() < error_threshold*problem_builder.noise_level**2
), "Train MSE in the last three epochs doesn't coverge."
assert (
test_mse[-3:].max() < error_threshold*problem_builder.noise_level**2
), "Train MSE in the last three epochs doesn't coverge."
# Window put dynamic tests
dynamic_win_put_scenarios = []
dynamic_win_put_scenarios.append(
pytest.param("CPU", {'window_prefix':'CPU'}, id="Dynamic window put on CPU"))
if TEST_ON_GPU:
dynamic_win_put_scenarios.append(
pytest.param("GPU", {'window_prefix':'GPU'}, id="Dynamic window put on GPU"))
@pytest.mark.parametrize("device,kwargs", dynamic_win_put_scenarios)
def test_dynamic_win_put_optimizer(device, kwargs):
error_threshold = kwargs.get("error_threshold", 1.5)
window_prefix = kwargs.get("window_prefix", None)
problem_builder, train_dataloader, test_dataloader, model, optimizer, num_epochs = \
problem_setup()
isCUDA = pin_model_to_device(device, model)
optimizer = bf.DistributedWinPutOptimizer(optimizer, model=model, window_prefix=window_prefix)
# Train and test
train_mse = []
test_mse = []
for epoch in range(num_epochs):
dynamic_win_put_train(
model, optimizer, train_dataloader, isCUDA, epoch)
train_mse.append(evaluation(model, train_dataloader, isCUDA))
test_mse.append(evaluation(model, test_dataloader, isCUDA))
train_mse = np.array(train_mse)
test_mse = np.array(test_mse)
# Check if the MSEs in the last three epochs are small enough
assert (
train_mse[-3:].max() < error_threshold*problem_builder.noise_level**2
), "Train MSE in the last three epochs doesn't coverge."
assert (
test_mse[-3:].max() < error_threshold*problem_builder.noise_level**2
), "Train MSE in the last three epochs doesn't coverge."
optimizer.unregister_window()
local_aggregation_scenarios = []
local_aggregation_scenarios.append(
pytest.param("CPU", bf.CommunicationType.empty, {"ATC": False, "error_threshold": 2},
id="AWC Empty on CPU"))
local_aggregation_scenarios.append(
pytest.param("CPU", bf.CommunicationType.empty, {"ATC": True, "error_threshold": 2},
id="ATC Empty on CPU"))
local_aggregation_scenarios.append(
pytest.param("CPU", bf.CommunicationType.allreduce, {"ATC": False},
id="AWC Allreduce on CPU"))
local_aggregation_scenarios.append(
pytest.param("CPU", bf.CommunicationType.allreduce, {"ATC": True},
id="ATC Allreduce on CPU"))
local_aggregation_scenarios.append(
pytest.param("CPU", bf.CommunicationType.neighbor_allreduce, {"ATC": False},
id="AWC Neighbor Allreduce on CPU"))
local_aggregation_scenarios.append(
pytest.param("CPU", bf.CommunicationType.neighbor_allreduce,
{"ATC": True}, id="ATC Neighbor Allreduce on CPU"))
local_aggregation_scenarios.append(
pytest.param("CPU", "gradient.allreduce", {}, id="Gradient Allreduce on CPU"))
local_aggregation_scenarios.append(
pytest.param("CPU", "win.put", {'window_prefix': 'CPU'}, id="Window put on CPU"))
local_aggregation_scenarios.append(
pytest.param("CPU", bf.CommunicationType.neighbor_allreduce, {"mini_batch_size": 4},
id="Neighbor allreduce AWC on CPU with a mini_batch_size of 4"))
local_aggregation_scenarios.append(
pytest.param("CPU", bf.CommunicationType.neighbor_allreduce, {"mini_batch_size": 8},
id="Neighbor allreduce AWC on CPU with a mini_batch_size of 8"))
local_aggregation_scenarios.append(
pytest.param("CPU", bf.CommunicationType.neighbor_allreduce, {"mini_batch_size": 32},
id="Neighbor allreduce AWC on CPU with a mini_batch_size of 32"))
if TEST_ON_GPU:
local_aggregation_scenarios.append(
pytest.param("GPU", bf.CommunicationType.empty, {"ATC": False, "error_threshold": 2},
id="AWC Empty on GPU"))
local_aggregation_scenarios.append(
pytest.param("GPU", bf.CommunicationType.empty, {"ATC": True, "error_threshold": 2},
id="ATC Empty on GPU"))
local_aggregation_scenarios.append(
pytest.param("GPU", bf.CommunicationType.allreduce, {"ATC": False},
id="AWC Allreduce on GPU"))
local_aggregation_scenarios.append(
pytest.param("GPU", bf.CommunicationType.allreduce, {"ATC": True},
id="ATC Allreduce on GPU"))
local_aggregation_scenarios.append(
pytest.param("GPU", bf.CommunicationType.neighbor_allreduce, {"ATC": False},
id="AWC Neighbor Allreduce on GPU"))
local_aggregation_scenarios.append(
pytest.param("GPU", bf.CommunicationType.neighbor_allreduce, {"ATC": True},
id="ATC Neighbor Allreduce on GPU"))
local_aggregation_scenarios.append(
pytest.param("GPU", "gradient.allreduce", {}, id="Gradient Allreduce on GPU"))
local_aggregation_scenarios.append(
pytest.param("GPU", "win.put", {'window_prefix': 'GPU'}, id="Window put on GPU"))
@pytest.mark.parametrize("device,communication_type,kwargs", local_aggregation_scenarios)
def test_optimizer_local_aggregation(device, communication_type, kwargs):
atc_style = kwargs.get("ATC", False)
error_threshold = kwargs.get("error_threshold", 1.5)
mini_batch_size = kwargs.get("mini_batch_size", 16)
window_prefix = kwargs.get("window_prefix", None)
problem_builder, train_dataloader, test_dataloader, model, optimizer, num_epochs = \
problem_setup()
isCUDA = pin_model_to_device(device, model)
J = train_dataloader.batch_size // mini_batch_size
if isinstance(communication_type, bf.CommunicationType):
base_dist_optimizer = (bf.DistributedAdaptThenCombineOptimizer if atc_style else
bf.DistributedAdaptWithCombineOptimizer)
optimizer = base_dist_optimizer(optimizer, model=model,
communication_type=communication_type,
num_steps_per_communication=J)
elif communication_type == "win.put":
optimizer = bf.DistributedWinPutOptimizer(optimizer, model=model,
window_prefix=window_prefix,
num_steps_per_communication=J)
elif communication_type == "gradient.allreduce":
optimizer = bf.DistributedGradientAllreduceOptimizer(optimizer, model=model,
num_steps_per_communication=J)
else:
raise ValueError("Communication_type under test is not expected.")
# Train and test
train_mse = []
test_mse = []
for _ in range(num_epochs):
local_aggregation_train(
model, optimizer, train_dataloader, isCUDA, mini_batch_size)
train_mse.append(evaluation(model, train_dataloader, isCUDA))
test_mse.append(evaluation(model, test_dataloader, isCUDA))
train_mse = np.array(train_mse)
test_mse = np.array(test_mse)
# Check if the MSEs in the last three epochs are small enough
assert (
train_mse[-3:].max() < error_threshold*problem_builder.noise_level**2
), "Train MSE in the last three epochs doesn't coverge."
assert (
test_mse[-3:].max() < error_threshold*problem_builder.noise_level**2
), "Train MSE in the last three epochs doesn't coverge."
if communication_type == "win.put":
optimizer.unregister_window()
local_aggregation_duplicated_scenarios = []
local_aggregation_duplicated_scenarios.append(
pytest.param("CPU", bf.CommunicationType.neighbor_allreduce, {"ATC": False},
id="AWC Neighbor Allreduce on CPU"))
local_aggregation_duplicated_scenarios.append(
pytest.param("CPU", bf.CommunicationType.neighbor_allreduce, {"ATC": True},
id="ATC Neighbor Allreduce on CPU"))
local_aggregation_duplicated_scenarios.append(
pytest.param("CPU", "win.put", {'window_prefix': 'CPU'}, id="Win Put on CPU"))
local_aggregation_duplicated_scenarios.append(
pytest.param("CPU", "gradient.allreduce", {}, id="Gradient Allreduce on CPU"))
if TEST_ON_GPU:
local_aggregation_duplicated_scenarios.append(
pytest.param("GPU", bf.CommunicationType.neighbor_allreduce, {"ATC": False},
id="AWC Neighbor Allreduce on GPU"))
local_aggregation_duplicated_scenarios.append(
pytest.param("GPU", bf.CommunicationType.neighbor_allreduce, {"ATC": True},
id="ATC Neighbor Allreduce on GPU"))
local_aggregation_duplicated_scenarios.append(
pytest.param("GPU", "win.put", {'window_prefix': 'GPU'}, id="Win Put on GPU"))
local_aggregation_duplicated_scenarios.append(
pytest.param("GPU", "gradient.allreduce", {}, id="Gradient Allreduce on GPU"))
@pytest.mark.filterwarnings("error:Unexpected behavior")
@pytest.mark.parametrize("device,communication_type,kwargs", local_aggregation_duplicated_scenarios)
def test_optimizer_local_aggregation_duplicated(device, communication_type, kwargs):
# Accuracy doesn't matter here, mainly to test if there is warning thrown
# for local aggregation.
atc_style = kwargs.get("ATC", False)
mini_batch_size = kwargs.get("mini_batch_size", 16)
window_prefix = kwargs.get("window_prefix", None)
_, train_dataloader, test_dataloader, model, optimizer, num_epochs = \
problem_setup(DuplicatedLinearNet)
isCUDA = pin_model_to_device(device, model)
mini_batch_size = train_dataloader.batch_size
J = train_dataloader.batch_size // mini_batch_size
if isinstance(communication_type, bf.CommunicationType):
base_dist_optimizer = (bf.DistributedAdaptThenCombineOptimizer if atc_style else
bf.DistributedAdaptWithCombineOptimizer)
optimizer = base_dist_optimizer(optimizer, model=model,
communication_type=communication_type,
num_steps_per_communication=J)
elif communication_type == "win.put":
optimizer = bf.DistributedWinPutOptimizer(optimizer, model=model,
window_prefix=window_prefix,
num_steps_per_communication=J)
elif communication_type == "gradient.allreduce":
optimizer = bf.DistributedGradientAllreduceOptimizer(optimizer, model=model,
num_steps_per_communication=J)
else:
raise ValueError("Communication_type under test is not expected.")
# Train and test
for _ in range(num_epochs):
local_aggregation_train(
model, optimizer, train_dataloader, isCUDA, mini_batch_size)
evaluation(model, train_dataloader, isCUDA)
evaluation(model, test_dataloader, isCUDA)
if communication_type == "win.put":
optimizer.unregister_window()
| 2.265625
| 2
|
15_three_sum_sorting.py
|
ojhaanshu87/LeetCode
| 0
|
12777342
|
"""
Approach 1: Hashset
Since triplets must sum up to the target value, we can try the hash table approach from the Two Sum solution. This approach won't work, however, if the sum is not necessarily equal to the target, like in 3Sum Smaller and 3Sum Closest.
We move our pivot element nums[i] and analyze elements to its right. We find all pairs whose sum is equal to -nums[i] using the Two Sum: One-pass Hash Table approach, so that the sum of the pivot element (nums[i]) and the pair (-nums[i]) is equal to zero.
To do that, we process each element nums[j] to the right of the pivot, and check whether a complement -nums[i] - nums[j] is already in the hashset. If it is, we found a triplet. Then, we add nums[j] to the hashset, so it can be used as a complement from that point on.
Like in the approach above, we will also sort the array so we can skip repeated values. We provide a different way to avoid duplicates in the "No-Sort" approach below.
Algorithm
The main function is the same as in the Two Pointers approach above. Here, we use twoSum (instead of twoSumII), modified to produce triplets and skip repeating values.
For the main function:
Sort the input array nums.
Iterate through the array:
If the current value is greater than zero, break from the loop. Remaining values cannot sum to zero.
If the current value is the same as the one before, skip it.
Otherwise, call twoSum for the current position i.
For twoSum function:
For each index j > i in A:
Compute complement value as -nums[i] - nums[j].
If complement exists in hashset seen:
We found a triplet - add it to the result res.
Increment j while the next value is the same as before to avoid duplicates in the result.
Add nums[j] to hashset seen
Return the result res.
"""
from typing import List
class Solution:
def threeSum(self, nums: List[int]) -> List[List[int]]:
res = []
nums.sort()
for i in range(len(nums)):
if nums[i] > 0:
break
if i == 0 or nums[i - 1] != nums[i]:
self.twoSum(nums, i, res)
return res
def twoSum(self, nums: List[int], i: int, res: List[List[int]]):
seen = set()
j = i + 1
while j < len(nums):
complement = -nums[i] - nums[j]
if complement in seen:
res.append([nums[i], nums[j], complement])
while j + 1 < len(nums) and nums[j] == nums[j + 1]:
j += 1
seen.add(nums[j])
j += 1
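# Worked example (illustrative input, added to the original solution). After
# sorting, nums = [-4, -1, -1, 0, 1, 2]; the pivot -1 yields both triplets and
# the duplicate -1 at the next index is skipped by the nums[i - 1] != nums[i] check.
if __name__ == '__main__':
    print(Solution().threeSum([-1, 0, 1, 2, -1, -4]))  # [[-1, 1, 0], [-1, 2, -1]]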
| 3.703125
| 4
|
22.funcoes_lambda/10.exercicio2.py
|
robinson-1985/python-zero-dnc
| 0
|
12777343
|
<gh_stars>0
# 2. Use a filter function to return only the even numbers from the list below.
par = list(filter(lambda x: x % 2 == 0, [5, 2, 5, 7, 4, 2, 6, 10, 342, 54, 23, 6, 7, 9, 12]))
print(par)
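# Equivalent formulation with a list comprehension, added for comparison
# (not part of the original exercise).
evens = [x for x in [5, 2, 5, 7, 4, 2, 6, 10, 342, 54, 23, 6, 7, 9, 12] if x % 2 == 0]
print(evens)  # [2, 4, 2, 6, 10, 342, 54, 6, 12]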
| 3.015625
| 3
|
model.py
|
Estefaniajim/League-of-legends-helper
| 0
|
12777344
|
import app
import Data.dataAnalysis as da
import Data.liveDataLAN as lan
import Data.liveDataNA as na
import time
#summonerName,server,lane = app.getUser()
def getDataServer(server,summonerName):
if server == "LAN":
summonerId = lan.gettingSummonerId(summonerName)
tier, rank = lan.getRankedPosition(summonerId)
return summonerId,tier,rank
else:
summonerId= na.gettingSummonerId(summonerName)
tier,rank = na.getRankedPosition(summonerId)
return summonerId,tier,rank
def refreshData(lane,server,summonerName,creepsPerMin,goldPerMin):
while True:
try:
summonerId,tier,rank = getDataServer(server,summonerName)
if server == "LAN":
gameTime = lan.gettingLiveScores(summonerId)
else:
gameTime = na.gettingLiveScores(summonerId)
creepsPerMin, goldPerMin = da.gettingAvgScores(gameTime,lane,tier,rank)
time.sleep(60)
except Exception:
print("Match ended")
print("Your Score should look like")
deaths = da.getAvgDeaths(lane,tier,rank)
kills = da.getAvgKills(lane,tier,rank)
assists = da.getAvgAssists(lane,tier,rank)
wardsKilled = da.getAvgWardsKilled(lane,tier,rank)
wardsPlaced = da.getAvgWardsPlaced(lane,tier,rank)
print("Your KDA: "+ str(kills)+"/"+str(deaths)+"/"+str(assists))
print("Your wards placed: "+ str(wardsPlaced)+ " yes, wards are important even if you are not a support")
print("Your wads killed: "+ str(wardsKilled)+ "yes, even killing wards is important")
return deaths,kills,assists,wardsKilled,wardsPlaced
| 2.625
| 3
|
src/training/oct_resnet.py
|
yutake27/P3CMQA
| 0
|
12777345
|
<gh_stars>0
import chainer
import chainer.functions as F
import chainer.links as L
from octconv import OctConv
from octconv import OctConv_BN
from octconv import OctConv_BN_ACT
from octconv import oct_add, oct_function
class Building(chainer.Chain):
def __init__(self, n_in, n_mid, n_out, stride=1, alpha=0.25, bn_kwargs={}):
w = chainer.initializers.HeNormal()
super(Building, self).__init__()
self.use_conv = (n_in != n_out)
with self.init_scope():
self.conv_bn_act1 = OctConv_BN_ACT(n_in, n_mid, 3, stride, 1, nobias=True, initialW=w, alpha_out=alpha, bn_kwargs=bn_kwargs)
self.conv_bn2 = OctConv_BN(n_mid, n_out, 3, 1, 1, nobias=True, initialW=w, alpha_out=alpha, bn_kwargs=bn_kwargs)
if self.use_conv:
self.conv_bn3 = OctConv_BN(n_in, n_out, 1, stride, 0, nobias=True, initialW=w, alpha_out=alpha, bn_kwargs=bn_kwargs)
def __call__(self, x):
h = self.conv_bn_act1(x)
h = self.conv_bn2(h)
h = oct_add(h, self.conv_bn3(x)) if self.use_conv else oct_add(h, x)
h = oct_function(F.relu)(h)
return h
class BottleNeck(chainer.Chain):
def __init__(self, n_in, n_mid, n_out, stride=1, alpha=0.25, bn_kwargs={}):
w = chainer.initializers.HeNormal()
super(BottleNeck, self).__init__()
self.use_conv = (n_in != n_out)
with self.init_scope():
self.conv_bn_act1 = OctConv_BN_ACT(n_in, n_mid, 1, stride, 0, nobias=True, initialW=w, alpha_out=alpha, bn_kwargs=bn_kwargs)
self.conv_bn_act2 = OctConv_BN_ACT(n_mid, n_mid, 3, 1, 1, nobias=True, initialW=w, alpha_out=alpha, bn_kwargs=bn_kwargs)
self.conv_bn3 = OctConv_BN(n_mid, n_out, 1, 1, 0, nobias=True, initialW=w, alpha_out=alpha, bn_kwargs=bn_kwargs)
if self.use_conv:
self.conv_bn4 = OctConv_BN(
n_in, n_out, 1, stride, 0, nobias=True, initialW=w, alpha_out=alpha, bn_kwargs=bn_kwargs)
def __call__(self, x):
h = self.conv_bn_act1(x)
h = self.conv_bn_act2(h)
h = self.conv_bn3(h)
h = oct_add(h, self.conv_bn4(x)) if self.use_conv else oct_add(h, x)
h = oct_function(F.relu)(h)
return h
class Block(chainer.ChainList):
def __init__(self, n_in, n_mid, n_out, n_bottlenecks, stride=2, alpha=0.25, block=BottleNeck, bn_kwargs={}):
super(Block, self).__init__()
self.add_link(block(n_in, n_mid, n_out, stride, alpha=alpha, bn_kwargs=bn_kwargs))
for _ in range(n_bottlenecks - 1):
self.add_link(block(n_out, n_mid, n_out, alpha=alpha, bn_kwargs=bn_kwargs))
def __call__(self, x):
for f in self:
x = f(x)
return x
class OctResNet18(chainer.Chain):
def __init__(self, n_class=1, n_blocks=[2, 2, 2, 2], alpha=0.25, bn_kwargs={}):
super(OctResNet18, self).__init__()
w = chainer.initializers.HeNormal()
with self.init_scope():
self.conv_bn_act1 = OctConv_BN_ACT(None, 64, 3, 1, 1, nobias=True, initialW=w, alpha_out=alpha, bn_kwargs=bn_kwargs)
self.res3 = Block(64, 64, 64, n_blocks[0], 1, alpha=alpha, block=Building, bn_kwargs=bn_kwargs)
self.res4 = Block(64, 128, 128, n_blocks[1], 2, alpha=alpha, block=Building, bn_kwargs=bn_kwargs)
self.res5 = Block(128, 256, 256, n_blocks[2], 2, alpha=alpha, block=Building, bn_kwargs=bn_kwargs)
self.res6 = Block(256, 512, 512, n_blocks[3], 2, alpha=0, block=Building, bn_kwargs=bn_kwargs)
self.fc7 = L.Linear(None, n_class)
def __call__(self, x_h, x_l=None):
h = self.conv_bn_act1((x_h, x_l))
h = self.res3(h)
h = self.res4(h)
h = self.res5(h)
h = self.res6(h)
h = F.average_pooling_3d(h, h.shape[2:])
h = self.fc7(h)
return h
class OctResNet(chainer.Chain):
def __init__(self, n_class=10, n_blocks=[3, 4, 6, 3], alpha=0.25, bn_kwargs={}):
super(OctResNet, self).__init__()
w = chainer.initializers.HeNormal()
with self.init_scope():
self.conv_bn_act1 = OctConv_BN_ACT(None, 64, 3, 1, 1, nobias=True, initialW=w, alpha_out=alpha) #TODO why origin pad==0??
self.res3 = Block(64, 64, 256, n_blocks[0], 1, alpha=alpha, bn_kwargs=bn_kwargs)
self.res4 = Block(256, 128, 512, n_blocks[1], 2, alpha=alpha, bn_kwargs=bn_kwargs)
self.res5 = Block(512, 256, 1024, n_blocks[2], 2, alpha=alpha, bn_kwargs=bn_kwargs)
self.res6 = Block(1024, 512, 2048, n_blocks[3], 2, alpha=0, bn_kwargs=bn_kwargs)
self.fc7 = L.Linear(None, n_class)
def __call__(self, x_h, x_l=None):
h = self.conv_bn_act1((x_h, x_l))
h = self.res3(h)
h = self.res4(h)
h = self.res5(h)
h = self.res6(h)
h = F.average_pooling_3d(h, h.shape[2:])
h = self.fc7(h)
return h
class OctResNet50(OctResNet):
def __init__(self, n_class=10):
super(OctResNet50, self).__init__(n_class, [3, 4, 6, 3])
class OctResNet101(OctResNet):
def __init__(self, n_class=10):
super(OctResNet101, self).__init__(n_class, [3, 4, 23, 3])
class OctResNet152(OctResNet):
def __init__(self, n_class=10):
super(OctResNet152, self).__init__(n_class, [3, 8, 36, 3])
if __name__ == '__main__':
import numpy as np
x = np.random.randn(1, 14, 32, 32, 32).astype(np.float32)
y = np.random.randn(1, 24, 16, 16, 16).astype(np.float32)
model = OctResNet18(1)
y = model(x,y)
print(y.shape)
| 2.328125
| 2
|
inceptor/utils/utils.py
|
whitefi/inceptor
| 1
|
12777346
|
<reponame>whitefi/inceptor
import hashlib
import os
import re
import secrets
import string
import subprocess
import sys
import tempfile
from binascii import hexlify, unhexlify
from random import random, randint
from pefile import *
from pathlib import Path
def get_project_root() -> Path:
return Path(__file__).parent.parent
def bin2sh(filename):
if not os.path.isfile(filename):
raise FileNotFoundError("[-] Missing Bin2Sh target file")
utility = os.path.join(get_project_root(), "libs", "public", "adon.exe")
if not os.path.isfile(utility):
raise FileNotFoundError("[-] Missing Bin2Sh utility file")
return subprocess.check_output(f"{utility} \"{filename}\"").decode().strip()
def bin2hex4pe2sh(filename):
if not os.path.isfile(filename):
raise FileNotFoundError("[-] Missing bin2hex (pe2sh) target file")
utility = os.path.join(get_project_root(), "libs", "public", "chunlie.exe")
if not os.path.isfile(utility):
raise FileNotFoundError("[-] Missing bin2hex (pe2sh) utility file")
return unhexlify(subprocess.check_output(f"{utility} \"{filename}\"").decode().strip())
def py_bin2sh(filename):
if not os.path.isfile(filename):
raise FileNotFoundError("[-] Missing PyBin2Sh target file")
content = hexlify(open(filename, "rb").read()).decode()
shellcode = "{" + ",".join([f"0x{content[i:i + 2]}" for i in range(0, len(content), 2)]) + "}"
return shellcode
def sgn(shellcode, arch="x64"):
architecture = "64" if arch in ["x64", None] else "32"
filename = None
temp_filename = tempfile.NamedTemporaryFile(suffix=".raw",
delete=False,
dir=os.path.join(get_project_root(), "temp")).name
with open(temp_filename, "wb") as temp:
temp.write(shellcode)
if not os.path.isfile(temp_filename):
raise FileNotFoundError("[-] Missing Shikata-Ga-Nai target file")
utility = os.path.join(get_project_root(), "libs", "public", "sgn.exe")
if not os.path.isfile(utility):
raise FileNotFoundError("[-] Missing Shikata-Ga-Nai utility file")
try:
cmd = f"{utility} -safe -a {architecture} \"{temp_filename}\""
# print(cmd)
output = subprocess.check_output(cmd).decode().strip()
for line in output.split("\n"):
if line.find("Outfile:") > - 1:
filename = line.split(": ")[1]
print(f" [*] Encoded filename: {filename}")
shellcode = open(filename, "rb").read()
except subprocess.CalledProcessError:
print("[-] Failed to encode payload with Shikata-Ga-Nai")
if os.path.isfile(temp_filename):
os.unlink(temp_filename)
if filename and os.path.isfile(filename):
os.unlink(filename)
return shellcode
def isDotNet(filename):
try:
pe = PE(filename)
clr_metadata = pe.OPTIONAL_HEADER.DATA_DIRECTORY[14]
return not (clr_metadata.VirtualAddress == 0 and clr_metadata.Size == 0)
except PEFormatError:
return False
def shellcode_signature(shellcode):
if isinstance(shellcode, str):
shellcode = shellcode.encode()
return hashlib.sha1(shellcode).hexdigest()
def file_signature(filename):
_, ext = os.path.splitext(filename)
with open(filename, "rb") as file:
signature = hashlib.sha1(file.read()).hexdigest()
return signature
def choose(choices: list):
for n, ver in enumerate(choices):
print(f" {n}: {ver}")
choice = -1
while not (0 <= choice < len(choices)):
try:
choice = int(input("> "))
return choices[choice]
except ValueError:
continue
except TypeError:
continue
def mssql_hex(file):
with open(file=file, mode="rb") as dll:
content = dll.read()
return f"0x{hexlify(content).decode()}"
def static_random_ascii_string(min_size=None, max_size=None):
if not min_size:
min_size = 3
if not max_size:
max_size = 10
return ''.join(secrets.choice(string.ascii_letters) for _ in range(randint(min_size, max_size)))
if __name__ == '__main__':
try:
payload = mssql_hex(sys.argv[1])
if len(payload) <= 8000:
print("[+] Success. payload length is under MAX_LENGTH")
else:
print(f"[-] Warning: payload length is above MAX_LENGTH: {len(payload)}")
print(payload)
except:
print("[-] Error: MSSQL Hexlify needs a file to encode")
| 2.46875
| 2
|
tracklib/init/__init__.py
|
xueyuelei/tracklib
| 5
|
12777347
|
from __future__ import division, absolute_import, print_function
from .init import *
| 1.140625
| 1
|
py/ftpy.py
|
LauriHursti/visions
| 2
|
12777348
|
# This module is simply a wrapper for libftpy.so that acts as a reminder of how its interface is defined
# C++ library libftpy must exist in same folder for this module to work
import libftpy
"""Get bounding boxes for FASText connected components found with given parameters
Parameters
----------
image : numpy array
Short int (0-255) valued grayscale image of size 1024x1024x1
count : int
Maximum count of boxes that are returned - boxes with keypoints that have the least amount of contrast are trimmed
scales : int
How many scales are used in the scale pyramid in addition of the original scale
threshold : int
Threshold used when deciding whether a pixel is a FT keypoint or not
positives : bool
Are boxes found for positive ("bright") keypoints included in the results
negatives : bool
Are boxes found for negative ("dark") keypoints included in the results
wLimit : int
Boxes that are wider than wLimit are trimmed from the results
hLimit : int
Boxes that are higher than hLimit are trimmed from the results
Returns
-------
boxes : numpy array
Numpy array of size N * 4 representing the found boxes in format x, y, width, height (dtype is int32)
"""
def getKpBoxes(image, count, scales, threshold, positives, negatives, wLimit, hLimit):
padding = 0
return libftpy.getKpBoxes(image, padding, count, scales, threshold, positives, negatives, wLimit, hLimit)
"""Get FASText keypoints found with given parameters
Parameters
----------
image : numpy array
Short int (0-255) valued grayscale image of size 1024x1024
count : int
Maximum count of keypoints that are returned - keypoints that have the least amount of contrast are trimmed
scales : int
How many scales are used in the scale pyramid in addition of the original scale
threshold : int
Threshold used when deciding whether a pixel is a FT keypoint or not
positives : bool
Are boxes found for positive ("bright") keypoints included in the results
negatives : bool
Are boxes found for negative ("dark") keypoints included in the results
The underlying C++ collector stores, for each keypoint:
icollector[y][x][0] = y; // y
icollector[y][x][1] = x; // x
icollector[y][x][2] = stats[0]; // kp type (end or bend)
icollector[y][x][3] = stats[1]; // lightess (positive or negative)
icollector[y][x][4] = stats[2]; // max contrast for nms
icollector[y][x][5] = stats[3]; // difference used in thresholding
Returns
-------
keypoints : numpy array
Numpy array of size N * 4 representing the found keypoints in format x, y, kp type (end=1, bend=2), kp lightness (positive=1, negative=2), difference for thresholding
"""
def getFTKeypoints(image, count, scales, threshold, positives, negatives):
padding = 0
return libftpy.getFTKeypoints(image, padding, count, scales, threshold, positives, negatives)
"""Cluster CC boxes using a custom distance algorithm (which can be found in <EMAIL>)
Parameters
----------
boxes : numpy array
int32 bounding boxes for connected components in format left, top, right, top, right, bottom, left, bottom
eps : floating point number
Epsilon (distance) parameter for the dbscan algorithm
min_samples : integer
How many points have to be in a point's neighbourhood for it to be a core point
Returns
-------
labels : numpy array
One-dimensional numpy array of cluster labels for each point
Nb! NOISE points have label -2
"""
def kpBoxDBSCAN(boxes, eps, min_samples):
padding = 0
boxN = len(boxes)
return libftpy.kpBoxDBSCAN(boxes, padding, boxN, eps, min_samples)
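# Minimal usage sketch (added for illustration). Assumptions: libftpy.so has been
# built and sits next to this module, and the image shape/dtype and parameter
# values below follow the docstrings above but are untested guesses, not tuned.
if __name__ == '__main__':
    import numpy as np
    image = np.zeros((1024, 1024), dtype=np.uint8)  # stand-in for a real page image
    boxes = getKpBoxes(image, count=2000, scales=3, threshold=12,
                       positives=True, negatives=True, wLimit=400, hLimit=200)
    print(boxes.shape)  # expected (N, 4): x, y, width, height per the docstring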
| 2.53125
| 3
|
pdf_reader.py
|
abdullahwaqar/docsearx
| 1
|
12777349
|
"""
* This file contains source code for reading and extracting data from pdfs
* @author: <NAME>
"""
import fitz
from storage import enumrateFilenames
def readAllPdf():
"""
* @def: Read all the pdf files from the storage and return the text from each, paired with the file name
* @return: List of tuple, pdf name and text data from all the pdfs
"""
pages = []
for pdf in enumrateFilenames():
with fitz.open(pdf) as infile:
for page in infile:
pages.append((pdf, page.getText()))
return pages
def readPdf(pdfname):
"""
* @def: Read a pdf file from the storage and return the text from each page, paired with the file name
* @param -> pdfname: path to the pdf
* @return: List of tuple, pdf name and text data from the pdf
"""
pages = []
with fitz.open(pdfname) as infile:
for page in infile:
pages.append((pdfname, page.getText()))
return pages
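# Minimal usage sketch (added for illustration; 'example.pdf' and the query are
# hypothetical - the real application enumerates files via storage.enumrateFilenames()).
if __name__ == '__main__':
    query = 'invoice'
    for name, text in readPdf('example.pdf'):
        if query.lower() in text.lower():
            print(f'"{query}" found in {name}')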
| 3.375
| 3
|
astroNN/__init__.py
|
igomezv/astroNN
| 156
|
12777350
|
r"""
Deep Learning for Astronomers with Tensorflow
"""
from pkg_resources import get_distribution
version = __version__ = get_distribution('astroNN').version
| 1.367188
| 1
|