| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
hand_detection.py
|
sethusaim/Shredder-Machine-System
| 0
|
12783651
|
<filename>hand_detection.py
import argparse
import datetime
from datetime import date
import cv2
import numpy as np
import xlrd
from imutils.video import VideoStream
from xlutils.copy import copy
from xlwt import Workbook
import orien_lines
import utils.detector_utils as detector_utils
lst1, lst2 = [], []
ap = argparse.ArgumentParser()
ap.add_argument(
"-d",
"--display",
dest="display",
type=int,
default=1,
help="Display the detected images using OpenCV. This reduces FPS",
)
args = vars(ap.parse_args())
detection_graph, sess = detector_utils.load_inference_graph()
def save_data(no_of_time_hand_detected, no_of_time_hand_crossed):
try:
today = date.today()
today = str(today)
rb = xlrd.open_workbook("result.xls")
sheet = rb.sheet_by_index(0)
sheet.cell_value(0, 0)
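# q holds the value of the 'Date' column (column 1) in the last row of the sheet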
q = sheet.cell_value(sheet.nrows - 1, 1)
rb = xlrd.open_workbook("result.xls")
wb = copy(rb)
w_sheet = wb.get_sheet(0)
if q == today:
w = sheet.cell_value(sheet.nrows - 1, 2)
e = sheet.cell_value(sheet.nrows - 1, 3)
w_sheet.write(sheet.nrows - 1, 2, w + no_of_time_hand_detected)
w_sheet.write(sheet.nrows - 1, 3, e + no_of_time_hand_crossed)
wb.save("result.xls")
else:
w_sheet.write(sheet.nrows, 0, sheet.nrows)
w_sheet.write(sheet.nrows, 1, today)
w_sheet.write(sheet.nrows, 2, no_of_time_hand_detected)
w_sheet.write(sheet.nrows, 3, no_of_time_hand_crossed)
wb.save("result.xls")
except FileNotFoundError:
today = date.today()
today = str(today)
wb = Workbook()
sheet = wb.add_sheet("Sheet 1")
sheet.write(0, 0, "Sl.No")
sheet.write(0, 1, "Date")
sheet.write(0, 2, "Number of times hand detected")
sheet.write(0, 3, "Number of times hand crossed")
m = 1
sheet.write(1, 0, m)
sheet.write(1, 1, today)
sheet.write(1, 2, no_of_time_hand_detected)
sheet.write(1, 3, no_of_time_hand_crossed)
wb.save("result.xls")
except Exception as e:
raise e
if __name__ == "__main__":
score_thresh = 0.80
vs = VideoStream(0).start()
Orientation = "bt"
Line_Perc1 = float(15)
Line_Perc2 = float(30)
num_hands_detect = 2
start_time = datetime.datetime.now()
num_frames = 0
im_height, im_width = (None, None)
cv2.namedWindow("Detection", cv2.WINDOW_NORMAL)
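# count_no_of_times counts rising edges (0 -> 1 transitions) in a detection history list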
def count_no_of_times(lst):
x = y = cnt = 0
for i in lst:
x = y
y = i
if x == 0 and y == 1:
cnt = cnt + 1
return cnt
try:
while True:
frame = vs.read()
frame = np.array(frame)
if im_height is None:
im_height, im_width = frame.shape[:2]
try:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
except:
print("Error converting to RGB")
boxes, scores, classes = detector_utils.detect_objects(
frame, detection_graph, sess
)
Line_Position2 = orien_lines.drawsafelines(
frame, Orientation, Line_Perc1, Line_Perc2
)
a, b = detector_utils.draw_box_on_image(
num_hands_detect,
score_thresh,
scores,
boxes,
classes,
im_width,
im_height,
frame,
Line_Position2,
Orientation,
)
lst1.append(a)
lst2.append(b)
no_of_time_hand_detected = no_of_time_hand_crossed = 0
num_frames += 1
elapsed_time = (datetime.datetime.now() - start_time).total_seconds()
fps = num_frames / elapsed_time
if args["display"]:
detector_utils.draw_text_on_image(
"FPS : " + str("{0:.2f}".format(fps)), frame
)
cv2.imshow("Detection", cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
if cv2.waitKey(25) & 0xFF == ord("q"):
cv2.destroyAllWindows()
vs.stop()
break
no_of_time_hand_detected = count_no_of_times(lst2)
no_of_time_hand_crossed = count_no_of_times(lst1)
save_data(no_of_time_hand_detected, no_of_time_hand_crossed)
print("Average FPS: ", str("{0:.2f}".format(fps)))
except KeyboardInterrupt:
no_of_time_hand_detected = count_no_of_times(lst2)
no_of_time_hand_crossed = count_no_of_times(lst1)
today = date.today()
save_data(no_of_time_hand_detected, no_of_time_hand_crossed)
print("Average FPS: ", str("{0:.2f}".format(fps)))
| 2.59375
| 3
|
openstack/identity/v3/group.py
|
anton-sidelnikov/openstacksdk
| 0
|
12783652
|
<filename>openstack/identity/v3/group.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import exceptions
from openstack import resource
from openstack import utils
class Group(resource.Resource):
    resource_key = 'group'
    resources_key = 'groups'
    base_path = '/groups'

    # capabilities
    allow_create = True
    allow_fetch = True
    allow_commit = True
    allow_delete = True
    allow_list = True
    commit_method = 'PATCH'

    _query_mapping = resource.QueryParameters(
        'domain_id', 'name',
    )

    # Properties
    #: The description of this group. *Type: string*
    description = resource.Body('description')
    #: References the domain ID which owns the group; if a domain ID is not
    #: specified by the client, the Identity service implementation will
    #: default it to the domain ID to which the client's token is scoped.
    #: *Type: string*
    domain_id = resource.Body('domain_id')
    #: Unique group name, within the owning domain. *Type: string*
    name = resource.Body('name')

    def add_user(self, session, user):
        """Add user to the group"""
        url = utils.urljoin(
            self.base_path, self.id, 'users', user.id)
        resp = session.put(url,)
        exceptions.raise_from_response(resp)

    def remove_user(self, session, user):
        """Remove user from the group"""
        url = utils.urljoin(
            self.base_path, self.id, 'users', user.id)
        resp = session.delete(url,)
        exceptions.raise_from_response(resp)

    def check_user(self, session, user):
        """Check whether user belongs to group"""
        url = utils.urljoin(
            self.base_path, self.id, 'users', user.id)
        resp = session.head(url,)
        if resp.status_code == 404:
            # If we receive 404 - treat this as False,
            # rather than returning an exception
            return False
        exceptions.raise_from_response(resp)
        if resp.status_code == 204:
            return True
        return False
| 2.109375
| 2
|
bot/__main__.py
|
BLovegrove/boomer
| 3
|
12783653
|
<gh_stars>1-10
import os
import discord
from discord.enums import Status
from discord.ext import commands
from discord_slash.client import SlashCommand
from . import config
import logging
logging.basicConfig(
    format='%(asctime)s %(levelname)-8s %(message)s',
    level=logging.INFO,
    datefmt='%Y-%m-%d %H:%M:%S'
)

bot = commands.Bot(
    command_prefix="!wat",
    activity=discord.Game(name="nothing."),
    status=Status.idle
)
slash = SlashCommand(bot, sync_commands=True)


@bot.event
async def on_ready():
    logging.info(f'{bot.user} has logged in.')


command_folders = [
    'music',
    'admin'
]
module_directory = os.path.join(os.path.dirname(os.path.dirname(__file__))) + '/bot'


def main():
    # load core cogs
    bot.load_extension('bot.cogs.core.setup')
    bot.load_extension('bot.cogs.core.voice')
    bot.load_extension('bot.cogs.core.queue')
    bot.load_extension('bot.cogs.core.music')
    bot.load_extension('bot.cogs.core.events')

    # dynamically load all the other cogs
    for type in command_folders:
        for file in os.listdir(f'{module_directory}/cogs/{type}'):
            if file.endswith('.py'):
                bot.load_extension(f'bot.cogs.{type}.{file[:-3]}')

    cfg = config.load_config()
    bot.run(cfg['bot']['token'])


if __name__ == '__main__':
    main()
| 2.28125
| 2
|
tidycsv/core/parsing.py
|
gmagannaDevelop/tidyCSV.py
| 2
|
12783654
|
"""
Parsing functions
All the functions needed to parse an arbitrary
csv file and find the biggest semantically coherent
group (set) of lines.
This maximum set is what we call a "tidy csv",
implying that the underlying idea of a consistent
comma-separated observations are present.
Some programs can "magically" parse poorly specified,
invalid, and heterogeneous csv files.
This is not the case for all functions. This set of functions
aims to be this interface which allows other programs to access
data from a csv file even when it has been poorly written.
"""
# TODO : review or shorten docstring ?
from pathlib import Path
from typing import Dict, List, Union, Optional
from functools import reduce
__all__ = ["get_csv_counts", "get_maximum_csv_group"]
def get_csv_counts(
    file: Union[str, Path], separator: Optional[str] = None
) -> Dict[int, List[str]]:
    """
    Get groups of semantically consistent csv lines,
    i.e. lines with the same number of commas.

    Parameters
    ----------
    file : a string or pathlib.Path, to be used within a
           call to `open(file, "r")`.
    separator : (optional) a string indicating the token
                that is to be taken as the column delimiter.
                It defaults to ",".

    Returns
    -------
    A dictionary containing the number of commas as keys
    and the lines which have said number of commas as values,
    i.e.
    {
        2: [
            "x1,x2,x3",
            "11,21,31",
            "12,22,33",
        ],
        1: [
            "extrainfo,date",
            "none,2020-05-05"
        ]
    }
    """
    _separator: str = separator or ","
    with open(file, "r") as file_reader:
        lines: List[str] = file_reader.readlines()

    csv_counts: Dict[int, List[str]] = {}
    for line in lines:
        n_commas: int = line.count(_separator)
        if n_commas in csv_counts.keys():
            csv_counts[n_commas].append(line)
        else:
            csv_counts.update({n_commas: [line]})

    return csv_counts


def get_maximum_csv_group(csv_counts: Dict[int, List[str]]) -> List[str]:
    """Get the list with the maximum number of
    semantically consistent csv lines from a
    dictionary as returned by `get_csv_counts`."""
    return reduce(lambda x, y: x if len(x[1]) > len(y[1]) else y, csv_counts.items())[1]
| 4.15625
| 4
|
21_excel/02_obtendo_linhas_colunas.py
|
smartao/estudos_python
| 0
|
12783655
|
#!/usr/bin/python3
import openpyxl
from openpyxl.utils import get_column_letter, column_index_from_string

# Why use the utils module: https://is.gd/YrDuST
wb = openpyxl.load_workbook('example.xlsx')
sheet = wb.get_sheet_by_name('Sheet1')

print('Obtendo a letra da coluna a partir de um inteiro')
print(get_column_letter(1))
print(get_column_letter(2))
print(get_column_letter(27))
print(get_column_letter(900))

print('\nObtendo o numero da coluna a partir da letra')
print(get_column_letter(sheet.max_column))
# Getting the letter of the last column
print(column_index_from_string(get_column_letter(sheet.max_column)))
print(column_index_from_string('A'))
print(column_index_from_string('AA'))

print('\nObtendo linhas e colunas das planilhas')
# Printing the values as a tuple
# It contains three tuples inside
# print(tuple(sheet['A1':'C3']))
# print(tuple(sheet.columns)[1])
# for cellObj in list(sheet.columns)[1]:
#     print(cellObj.value)

# Iterating over the area from A1 to C3
for rowOfCellObjects in sheet['A1':'C3']:
    for cellObj in rowOfCellObjects:
        print(cellObj.coordinate, cellObj.value)
    print('--- END OF ROW ---')

print('\nAcessando valores de celulas de uma linha ou coluna')
for cellObj in list(sheet.columns)[1]:  # 1 = column B
    print(cellObj.value)
# Why the list() call is needed: https://is.gd/I3d9PR
| 3.46875
| 3
|
sh/get_GFED4s_CO_emissions.py
|
stevenjoelbrey/PMFutures
| 1
|
12783656
|
#!/usr/bin/env python2
import numpy as np
import h5py # if this creates an error please make sure you have the h5py library
months = '01','02','03','04','05','06','07','08','09','10','11','12'
sources = 'SAVA','BORF','TEMF','DEFO','PEAT','AGRI'
# in this example we will calculate annual CO emissions for the 14 GFED
# basisregions over 1997-2014. Please adjust the code to calculate emissions
# for your own species, region, and time period of interest. Please
# first download the GFED4.1s files and the GFED4_Emission_Factors.txt
# to your computer and adjust the directory where you placed them below
directory = '......'
"""
Read in emission factors
"""
species = [] # names of the different gas and aerosol species
EFs = np.zeros((41, 6)) # 41 species, 6 sources
k = 0
f = open(directory+'/GFED4_Emission_Factors.txt')
while 1:
    line = f.readline()
    if line == "":
        break
    if line[0] != '#':
        contents = line.split()
        species.append(contents[0])
        EFs[k,:] = contents[1:]
        k += 1
f.close()
# we are interested in CO for this example (4th row):
EF_CO = EFs[3,:]
start_year = 1997
end_year = 2014
"""
make table with summed DM emissions for each region, year, and source
"""
CO_table = np.zeros((15, end_year - start_year + 1)) # region, year
for year in range(start_year, end_year+1):
    string = directory+'/GFED4.1s_'+str(year)+'.hdf5'
    f = h5py.File(string, 'r')

    if year == start_year: # these are time invariable
        basis_regions = f['/ancill/basis_regions'][:]
        grid_area = f['/ancill/grid_cell_area'][:]

    CO_emissions = np.zeros((720, 1440))
    for month in range(12):
        # read in DM emissions
        string = '/emissions/'+months[month]+'/DM'
        DM_emissions = f[string][:]
        for source in range(6):
            # read in the fractional contribution of each source
            string = '/emissions/'+months[month]+'/partitioning/DM_'+sources[source]
            contribution = f[string][:]
            # calculate CO emissions as the product of DM emissions (kg DM per
            # m2 per month), the fraction the specific source contributes to
            # this (unitless), and the emission factor (g CO per kg DM burned)
            CO_emissions += DM_emissions * contribution * EF_CO[source]

    # fill table with total values for the globe (row 15) or basisregion (1-14)
    for region in range(15):
        if region == 14:
            mask = np.ones((720, 1440))
        else:
            mask = basis_regions == (region + 1)
        CO_table[region, year-start_year] = np.sum(grid_area * mask * CO_emissions)

    print year
# convert to Tg CO
CO_table = CO_table / 1E12
print CO_table
# please compare this to http://www.falw.vu/~gwerf/GFED/GFED4/tables/GFED4.1s_CO.txt
| 2.78125
| 3
|
setup.py
|
andrey-avdeev/black-fire
| 0
|
12783657
|
<reponame>andrey-avdeev/black-fire
# -*- coding: utf-8 -*-
import io
from setuptools import setup, find_packages
with io.open("README.md", "r", encoding="utf-8") as f:
    readme = f.read()

version = "0.0.1"

setup(
    name="black-fire",
    version=version,
    description="Useful wrappers for https://github.com/google/python-fire",
    long_description=readme,
    long_description_content_type="text/markdown",
    author="<NAME>",
    license="Apache 2.0",
    packages=find_packages(),
    zip_safe=False,
    python_requires=">=3.7",
    install_requires=[
    ],
    keywords="python-fire black-fire fire",
    url="https://github.com/andrey-avdeev/black-fire",
)
| 1.390625
| 1
|
googletranslateextract.py
|
LRjunior/FOPP2Ed-slovak
| 0
|
12783658
|
<filename>googletranslateextract.py
import re
import codecs
filenamein = 'foppsk20151010.html'
filenameout = 'fopp20151010slovak.html'
fh = codecs.open(filenamein, 'r', 'utf-8')
data = fh.read()
fh.close()
f_out = codecs.open(filenameout+"aa", 'w', 'utf-8')
f_out.write(data)
f_out.close()
import re
myfile = codecs.open(filenamein, 'r', 'utf-8')
regex = re.compile(r'<span.*?>(.*?)</span>',re.S|re.M) #re.compile(r'(?<b).*?(?b)')
f_out = codecs.open(filenameout, 'w', 'utf-8')
arr = []
for line in myfile:
    matches = regex.findall(line)
    for m in matches:
        arr.append(m.strip())

arrset = set(arr)
arr = list(arrset)
arr.sort()

for a in arr:
    if not a.startswith("<a"):
        f_out.write(a+"\n")
f_out.close()
| 3.046875
| 3
|
modules/quiz/models.py
|
Maurilearn/learnings
| 0
|
12783659
|
<reponame>Maurilearn/learnings
from shopyoapi.init import db
class Quiz(db.Model):
    __tablename__ = 'quizes'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100))
    question = db.Column(db.String(100))
    section_id = db.Column(db.Integer, db.ForeignKey('sections.id'),
                           nullable=False)
    # https://stackoverflow.com/questions/60805/getting-random-row-through-sqlalchemy
    # order_by='func.random()' returns the related answers in a random order each time they are loaded
    answers = db.relationship('Answer', backref='quiz', lazy=True,
                              cascade="all, delete, delete-orphan", order_by='func.random()')

    def insert(self):
        db.session.add(self)
        db.session.commit()

    def update(self):
        db.session.commit()

    def delete(self):
        db.session.delete(self)
        db.session.commit()


class Answer(db.Model):
    __tablename__ = 'answers'

    id = db.Column(db.Integer, primary_key=True)
    string = db.Column(db.String(100))
    correct = db.Column(db.Boolean)
    quizz_id = db.Column(db.Integer, db.ForeignKey('quizes.id'),
                         nullable=False)

    def insert(self):
        db.session.add(self)
        db.session.commit()

    def update(self):
        db.session.commit()

    def delete(self):
        db.session.delete(self)
        db.session.commit()
| 3.3125
| 3
|
src/jenkins_tui/views/base.py
|
chelnak/jenkins-tui
| 13
|
12783660
|
<filename>src/jenkins_tui/views/base.py<gh_stars>10-100
from __future__ import annotations
from rich.text import Text
from textual import events, messages
from textual.binding import Bindings
from textual.geometry import Size, SpacingDimensions
from textual.layouts.grid import GridLayout
from textual.reactive import Reactive
from textual.view import View
from textual.views._window_view import WindowChange
from ..widgets import ButtonWidget
class BaseView(View):
"""A base view containing common properties and methods."""
visible: Reactive[bool] = Reactive(True)
def __init__(self) -> None:
"""A base view containing common properties and methods."""
gutter: SpacingDimensions = (1, 0)
name = self.__class__.__name__
layout = GridLayout(gap=(1, 1), gutter=gutter, align=("center", "top"))
super().__init__(name=name, layout=layout)
self.layout: GridLayout = layout
self.buttons: dict[str, ButtonWidget] = {}
self.bindings = Bindings()
async def add_button(self, text: str, id: str | None = None) -> None:
"""Add a button to the view.
Args:
text (str): This is the text that will be displayed on the button.
id (str | None): The id of the button. Defaults to None.
"""
if id is None:
id = text.lower()
label = Text(text=text)
button = ButtonWidget(label=label, name=id)
self.buttons[id] = button
async def on_hide(self) -> None:
self.visible = False
async def on_show(self) -> None:
self.visible = True
async def on_mount(self) -> None:
"""Actions that are executed when the widget is mounted."""
pass
async def handle_layout(self, message: messages.Layout) -> None:
"""Handle a layout message.
Args:
message (messages.Layout): The message to handle.
"""
self.layout.require_update()
message.stop()
self.refresh()
async def handle_update(self, message: messages.Update) -> None:
"""Handle an update message.
Args:
message (messages.Update): The message to handle.
"""
message.prevent_default()
await self.emit(WindowChange(self))
async def watch_scroll_x(self, value: int) -> None:
self.layout.require_update()
self.refresh()
async def watch_scroll_y(self, value: int) -> None:
"""Watch the scrol_y attribute.
Args:
value (int): The new value of the scroll_y attribute.
"""
self.layout.require_update()
self.refresh()
async def watch_virtual_size(self, size: Size) -> None:
"""Watch the virtual_size attribute.
Args:
size (Size): The new value of the virtual_size attribute.
"""
await self.emit(WindowChange(self))
async def on_resize(self, event: events.Resize) -> None:
"""Events that are executed when the window is resized.
Args:
event (events.Resize): A resize event.
"""
await self.emit(WindowChange(self))
| 2.375
| 2
|
code/visualization/2020/05/5_6_finetune_sparse_facto_perf_vs_param_dense_and_not_dense.py
|
lucgiffon/psm-nets
| 1
|
12783661
|
import pathlib
import pandas as pd
from palmnet.visualization.utils import get_palminized_model_and_df, get_df
import matplotlib.pyplot as plt
import numpy as np
import logging
import plotly.graph_objects as go
import plotly.io as pio
from pprint import pprint as pprint
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.ERROR)
pio.templates.default = "plotly_white"
dataset = {
"Cifar10": "--cifar10",
"Cifar100": "--cifar100",
# "SVHN": "--svhn",
"MNIST": "--mnist"
}
models_data = {
"Cifar10": ["--cifar10-vgg19"],
# "Cifar100": ["--cifar100-resnet20", "--cifar100-resnet50"],
"Cifar100": ["--cifar100-vgg19", "--cifar100-resnet20", "--cifar100-resnet50"],
"SVHN": ["--svhn-vgg19"],
"MNIST":["--mnist-lenet"],
}
color_bars_sparsity = {
2: "g",
3: "c",
4: "b",
5: "y"
}
tasks = {
"nb-param-compressed-total",
"finetuned-score",
"param-compression-rate-total"
}
ylabel_task = {
"nb-param-compressed-total": "log(# non-zero value)",
"finetuned-score": "Accuracy",
"param-compression-rate-total": "Compression Rate"
}
scale_tasks = {
"nb-param-compressed-total": "log",
"finetuned-score": "linear",
"param-compression-rate-total": "linear"
}
def get_palm_results():
results_path = "2020/03/9_10_finetune_palminized_no_useless"
results_path_2 = "2020/04/9_10_finetune_palminized_no_useless"
src_results_path = root_source_dir / results_path / "results.csv"
src_results_path_2 = root_source_dir / results_path_2 / "results.csv"
df = pd.read_csv(src_results_path, header=0)
df_2 = pd.read_csv(src_results_path_2, header=0)
df = pd.concat([df, df_2])
df = df.fillna("None")
df = df.drop(columns=["Unnamed: 0", "idx-expe"]).drop_duplicates()
df = df[df["keep-last-layer"] == 0]
df = df[df["use-clr"] == 1]
df = df.assign(**{"only-dense": False, "keep-first-layer": False})
return df
def get_faust_results():
results_path = "2020/05/3_4_finetune_faust_no_hierarchical_only_cifar_mnist"
src_results_path = root_source_dir / results_path / "results.csv"
df = pd.read_csv(src_results_path, header=0)
df = df.fillna("None")
df = df[df["hierarchical"] == False]
df = df.drop(columns=["Unnamed: 0", "idx-expe"]).drop_duplicates()
df = df[df["keep-last-layer"] == 0]
df = df.assign(**{"only-dense": False, "keep-first-layer": False})
return df
def get_tucker_results():
results_path_tucker = "2020/04/0_1_compression_tucker_tensortrain"
src_results_path_tucker = root_source_dir / results_path_tucker / "results.csv"
df_tucker_tt = pd.read_csv(src_results_path_tucker, header=0)
df_tucker_tt = df_tucker_tt.fillna("None")
df_tucker_tt = df_tucker_tt.assign(**{"only-dense": False, "use-pretrained": False})
df_tucker_tt = df_tucker_tt[df_tucker_tt["compression"] == "tucker"]
return df_tucker_tt
def get_tensortrain_results():
results_path_tucker = "2020/05/2_3_compression_tensortrain"
src_results_path_tucker = root_source_dir / results_path_tucker / "results.csv"
df_tucker_tt = pd.read_csv(src_results_path_tucker, header=0)
df_tucker_tt = df_tucker_tt.fillna("None")
# df_tucker_tt = df_tucker_tt[df_tucker_tt["use-pretrained"] == True]
# df_tucker_tt = df_tucker_tt[df_tucker_tt["only-dense"] == False]
return df_tucker_tt
def get_tucker_tensortrain_only_denseresults():
results_path_tucker = "2020/05/2_3_compression_tucker_tensortrain_only_dense"
src_results_path_tucker = root_source_dir / results_path_tucker / "results.csv"
df_tucker_tt = pd.read_csv(src_results_path_tucker, header=0)
df_tucker_tt = df_tucker_tt.fillna("None")
# df_tucker_tt = df_tucker_tt[df_tucker_tt["use-pretrained"] == True]
# df_tucker_tt = df_tucker_tt[df_tucker_tt["only-dense"] == True]
return df_tucker_tt
def get_palm_results_only_dense_keep_first():
results_path = "2020/05/5_6_finetune_sparse_facto_no_hierarchical_keep_first_layer_only_dense"
src_results_path = root_source_dir / results_path / "results.csv"
df = pd.read_csv(src_results_path, header=0)
df = df.fillna("None")
df = df.drop(columns=["Unnamed: 0", "idx-expe"]).drop_duplicates()
# df = df[df["only-dense"] == False]
return df
def get_deepfried_results():
results_path_tucker = "2020/05/5_6_compression_baselines"
src_results_path_tucker = root_source_dir / results_path_tucker / "results.csv"
df_tucker_tt = pd.read_csv(src_results_path_tucker, header=0)
df_tucker_tt = df_tucker_tt.fillna("None")
# df_tucker_tt = df_tucker_tt.assign(**{"only-dense": True, "use-pretrained": False})
df_tucker_tt = df_tucker_tt[df_tucker_tt["compression"] == "deepfried"]
return df_tucker_tt
def get_magnitude_results():
results_path_tucker = "2020/05/5_6_compression_baselines"
src_results_path_tucker = root_source_dir / results_path_tucker / "results.csv"
df_tucker_tt = pd.read_csv(src_results_path_tucker, header=0)
df_tucker_tt = df_tucker_tt.fillna("None")
# df_tucker_tt = df_tucker_tt.assign(**{"only-dense": True, "use-pretrained": False})
df_tucker_tt = df_tucker_tt[df_tucker_tt["compression"] == "magnitude"]
return df_tucker_tt
if __name__ == "__main__":
root_source_dir = pathlib.Path("/home/luc/PycharmProjects/palmnet/results/processed")
SHOW_FAUST = False
SHOW_KEEP_FIRST_ONLY = False
SHOW_PRETRAINED_ONLY = True
results_path = "2020/05/5_6_finetune_sparse_facto_perf_vs_param"
df_tucker = get_tucker_results()
df_tt = get_tensortrain_results()
df_deepfried = get_deepfried_results()
df_tucker_tt_only_dense = get_tucker_tensortrain_only_denseresults()
df_magnitude = get_magnitude_results()
df_tucker_tt_deepfried = pd.concat([df_tucker, df_tt, df_tucker_tt_only_dense, df_deepfried, df_magnitude])
df_palm = get_palm_results()
df_palm_bis = get_palm_results_only_dense_keep_first()
df_palm = pd.concat([df_palm, df_palm_bis])
# ONLY_DENSE = False
# df_tucker_tt = df_tucker_tt[df_tucker_tt["only-dense"] == ONLY_DENSE]
# df_palm = df_palm[df_palm["only-dense"] == ONLY_DENSE]
root_output_dir = pathlib.Path("/home/luc/PycharmProjects/palmnet/reports/figures/")
output_dir = root_output_dir / results_path / "histogrammes"
output_dir.mkdir(parents=True, exist_ok=True)
# sparsity_factors = sorted(set(df_palminized["--sparsity-factor"]))
# nb_factors = set(df_palm["nb-factor"].values)
hue_by_sparsity= {
2: 10,
3: 60,
4: 110,
5: 180
}
saturation_by_perm = {
1: 50,
0: 75
}
saturation_by_hier = {
1: 50,
0: 75
}
lum_by_clr = {
1: 20,
0: 30
}
lum_by_keep = {
1: 40,
0: 50
}
dct_symbol = {
"FAUST Q=2": "square",
"FAUST Q=3": "diamond",
"FAUST Q=None": "square-x",
"FAUST Q=None H": "star-square",
"PYQALM Q=2": "square-open",
"PYQALM Q=3": "diamond-open",
"PYQALM Q=None": "hash-open",
"PYQALM Q=None H": "star-square-open",
"PYQALM Q=2 -1": "square-open-dot",
"PYQALM Q=3 -1": "diamond-open-dot",
"PYQALM Q=None -1": "hash-open-dot",
"PYQALM Q=None H -1": "star-square-open-dot",
"PYQALM Q=2 -1 M": "square",
"PYQALM Q=3 -1 M": "diamond",
"PYQALM Q=None -1 M": "hash",
"PYQALM Q=None H -1 M": "star-square",
"Base": "x",
"Tucker": "circle",
"Tucker -1": "circle-dot",
"TT": "triangle-up",
"TT -1": "triangle-up-dot",
"TT -1 pretrained": "triangle-up-open-dot",
"Deepfried": "hexagram",
"Magnitude ": "square",
"Magnitude -1": "square",
}
dct_colors = {
"PALM K=2": "dodgerblue",
"PALM K=3": "darkorchid",
"PALM K=4": "green",
"PALM K=6": "aqua",
"PALM K=8": "cadetblue",
"TT R=2": "orange",
"TT R=6": "gold",
"TT R=10": "red",
"TT R=12": "darkred",
"TT R=14": "indianred",
"Base": "grey",
"Tucker": "pink",
"Tucker + Low Rank 10%": "orange",
"Tucker + Low Rank 20%": "gold",
"Tucker + Low Rank 30%": "red",
"Deepfried": "blueviolet",
"Magnitude 50%": "red",
"Magnitude 70%": "red",
"Magnitude 90%": "red",
}
SIZE_MARKERS = 15
WIDTH_MARKER_LINES = 2
datasets = set(df_palm["dataset"].values)
dct_table = dict()
for dataname in datasets:
dct_table[dataname] = dict()
df_data_palm = df_palm[df_palm["dataset"] == dataname]
df_tucker_tt_data = df_tucker_tt_deepfried[df_tucker_tt_deepfried["dataset"] == dataname]
df_model_values = set(df_data_palm["model"].values)
for modelname in df_model_values:
dct_table[dataname][modelname] = dict()
df_model_palm = df_data_palm[df_data_palm["model"] == modelname]
df_tucker_tt_model = df_tucker_tt_data[df_tucker_tt_data["model"] == modelname]
for ONLY_DENSE in [True, False]:
df_tucker_tt_model_dense = df_tucker_tt_model[df_tucker_tt_model["only-dense"] == ONLY_DENSE]
df_model_palm_dense = df_model_palm[df_model_palm["only-dense"] == ONLY_DENSE]
if ONLY_DENSE:
str_nb_param_compressed = "nb-param-compressed-dense"
str_nb_param_base = "nb-param-base-dense"
str_only_dense = " only dense"
else:
str_nb_param_compressed = "nb-param-compressed-total"
str_nb_param_base = "nb-param-base-total"
str_only_dense = ""
dct_entry_only_dense = "Dense" if ONLY_DENSE else "Conv+Dense"
dct_table[dataname][modelname][dct_entry_only_dense] = list()
fig = go.Figure()
base_score = None
base_nb_param = None
palm_algo = "PYQALM"
for idx_row, row in df_model_palm_dense.iterrows():
hierarchical_value = row["hierarchical"]
str_hierarchical = ' H' if hierarchical_value is True else ''
try:
nb_factor = int(row["nb-factor"])
except:
nb_factor = None
sparsity_factor = int(row["sparsity-factor"])
keep_first = row["keep-first-layer"]
str_keep_first = ' -1' if keep_first is True else ''
if SHOW_KEEP_FIRST_ONLY and not keep_first and not ONLY_DENSE:
continue
only_mask = row["only-mask"]
str_only_mask = " M" if only_mask is True else ""
name_trace = f"{palm_algo} Q={nb_factor} K={sparsity_factor}{str_hierarchical}{str_keep_first}{str_only_mask}"
finetuned_score = row["finetuned-score"]
nb_param = row[str_nb_param_compressed]
dct_row = dict()
dct_row["method"] = name_trace
dct_row["perf"] = finetuned_score
dct_row["nb_param"] = nb_param
dct_table[dataname][modelname][dct_entry_only_dense].append(dct_row)
base_score_tmp = row["base-model-score"]
assert base_score == base_score_tmp or base_score is None
base_nb_param_tmp = row[str_nb_param_base]
assert base_nb_param == base_nb_param_tmp or base_nb_param is None
base_score = base_score_tmp
base_nb_param = base_nb_param_tmp
fig.add_trace(
go.Scatter(
x=[nb_param],
y=[finetuned_score],
mode='markers',
name=name_trace,
hovertext=name_trace,
legendgroup=f"{palm_algo} K={sparsity_factor}{str_only_mask}",
marker=dict(
color=dct_colors[f"PALM K={sparsity_factor}"],
symbol=dct_symbol[f"{palm_algo} Q={nb_factor}{str_hierarchical}{str_keep_first}{str_only_mask}"],
size=SIZE_MARKERS,
line=dict(
color='Black',
width=WIDTH_MARKER_LINES
)
)
))
dct_row = dict()
dct_row["method"] = "Base"
dct_row["perf"] = base_score
dct_row["nb_param"] = base_nb_param
dct_table[dataname][modelname][dct_entry_only_dense].append(dct_row)
#############
# base data #
#############
fig.add_trace(
go.Scatter(
x=[base_nb_param],
y=[base_score],
mode='markers',
name="Base",
hovertext="Base",
legendgroup=f"Base",
marker=dict(
color=dct_colors[f"Base"],
symbol=dct_symbol[f"Base"],
size=SIZE_MARKERS,
line=dict(
color='Black',
width=WIDTH_MARKER_LINES,
)
)
))
###############
# tucker data #
###############
df_tucker = df_tucker_tt_model_dense[df_tucker_tt_model_dense["compression"] == "tucker"]
for idx_row, row in df_tucker.iterrows():
keep_first = row["keep-first-layer"]
str_keep_first = ' -1' if keep_first is True else ''
if SHOW_KEEP_FIRST_ONLY and not keep_first and not ONLY_DENSE:
continue
try:
rank_percentage = int(float(row["rank-percentage-dense"]) * 100)
except:
try:
rank_percentage = int(float(row["rank-percentage"]) * 100)
except:
rank_percentage = None
str_percentage = f' + Low Rank {rank_percentage}%' if rank_percentage is not None else ''
name_trace = f"Tucker{str_keep_first}{str_percentage}"
finetuned_score = row["finetuned-score"]
nb_param = row[str_nb_param_compressed]
dct_row = dict()
dct_row["method"] = name_trace
dct_row["perf"] = finetuned_score
dct_row["nb_param"] = nb_param
dct_table[dataname][modelname][dct_entry_only_dense].append(dct_row)
base_score_tmp = row["base-model-score"]
assert base_score == base_score_tmp or base_score is None
base_nb_param_tmp = row[str_nb_param_base]
assert base_nb_param == base_nb_param_tmp or base_nb_param is None or base_nb_param_tmp == 0, f"{base_nb_param}!={base_nb_param_tmp}"
fig.add_trace(
go.Scatter(
x=[nb_param],
y=[finetuned_score],
mode='markers',
name=name_trace,
hovertext=name_trace,
legendgroup=f"Tucker{str_percentage}",
marker=dict(
color=dct_colors[f"Tucker{str_percentage}"],
symbol=dct_symbol[f"Tucker{str_keep_first}"],
size=SIZE_MARKERS,
line=dict(
color='Black',
width=WIDTH_MARKER_LINES
)
)
))
###############
# magnitude data #
###############
df_magnitude = df_tucker_tt_model_dense[df_tucker_tt_model_dense["compression"] == "magnitude"]
for idx_row, row in df_magnitude.iterrows():
keep_first = row["keep-first-layer"]
str_keep_first = ' -1' if keep_first is True else ''
if SHOW_KEEP_FIRST_ONLY and not keep_first and not ONLY_DENSE:
continue
# try:
sparsity_percentage = int(float(row["final-sparsity"]) * 100)
# except:
# try:
# rank_percentage = int(float(row["rank-percentage"]) * 100)
# except:
# rank_percentage = None
str_percentage = f' {sparsity_percentage}%' #if sparsity_percentage is not None else ''
name_trace = f"Magnitude {str_keep_first}{str_percentage}"
finetuned_score = row["finetuned-score"]
nb_param = row[str_nb_param_compressed]
dct_row = dict()
dct_row["method"] = name_trace
dct_row["perf"] = finetuned_score
dct_row["nb_param"] = nb_param
dct_table[dataname][modelname][dct_entry_only_dense].append(dct_row)
print(finetuned_score)
base_score_tmp = row["base-model-score"]
assert np.isclose(base_score, base_score_tmp) or base_score is None, f"{base_score}!={base_score_tmp}"
base_nb_param_tmp = row[str_nb_param_base]
assert base_nb_param == base_nb_param_tmp or base_nb_param is None or base_nb_param_tmp == 0, f"{base_nb_param}!={base_nb_param_tmp}"
fig.add_trace(
go.Scatter(
x=[nb_param],
y=[finetuned_score],
mode='markers',
name=name_trace,
hovertext=name_trace,
legendgroup=f"Magnitude",
marker=dict(
color=dct_colors[f"Magnitude {str_percentage}"],
symbol=dct_symbol[f"Magnitude {str_keep_first}"],
size=SIZE_MARKERS,
line=dict(
color='Black',
width=WIDTH_MARKER_LINES
)
)
))
###############
# deepfried data #
###############
df_deepfried = df_tucker_tt_model_dense[df_tucker_tt_model_dense["compression"] == "deepfried"]
for idx_row, row in df_deepfried.iterrows():
keep_first = row["keep-first-layer"]
str_keep_first = ' -1' if keep_first is True else ''
if SHOW_KEEP_FIRST_ONLY and not keep_first and not ONLY_DENSE:
continue
# try:
# sparsity_percentage = int(float(row["final-sparsity"]) * 100)
# except:
# try:
# rank_percentage = int(float(row["rank-percentage"]) * 100)
# except:
# rank_percentage = None
# str_percentage = f' {sparsity_percentage}%' #if sparsity_percentage is not None else ''
name_trace = f"Deepfried {str_keep_first}"
finetuned_score = row["finetuned-score"]
nb_param = row[str_nb_param_compressed]
if nb_param == 0:
conv_nb_weights = row["nb-param-base-total"] - base_nb_param
nb_param = row["nb-param-compressed-total"] - conv_nb_weights
dct_row = dict()
dct_row["method"] = name_trace
dct_row["perf"] = finetuned_score
dct_row["nb_param"] = nb_param
dct_table[dataname][modelname][dct_entry_only_dense].append(dct_row)
print(finetuned_score)
base_score_tmp = row["base-model-score"]
assert np.isclose(base_score, base_score_tmp) or base_score is None, f"{base_score}!={base_score_tmp}"
base_nb_param_tmp = row[str_nb_param_base]
assert base_nb_param == base_nb_param_tmp or base_nb_param is None or base_nb_param_tmp == 0, f"{base_nb_param}!={base_nb_param_tmp}"
fig.add_trace(
go.Scatter(
x=[nb_param],
y=[finetuned_score],
mode='markers',
name=name_trace,
hovertext=name_trace,
legendgroup=f"Deepfried",
marker=dict(
color=dct_colors[f"Deepfried"],
symbol=dct_symbol[f"Deepfried"],
size=SIZE_MARKERS,
line=dict(
color='Black',
width=WIDTH_MARKER_LINES
)
)
))
####################
# tensortrain data #
####################
df_tt = df_tucker_tt_model_dense[df_tucker_tt_model_dense["compression"] == "tensortrain"]
for idx_row, row in df_tt.iterrows():
keep_first = row["keep-first-layer"]
str_keep_first = ' -1' if keep_first is True else ''
if SHOW_KEEP_FIRST_ONLY and not keep_first and not ONLY_DENSE:
continue
order = int(row["order"])
rank_value = int(row["rank-value"])
if not np.isnan(row["use-pretrained"]):
use_petrained = bool(row["use-pretrained"])
str_pretrained = " pretrained" if use_petrained else ""
else:
use_petrained = False
str_pretrained = ""
if SHOW_PRETRAINED_ONLY and not use_petrained and not ONLY_DENSE:
continue
name_trace = f"Tensortrain{str_keep_first} K={order} R={rank_value}{str_pretrained}"
finetuned_score = row["finetuned-score"]
nb_param = row[str_nb_param_compressed]
dct_row = dict()
dct_row["method"] = name_trace
dct_row["perf"] = finetuned_score
dct_row["nb_param"] = nb_param
dct_table[dataname][modelname][dct_entry_only_dense].append(dct_row)
base_score_tmp = row["base-model-score"]
assert base_score == base_score_tmp or base_score is None
base_nb_param_tmp = row[str_nb_param_base]
assert base_nb_param == base_nb_param_tmp or base_nb_param is None
fig.add_trace(
go.Scatter(
x=[nb_param],
y=[finetuned_score],
mode='markers',
name=name_trace,
hovertext=name_trace,
legendgroup=f"TT R={rank_value}",
marker=dict(
color=dct_colors[f"TT R={rank_value}"],
symbol=dct_symbol[f"TT{str_keep_first}{str_pretrained}"],
size=SIZE_MARKERS,
line=dict(
color='Black',
width=WIDTH_MARKER_LINES
)
)
))
title = "Performance = f(# Param); " + dataname + " " + modelname + str_only_dense
fig.update_layout(title=title,
xaxis_title="# Parameter in Dense and Conv Layers",
yaxis_title="Accuracy (%)",
xaxis_type="log",
)
fig.show()
fig.write_image(str((output_dir / title).absolute()) + ".png")
pprint(dct_table)
# string_table = """
# \begin{tabular}{lcccccccccccccccccccccc}
# \toprule
#
# {} & \multicolumn{2}{c}{ \thead{ Ensemble } } & \multicolumn{2}{c}{ \thead{ Kmeans } } & \multicolumn{2}{c}{ \thead{ NN-OMP\\w/o weights } } & \multicolumn{2}{c}{ \thead{ NN-OMP } } & \multicolumn{2}{c}{ \thead{ OMP\\w/o weights } } & \multicolumn{2}{c}{ \thead{ OMP } } & \multicolumn{2}{c}{ \thead{ Random } } & \multicolumn{2}{c}{ \thead{ Zhang\\Predictions } } & \multicolumn{2}{c}{ \thead{ Zhang\\Similarities } }\\
# \midrule
# Diam. & 3.032E+05 & 86 & \underline{3.024E+05} & \underline{143} & \textbf{3.024E+05} & \textbf{86} & 3.033E+05 & 86 & 3.025E+05 & 143 & \textit{3.087E+05} & \textit{29} & 3.025E+05 & 114 & 3.047E+05 & 143 & 3.032E+05 & 143\\
# Diab. & 3.431E+03 & 32 & \underline{3.281E+03} & \underline{36} & 3.317E+03 & 36 & 3.549E+03 & 36 & 3.324E+03 & 36 & \textit{3.607E+03} & \textit{25} & 3.303E+03 & 32 & 3.282E+03 & 36 & \textbf{3.241E+03} & \textbf{32}\\
# Kin. & 1.892E-02 & 200 & \textit{2.024E-02} & \textit{33} & 1.921E-02 & 133 & \underline{1.809E-02} & \underline{133} & 1.931E-02 & 67 & \textbf{1.776E-02} & \textbf{333} & 2.002E-02 & 333 & 2.089E-02 & 333 & 2.017E-02 & 333\\
# <NAME>. & \underline{2.187E-01} & \underline{267} & \textit{2.449E-01} & \textit{33} & 2.239E-01 & 100 & \textbf{2.180E-01} & \textbf{133} & \textit{2.267E-01} & \textit{33} & 2.197E-01 & 133 & 2.390E-01 & 333 & 2.536E-01 & 333 & 2.452E-01 & 333\\
# Bos. & 1.267E+01 & 30 & \textit{1.278E+01} & \textit{13} & \textbf{1.214E+01} & \textbf{33} & 1.253E+01 & 33 & \underline{1.247E+01} & \underline{27} & \textit{1.293E+01} & \textit{13} & 1.253E+01 & 33 & 1.430E+01 & 33 & 1.283E+01 & 33\\
# \midrule
# Sp. B. & 94.27\% & 133 & 95.52\% & 167 & \textit{95.57\%} & \textit{100} & \underline{\textit{95.59\%}} & \underline{\textit{100}} & 95.56\% & 167 & 95.39\% & 133 & \textbf{95.59\%} & \textbf{167} & 95.45\% & 333 & 95.46\% & 167\\
# St. P. & 98.69\% & 233 & 99.05\% & 267 & \underline{\textit{99.95\%}} & \underline{\textit{67}} & \textbf{99.95\%} & \textbf{100} & \textit{99.64\%} & \textit{67} & 99.90\% & 333 & \textit{99.41\%} & \textit{67} & 99.43\% & 167 & 98.92\% & 300\\
# KR-KP & \textit{98.22\%} & \textit{33} & 99.00\% & 333 & \underline{99.42\%} & \underline{100} & 99.39\% & 100 & 99.22\% & 100 & \textbf{99.48\%} & \textbf{100} & 99.14\% & 267 & 99.14\% & 133 & 98.94\% & 333\\
# B. C. & 95.09\% & 100 & \textbf{\textit{96.58\%}} & \textbf{\textit{33}} & \underline{96.49\%} & \underline{67} & \textbf{96.58\%} & \textbf{67} & 95.79\% & 133 & 95.35\% & 67 & 95.88\% & 300 & \textit{95.70\%} & \textit{33} & 95.61\% & 333\\
# LFW P. & \textit{56.00\%} & \textit{67} & 65.25\% & 333 & \textbf{66.02\%} & \textbf{333} & 65.73\% & 233 & 65.32\% & 133 & 65.55\% & 167 & \underline{65.98\%} & \underline{267} & 65.43\% & 333 & 65.27\% & 333\\
# Gam. & \textit{80.78\%} & \textit{3} & 87.68\% & 33 & \underline{87.75\%} & \underline{33} & \underline{87.75\%} & \underline{33} & \underline{87.75\%} & \underline{33} & \underline{87.75\%} & \underline{33} & \textbf{87.76\%} & \textbf{33} & 87.72\% & 33 & 87.68\% & 33\\
#
# \bottomrule
# \end{tabular}
# """
tab_headers = [
"Dataset",
"Architecture",
"Compressed layers",
"Method",
"Performance",
"# Parameters"
]
str_table = """\\begin{{tabular}}{{cccccc}}
\\toprule
{}
\\bottomrule
\end{{tabular}}
"""
lst_lines_tabular = ["&".join(tab_headers)]
for dataname in dct_table:
for model in dct_table[dataname]:
for layers in dct_table[dataname][model]:
if layers != "Conv+Dense":
continue
for lin in dct_table[dataname][model][layers]:
if "PYQALM Q=None" in str(lin["method"]):
continue
lst_line = [dataname, model, layers]
lst_line.append(str(lin["method"]))
lst_line.append("{:.2f}".format(lin["perf"]))
lst_line.append(str(int(lin["nb_param"])))
str_line = "&".join(lst_line).replace("%", "\%").replace("#", "\#")
lst_lines_tabular.append(str_line)
final_string = str_table.format("\\\\ \n".join(lst_lines_tabular) + "\\\\")
with open(str((output_dir / "table.tex").absolute()), 'w') as wf:
wf.write(final_string)
print(final_string)
| 2.171875
| 2
|
djtools/socialnetworks/__init__.py
|
anuj2511/django-tools-socialnetworks
| 0
|
12783662
|
<filename>djtools/socialnetworks/__init__.py<gh_stars>0
default_app_config = 'djtools.socialnetworks.apps.SocialNetworksConfig'
| 1.085938
| 1
|
conans/test/functional/toolchains/apple/test_xcodebuild_targets.py
|
Mu-L/conan
| 1
|
12783663
|
<filename>conans/test/functional/toolchains/apple/test_xcodebuild_targets.py<gh_stars>1-10
import platform
import textwrap
import pytest
from conans.test.utils.tools import TestClient
xcode_project = textwrap.dedent("""
name: HelloLibrary
targets:
hello-static:
type: library.static
platform: macOS
sources:
- src
configFiles:
Debug: static.xcconfig
Release: static.xcconfig
hello-dynamic:
type: library.dynamic
platform: macOS
sources:
- src
configFiles:
Debug: dynamic.xcconfig
Release: dynamic.xcconfig
""")
hello_cpp = textwrap.dedent("""
#include "hello.hpp"
#include <iostream>
void hellofunction(){
#ifndef DEBUG
std::cout << "Hello Release!" << std::endl;
#else
std::cout << "Hello Debug!" << std::endl;
#endif
}
""")
hello_hpp = textwrap.dedent("""
#ifndef hello_hpp
#define hello_hpp
void hellofunction();
#endif /* hello_hpp */
""")
test = textwrap.dedent("""
import os
from conan import ConanFile
from conan.tools.cmake import CMake, cmake_layout
from conan.tools.build import cross_building
class HelloTestConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
# VirtualBuildEnv and VirtualRunEnv can be avoided if "tools.env.virtualenv:auto_use" is defined
# (it will be defined in Conan 2.0)
generators = "CMakeDeps", "CMakeToolchain", "VirtualBuildEnv", "VirtualRunEnv"
apply_env = False
test_type = "explicit"
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {"shared": False, "fPIC": True}
def requirements(self):
self.requires(self.tested_reference_str)
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def layout(self):
cmake_layout(self)
def test(self):
if not cross_building(self):
cmd = os.path.join(self.cpp.build.bindirs[0], "example")
self.run(cmd, env="conanrun")
if self.options.shared:
self.run("otool -l {}".format(os.path.join(self.cpp.build.bindirs[0], "example")))
else:
self.run("nm {}".format(os.path.join(self.cpp.build.bindirs[0], "example")))
""")
cmakelists = textwrap.dedent("""
cmake_minimum_required(VERSION 3.15)
project(PackageTest CXX)
find_package(hello CONFIG REQUIRED)
add_executable(example src/example.cpp)
target_link_libraries(example hello::hello)
""")
test_src = textwrap.dedent("""
#include "hello.hpp"
int main() {
hellofunction();
}
""")
conanfile = textwrap.dedent("""
import os
from conan import ConanFile
from conan.tools.apple import XcodeBuild
from conan.tools.files import copy
class HelloLib(ConanFile):
name = "hello"
version = "1.0"
settings = "os", "compiler", "build_type", "arch"
generators = "XcodeToolchain"
exports_sources = "HelloLibrary.xcodeproj/*", "src/*", "static.xcconfig", "dynamic.xcconfig"
options = {"shared": [True, False], "fPIC": [True, False]}
default_options = {"shared": False, "fPIC": True}
def build(self):
xcode = XcodeBuild(self)
if self.options.shared:
xcode.build("HelloLibrary.xcodeproj", target="hello-dynamic")
else:
xcode.build("HelloLibrary.xcodeproj", target="hello-static")
def package(self):
name = "hello-dynamic.dylib" if self.options.shared else "libhello-static.a"
copy(self, "build/{}/{}".format(self.settings.build_type, name),
src=self.build_folder, dst=os.path.join(self.package_folder, "lib"), keep_path=False)
copy(self, "*/*.hpp", src=self.build_folder, dst=os.path.join(self.package_folder, "include"), keep_path=False)
def package_info(self):
self.cpp_info.libs = ["hello-{}".format("dynamic.dylib" if self.options.shared else "static")]
""")
static_xcconfig = textwrap.dedent("""
#include \"conan_config.xcconfig\"
LD_DYLIB_INSTALL_NAME = @rpath/libhello-static.dylib
""")
dynamic_xcconfig = textwrap.dedent("""
#include \"conan_config.xcconfig\"
LD_DYLIB_INSTALL_NAME = @rpath/hello-dynamic.dylib
""")
@pytest.mark.skipif(platform.system() != "Darwin", reason="Only for MacOS")
@pytest.mark.tool_xcodebuild
def test_shared_static_targets():
"""
The pbxproj has defined two targets, one for static and one for dynamic libraries, in the
XcodeBuild build helper we pass the target we want to build depending on the shared option
"""
client = TestClient()
client.save({"conanfile.py": conanfile,
"src/hello.cpp": hello_cpp,
"src/hello.hpp": hello_hpp,
"project.yml": xcode_project,
"test_package/conanfile.py": test,
"test_package/src/example.cpp": test_src,
"test_package/CMakeLists.txt": cmakelists,
"conan_config.xcconfig": "",
"static.xcconfig": static_xcconfig,
"dynamic.xcconfig": dynamic_xcconfig})
client.run_command("xcodegen generate")
client.run("create . -o *:shared=True -tf None")
assert "Packaged 1 '.dylib' file: hello-dynamic.dylib" in client.out
client.run("test test_package hello/1.0@ -o *:shared=True")
assert "@rpath/hello-dynamic.dylib" in client.out
client.run("create . -tf None")
assert "Packaged 1 '.a' file: libhello-static.a" in client.out
client.run("test test_package hello/1.0@")
# check the symbol hellofunction is in the executable
assert "hellofunction" in client.out
| 1.835938
| 2
|
utils/notify_about_cancel_request.py
|
itcosplay/cryptobot
| 0
|
12783664
|
async def notify_about_cancel_request(request, username, user_id):
from emoji import emojize
from loader import bot, db
from data.config import super_admins
emo_issuing_office = emojize(':office:', use_aliases=True)
emo_cash_recive = emojize(':chart_with_upwards_trend:', use_aliases=True)
emo_delivery = emojize(':steam_locomotive:', use_aliases=True)
emo_exchange = emojize(':recycle:', use_aliases=True)
emo_cash_in = emojize(':atm:', use_aliases=True)
emo_cash_atm = emojize(':credit_card:', use_aliases=True)
emo_request = {
'выдача в офисе': emo_issuing_office,
'прием кэша': emo_cash_recive,
'доставка': emo_delivery,
'обмен': emo_exchange,
'кэшин': emo_cash_in,
'снятие с карт': emo_cash_atm,
}
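# judging from the lookups below, request[0] is the date, request[2] the request number and request[3] the operation type (a Russian key into emo_request)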
type_operation = emo_request[request[3]]
number_request = request[2]
date_request = request[0]
warning = f'Заявка {type_operation} #N{number_request} от {date_request} была отменена. Отменил - {username}'
admins = db.select_id_users(status='admin')
change = db.select_id_users(status='changer')
if not len(super_admins) == 0:
for user in super_admins:
if user == user_id:
pass
else:
await bot.send_message(user, warning)
if not len(admins) == 0:
list_admins_id = []
for item in admins:
list_admins_id.append(item[0])
for user in list_admins_id:
if user == user_id:
pass
else:
await bot.send_message(user, warning)
if not len(change) == 0:
list_changers_id = []
for item in change:
list_changers_id.append(item[0])
for user in list_changers_id:
if user == user_id:
await bot.send_message(user, warning)
return
| 2.15625
| 2
|
reloader/ImageButton.py
|
frdfsnlght/Reloader
| 0
|
12783665
|
<reponame>frdfsnlght/Reloader
from kivy.lang.builder import Builder
from kivy.properties import NumericProperty
from kivy.uix.button import Button
from kivy.clock import Clock
Builder.load_string('''
<ImageButton>:
image_normal: ''
image_down: ''
background_color: 0, 0, 0, 0
padding: self.width * 0.05, self.height * 0.05
# canvas.after:
# Color:
# rgba: 1, 1, 0, 1
# Line:
# width: 1
# rectangle: self.x, self.y, self.width - 1, self.height - 1
Image:
source: self.parent.image_normal
allow_stretch: True
center: self.parent.center
width: self.parent.width - (self.parent.padding_x * 2) if self.parent.state == 'normal' else 0
Image:
source: self.parent.image_down
allow_stretch: True
center: self.parent.center_x - ((self.parent.width - (self.parent.padding_x * 2)) / 2), self.parent.center_y
width: (self.parent.width - (self.parent.padding_x * 2)) if self.parent.state == 'down' else 0
''')
# Is this a bug? Notice the 'center' property of the second image. Why is the parent's center shifted in the down state?
class ImageButton(Button):
long_press_delay = NumericProperty(1)
long_press_interval = NumericProperty(0.2)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.register_event_type('on_long_press')
self.longPressTimer = Clock.create_trigger(self._on_long_press, self.long_press_delay)
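# Clock.create_trigger returns a one-shot ClockEvent; calling self.longPressTimer() (re)schedules _on_long_press after the current timeout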
self.longPressCount = 0
def on_press(self):
self.longPressCount = 0
self.longPressTimer.timeout = self.long_press_delay
self.longPressTimer()
def on_release(self):
self.longPressTimer.cancel()
def on_long_press(self, count):
pass
def _on_long_press(self, dt):
self.longPressCount = self.longPressCount + 1
self.dispatch('on_long_press', self.longPressCount)
if self.long_press_interval > 0:
self.longPressTimer.timeout = self.long_press_interval
self.longPressTimer()
| 2.09375
| 2
|
stardog/http/virtual_graphs.py
|
fventuri-availity/pystardog
| 0
|
12783666
|
from .. import content_types as content_types
class VirtualGraph(object):

    def __init__(self, name, client):
        self.name = name
        self.client = client

    @property
    def path(self):
        return '/admin/virtual_graphs/{}'.format(self.name)

    def update(self, name, mappings, options):
        meta = {
            'name': name,
            'mappings': mappings,
            'options': options,
        }
        self.client.put(self.path, json=meta)
        self.name = name

    def delete(self):
        self.client.delete(self.path)

    def options(self):
        r = self.client.get(self.path + '/options')
        return r.json()['options']

    def mappings(self, content_type=content_types.TURTLE):
        r = self.client.get(
            self.path + '/mappings', headers={'Accept': content_type})
        return r.content

    def available(self):
        r = self.client.get(self.path + '/available')
        return bool(r.json()['available'])

    def __repr__(self):
        return self.name
| 2.40625
| 2
|
repositories/vcs/base.py
|
washingtontimes/django-repositories
| 2
|
12783667
|
import os, shutil
import settings
class BaseVCS(object):
def __init__(self, name, anonymous_access, template=None):
"""
A base class to handle Version Control System functions
name = name of the repository
anonymous_access = Is it public?
template = The name of the template to use
"""
self.public = anonymous_access
self.name = name
self.template = template
self.config = self.get_config()
self._update_path() # Sets self.path and url
def _update_path(self):
"""
Determine where the repository is. It is called in __init__ and sets
self.path and self.url
"""
if self.public:
self.path = os.path.abspath(os.path.join(self.config['public_path'], self.name))
self.url = "%s%s/" % (self.config['public_url'], self.name)
else:
self.path = os.path.abspath(os.path.join(self.config['private_path'], self.name))
self.url = "%s%s/" % (self.config['private_url'], self.name)
def get_config(self):
"""
Search the configuration for the correct record
"""
name = self.__class__.__name__.replace('Repository','')
for value in settings.VCS_CONFIG.values():
if value['name'] == name:
return value
raise Exception("The configuration for %s is missing." % name)
def exists(self):
"""
Does the repository exist on the file system?
"""
return os.path.exists(self.path)
def create(self):
"""
Create a new repository
"""
raise NotImplementedError
def make_public(self):
"""
Move a repository from private to public
"""
dest = os.path.abspath(os.path.join(self.config['public_path'], self.name))
source = self.path
shutil.move(source, dest)
self.public = True
self._update_path()
def make_private(self):
"""
Move a repository from public to private
"""
source = self.path
dest = os.path.abspath(os.path.join(self.config['private_path'], self.name))
shutil.move(source, dest)
self.public = False
self._update_path()
def delete(self):
"""
Delete the source repository here
"""
if self.exists():
shutil.rmtree(self.path)
def create_remote(self, name, description='', homepage=''):
"""
Create a remote repository on a separate service
"""
raise NotImplementedError
def add_remote(self, name, url, branch=None):
"""
Add a remote repository
"""
raise NotImplementedError
def update_remote(self, name, branch=None):
"""
Update a remote repository.
"""
raise NotImplementedError
def list_directory(self, path, revision=None, branch=None):
"""
List the files directory in the repository
Optionally can specify a revision or branch from which to show the directory.
"""
raise NotImplementedError
def get_file(self, path, revision=None, branch=None):
"""
Get the contents from a file
Optionally can specify a revision or branch from which to retrieve the contents
"""
raise NotImplementedError
def get_absolute_url(self):
"""
Return the absolute url
"""
return self.url
def get_current_revision(self):
"""
Get the current revision of the repository
"""
raise NotImplementedError
def get_archive(self, revision=None, tag=None):
"""
Get an archive of the current revision, or specific revision or tag
"""
raise NotImplementedError
| 2.96875
| 3
|
telegram_bot/webhook.py
|
ProgrammingLanguageLeader/MathematicianBot
| 0
|
12783668
|
<gh_stars>0
import logging
from flask import current_app
from telegram import Bot
def set_webhook() -> Bot:
    logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=logging.INFO
    )
    host_url = current_app.config.get('HOST_URL')
    host_port = current_app.config.get('HOST_PORT')
    telegram_token = current_app.config.get('TELEGRAM_TOKEN')

    bot = Bot(telegram_token)
    bot.set_webhook(
        url='%s:%s/telegram/%s' % (
            host_url,
            host_port,
            telegram_token
        )
    )
    return bot
| 2.328125
| 2
|
vas2nets/urls.py
|
praekelt/hellomama-registration
| 0
|
12783669
|
from django.conf.urls import url
from . import views
urlpatterns = [
    url(r'^api/v1/fetch_voice_data/$',
        views.FetchVoiceDataView.as_view()),
    url(r'^api/v1/sync_welcome_audio/$',
        views.SyncWelcomeAudioView.as_view()),
    url(r'^api/v1/resend_last_message/$',
        views.ResendLastMessageView.as_view()),
]
| 1.679688
| 2
|
health.py
|
oliviamao/first-python
| 0
|
12783670
|
import random
health = 50
difficulty = 3
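# a potion restores a random 25-50 points, reduced by the difficulty divisor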
potion_health = int(random.randint(25,50)/ difficulty)
health = health + potion_health
print (health)
import math
| 2.96875
| 3
|
Ch8/08-tuple-01.py
|
akuks/Python3.6---Novice-to-Ninja
| 0
|
12783671
|
# Create Tuple
tup = (1, 2, 3, "Hello", "World")
print(tup)
# Looping in Tuple
for item in tup:
    print(item)
# in operator
b = 4 in tup
print(b)
# Not in Operator
b = 4 not in tup
print (b)
| 4.125
| 4
|
HackerRank/30 Days Of Code/Python/Day26_NestedLogic.py
|
AdityaChirravuri/CompetitiveProgramming
| 1
|
12783672
|
<gh_stars>1-10
d, m, y = map(int, input().split())
dd, dm, dy = map(int, input().split())
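# returned on (d, m, y) vs. due (dd, dm, dy): the fine is 15 hackos per day late within the same month, 500 per month late within the same year, a flat 10000 if returned in a later year, otherwise 0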
if dy == y:
    if dm < m:
        hackos = 500*(m-dm)
    elif dm == m:
        if dd < d:
            hackos = 15*(d-dd)
        else:
            hackos = 0
    else:
        hackos = 0
elif dy > y:
    hackos = 0
else:
    hackos = 10000

print(hackos)
| 2.515625
| 3
|
random_mac/dataset.py
|
critical-path/random-mac
| 2
|
12783673
|
<gh_stars>1-10
"""
This module contains dataset-related functions.
"""
import csv
import itertools
import os
import macaddress
import numpy
import pandas
def get_ieee_assignments(file):
"""
Retrieve OUIs and CIDs.
Parameters
----------
file : str
The name of a file with information on OUIs
and CIDs assigned by the IEEE.
Typical names are `oui.csv` and `cid.csv`.
Returns
-------
list
A list of 24-bit OUIs or CIDs assigned by the IEEE.
"""
with open(file) as source:
records = csv.DictReader(source)
return list(
map(
lambda record: record["Assignment"],
records
)
)
def make_hexadecimal_digit_strings(assignments):
"""
Make hexadecimal strings based upon OUIs and CIDs.
Parameters
----------
assignments : list
A list of 24-bit OUIs or CIDs assigned by the IEEE.
Returns
-------
list
A list of 48-bit hexadecimal strings, where each
string is the concatenation of a 24-bit OUI/CID and
24 random bits.
"""
return list(
map(
lambda assignment: assignment + os.urandom(3).hex(),
assignments
)
)
def make_random_hexadecimal_digit_strings(number):
"""
Make random hexadecimal strings.
Parameters
----------
number : int
The number of hexadecimal strings to make.
Returns
-------
list
A list of 48-bit hexadecimal strings, where each
string is 48 random bits.
"""
return list(
map(
lambda x: os.urandom(6).hex(),
range(number)
)
)
def get_mac_features(digit_string):
"""
Retrieve the features of a MAC address.
Parameters
----------
digit_string : str
A 48-bit hexadecimal string with which
to instantiate `MediaAccessControlAddress`.
Returns
-------
tuple
An eight-tuple with the features of a MAC address.
The features are `type`, `has_oui`, `has_cid`,
`is_broadcast`, `is_multicast`, `is_unicast`,
`is_uaa`, and `is_laa`.
"""
mac = macaddress.MediaAccessControlAddress(digit_string)
return (
mac.type,
mac.has_oui,
mac.has_cid,
mac.is_broadcast,
mac.is_multicast,
mac.is_unicast,
mac.is_uaa,
mac.is_laa
)
def get_features(digit_strings):
"""
Retrieve the features of MAC addresses.
Parameters
----------
digit_strings : list
A list of 48-bit hexadecimal strings.
Returns
-------
list
A list of tuples, where each tuple contains
the features of a MAC address.
"""
return list(
map(
lambda digit_string: get_mac_features(digit_string),
digit_strings
)
)
def normalize_features(features):
"""
Normalize the features of MAC addresses.
Parameters
----------
features : list
A list of tuples, where each tuple contains
the features of a MAC address.
Returns
-------
numpy array
A numpy array with the normalized features
of MAC addresses, where normalization
means replacing non-numeric with numeric
values and converting the container from a
list to a numpy array.
"""
replacements = {
"unique": 2,
"local": 1,
"unknown": 0,
True: 1,
False: 0
}
return pandas.DataFrame(features).replace(replacements).to_numpy()
def make_labels(value, number):
"""
Make labels for training and testing of
a binary classifier.
Parameters
----------
value : int
The label, where `0` means a non-random
        MAC address and `1` means a random
MAC address.
number : int
The number of labels.
Returns
-------
list
A list with the given number of the
given label.
"""
return list(
itertools.repeat(
value,
number
)
)
def normalize_labels(labels):
"""
Normalize labels.
Parameters
----------
labels : list
A list of labels.
Returns
-------
numpy array
A numpy array with normalized labels,
where normalization means converting the
container from a list to a numpy array.
"""
return numpy.array(labels)
def make(multiple, oui_file="./oui.csv", cid_file="./cid.csv"):
"""
Make a dataset for training and testing purposes.
Parameters
----------
multiple : int
The number of random MAC addresses to create
for every non-random MAC address.
oui_file : str
The name of the file with OUIs assigned by
the IEEE.
cid_file : str
The name of the file with CIDs assigned by
the IEEE.
Returns
-------
tuple
A tuple with data (features) and labels.
"""
# Get OUIs and CIDs.
ouis = get_ieee_assignments(oui_file)
cids = get_ieee_assignments(cid_file)
# Make non-random and random hexadecimal strings.
digits = make_hexadecimal_digit_strings(ouis + cids)
random_digits = make_random_hexadecimal_digit_strings(
int(multiple * len(digits))
)
# Get features of non-random and random MAC addresses.
features = get_features(digits)
random_features = get_features(random_digits)
# Get labels for non-random and random MAC addresses.
labels = make_labels(0, len(features))
random_labels = make_labels(1, len(random_features))
# Normalize all features and labels.
normalized_features = normalize_features(features + random_features)
normalized_labels = normalize_labels(labels + random_labels)
# Return normalized features and labels.
return (
normalized_features,
normalized_labels,
)
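# Illustrative usage sketch (not part of the original module). It assumes the
# IEEE assignment files `oui.csv` and `cid.csv` named in the defaults are present
# in the working directory; `multiple=1` pairs each assigned prefix with one
# fully random address.
#
#     if __name__ == "__main__":
#         data, labels = make(multiple=1)
#         print(data.shape, labels.shape)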
| 3.296875
| 3
|
envisage/plugins/debug/fbi_plugin_definition.py
|
janvonrickenbach/Envisage_wxPhoenix_py3
| 0
|
12783674
|
#-------------------------------------------------------------------------------
#
# FBI (Frame Based Inspector) Plugin.
#
# Written by: <NAME>
#
# Date: 1/4/2006
#
# (c) Copyright 2006 by Enthought, Inc.
#
#-------------------------------------------------------------------------------
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
from envisage.core.core_plugin_definition \
import PluginDefinition
#-------------------------------------------------------------------------------
# The plugin definition:
#-------------------------------------------------------------------------------
PluginDefinition(
# The plugin's globally unique identifier:
id="envisage.plugins.debug.fbi",
# The name of the class that implements the plugin:
class_name="envisage.plugins.debug.fbi_plugin.FBIPlugin",
# General information about the plugin:
name="FBI Plugin",
version="1.0.0",
provider_name="Enthought Inc",
provider_url="www.enthought.com",
enabled=True,
autostart=True,
# The Id's of the plugins that this plugin requires:
requires=["envisage.core", ])
| 1.695313
| 2
|
src/apps/blog/utils.py
|
Pewpewarrows/MyModernLife
| 0
|
12783675
|
<gh_stars>0
import re
def get_unique_slug(slug, conflicts):
num = 1
while True:
test = '%s-%d' % (slug, num)
found = False
for c in conflicts:
if c.slug == test:
found = True
break
if not found:
return test
else:
num += 1
def generate_slug(title):
slug = title.lower()
    slug = re.sub(r'[^\w\d\s]', '', slug)
    slug = slug.strip()
    slug = re.sub(r'\s+', '-', slug)
# Slugs ending in a hyphen just look ugly
if len(slug) > 20:
if slug[19] == '-':
slug = slug[:19]
else:
slug = slug[:20]
# Account for possibly everything being stripped
elif len(slug) == 0:
slug = 'default' # Should there be a better filler text, or reject?
return slug
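# Illustrative usage sketch (not part of the original module). `conflicts` is
# assumed to be an iterable of objects exposing a `.slug` attribute, e.g. a
# Django queryset of posts whose slugs start with the candidate slug.
#
#     slug = generate_slug("Hello, World! A very long blog post title goes here")
#     unique_slug = get_unique_slug(slug, conflicts)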
| 3.09375
| 3
|
src/parallel_graph_distance_driver.py
|
Abdumaleek/infinity-mirror
| 0
|
12783676
|
<reponame>Abdumaleek/infinity-mirror<gh_stars>0
import os
import sys
from pathlib import Path
sys.path.append('../')
import pandas as pd
from src.graph_distance import GraphDistance
import networkx as nx
from src.graph_stats import GraphStats
from src.parallel import parallel_async
from src.utils import load_pickle, get_imt_input_directory, walker, ColorPrint, ensure_dir, walker_texas_ranger, \
get_imt_output_directory
def distance_computation(dataset, model, trial, stats):
if not isinstance(stats, list):
stats = [stats]
for stat in stats:
GD = GraphDistance(dataset=dataset, trial=trial, model=model, metrics=[stat], iteration=None)
GD.set_root_object(GD.implemented_metrics[stat])
total_iterations = GD.total_iterations
rows = []
for iteration in range(total_iterations+1):
GD.set_iteration(iteration=iteration)
GD.compute_distances([stat])
results = GD.stats[stat]
row = {}
row.update({'dataset': dataset, 'model': model, 'trial': trial, 'iteration': iteration, stat: results})
rows.append(row)
results_df = pd.DataFrame(rows)
# results_df.to_csv(path_or_buf=f'{output_dir}/{stat}_{trial}.csv', index=False)
return results_df
# TODO: fix support for HRG (no objects to contatenate)
if __name__ == '__main__':
implemented_metrics = {'pagerank_js': 'pagerank', 'degree_js': 'degree_dist',
# 'pgd_distance': 'pgd_graphlet_counts', 'netlsd_distance': 'netlsd', 'portrait_divergence': 'portrait',
'lambda_distance': 'laplacian_eigenvalues'}
# datasets = ['clique-ring-500-4', 'eucore', 'flights', 'tree']
models = ['Chung-Lu', 'CNRG', 'SBM', 'Erdos-Renyi', 'BUGGE', 'HRG']
# models = ['BTER', 'BUGGE', 'Chung-Lu', 'CNRG', 'Erdos-Renyi', 'Kronecker', 'SBM', 'GCN_AE', 'Linear_AE']
#stats = ['pagerank_js', 'degree_js', 'pgd_distance', 'netlsd_distance', 'lambda_distance', 'portrait_divergence']
stats = ['degree_js', 'pagerank_js', 'lambda_distance']
# datasets, models, trials, filenames = walker()
datasets = ['cond-mat', 'enron']
for dataset in datasets:
for model in models:
for stat in stats:
ColorPrint.print_green(f'computing {stat} distances for {dataset} {model}')
trials = walker_texas_ranger(dataset, model, stat=implemented_metrics[stat], unique=True)
args = [[dataset, model, trial, stat] for trial in trials]
print(args[: 5])
# exit(-1)
try:
results = parallel_async(distance_computation, args, num_workers=10)
df = pd.concat(results)
except Exception as e:
ColorPrint.print_red(f'Error, for {dataset!r} {model!r} {stat!r}')
continue
# output_dir = f'/data/infinity-mirror/output/distances/{dataset}/{model}/{stat}/'
output_dir = Path(get_imt_output_directory()) / 'distances' / dataset
ensure_dir(output_dir, recursive=True)
df.to_csv(output_dir / f'{dataset}_{model}_{stat}.csv')
# for arg in args:
# distance_computation(*arg)
| 2.125
| 2
|
inspection_sdk/api/template/delete_pb2.py
|
easyopsapis/easyops-api-python
| 5
|
12783677
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: delete.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='delete.proto',
package='template',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0c\x64\x65lete.proto\x12\x08template\x1a\x1bgoogle/protobuf/empty.proto\"=\n\x15\x44\x65leteTemplateRequest\x12\x10\n\x08pluginId\x18\x01 \x01(\t\x12\x12\n\ntemplateId\x18\x02 \x01(\t\"w\n\x1d\x44\x65leteTemplateResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12$\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32\x16.google.protobuf.Emptyb\x06proto3')
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,])
_DELETETEMPLATEREQUEST = _descriptor.Descriptor(
name='DeleteTemplateRequest',
full_name='template.DeleteTemplateRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='pluginId', full_name='template.DeleteTemplateRequest.pluginId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='templateId', full_name='template.DeleteTemplateRequest.templateId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=55,
serialized_end=116,
)
_DELETETEMPLATERESPONSEWRAPPER = _descriptor.Descriptor(
name='DeleteTemplateResponseWrapper',
full_name='template.DeleteTemplateResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='template.DeleteTemplateResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='template.DeleteTemplateResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='template.DeleteTemplateResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='template.DeleteTemplateResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=118,
serialized_end=237,
)
_DELETETEMPLATERESPONSEWRAPPER.fields_by_name['data'].message_type = google_dot_protobuf_dot_empty__pb2._EMPTY
DESCRIPTOR.message_types_by_name['DeleteTemplateRequest'] = _DELETETEMPLATEREQUEST
DESCRIPTOR.message_types_by_name['DeleteTemplateResponseWrapper'] = _DELETETEMPLATERESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
DeleteTemplateRequest = _reflection.GeneratedProtocolMessageType('DeleteTemplateRequest', (_message.Message,), {
'DESCRIPTOR' : _DELETETEMPLATEREQUEST,
'__module__' : 'delete_pb2'
# @@protoc_insertion_point(class_scope:template.DeleteTemplateRequest)
})
_sym_db.RegisterMessage(DeleteTemplateRequest)
DeleteTemplateResponseWrapper = _reflection.GeneratedProtocolMessageType('DeleteTemplateResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _DELETETEMPLATERESPONSEWRAPPER,
'__module__' : 'delete_pb2'
# @@protoc_insertion_point(class_scope:template.DeleteTemplateResponseWrapper)
})
_sym_db.RegisterMessage(DeleteTemplateResponseWrapper)
# @@protoc_insertion_point(module_scope)
| 1.21875
| 1
|
strings/findTheDifference.py
|
kushvr7/High-On-DSA
| 76
|
12783678
|
# https://leetcode.com/problems/find-the-difference/
class Solution(object):
# TC :O(N)
# SC :O(N)
def findTheDifference(self, s, t):
"""
:type s: str
:type t: str
:rtype: str
"""
        counts = {i: s.count(i) for i in s}
        for i in t:
            if i not in counts or counts[i] == 0:
                return i
            else:
                counts[i] -= 1
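# Illustrative usage sketch (not part of the original solution):
#
#     sol = Solution()
#     assert sol.findTheDifference("abcd", "abcde") == "e"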
| 3.390625
| 3
|
optalg/opt_solver/cplex_cmd.py
|
romcon/OPTALG
| 1
|
12783679
|
#****************************************************#
# This file is part of OPTALG. #
# #
# Copyright (c) 2019, <NAME>. #
# #
# OPTALG is released under the BSD 2-clause license. #
#****************************************************#
from __future__ import print_function
import os
import numpy as np
import tempfile
import subprocess
from . import utils
from .opt_solver_error import *
from .opt_solver import OptSolver
from .problem import OptProblem
from multiprocessing import cpu_count
class OptSolverCplexCMD(OptSolver):
parameters = {'quiet' : False,
'mipgap': None,
'feasibility': None,
'debug': False}
def __init__(self):
"""
CPLEX solver interface (via command-line interface).
"""
# Check
if not utils.cmd_exists('cplex'):
raise ImportError('cplex cmd not available')
OptSolver.__init__(self)
self.parameters = OptSolverCplexCMD.parameters.copy()
def supports_properties(self, properties):
for p in properties:
if p not in [OptProblem.PROP_CURV_LINEAR,
OptProblem.PROP_VAR_CONTINUOUS,
OptProblem.PROP_VAR_INTEGER,
OptProblem.PROP_TYPE_FEASIBILITY,
OptProblem.PROP_TYPE_OPTIMIZATION]:
return False
return True
def read_solution(self, filename, problem):
import xml.etree.ElementTree as ET
x = np.zeros(problem.c.size)
lam = np.zeros(problem.A.shape[0])
nu = np.zeros(0)
mu = np.zeros(x.size)
pi = np.zeros(x.size)
tree = ET.parse(filename)
root = tree.getroot()
header = root.find('header')
status = header.get('solutionStatusString')
for var in root.find('variables'):
name = var.get('name')
value = float(var.get('value'))
index = int(name.split('_')[1])
x[index] = value
rcost = var.get('reducedCost')
if rcost is not None:
if float(rcost) > 0.:
pi[index] = float(rcost)
else:
mu[index] = -float(rcost)
for c in root.find('linearConstraints'):
name = c.get('name')
index = int(name.split('_')[1])
dual = c.get('dual')
if dual is not None:
lam[index] = float(dual)
return status, x, lam, nu, mu, pi
def solve(self, problem):
# Local vars
params = self.parameters
# Parameters
quiet = params['quiet']
mipgap = params['mipgap']
feasibility = params['feasibility']
debug = params['debug']
# Problem
try:
self.problem = problem.to_mixintlin()
        except Exception:
raise OptSolverError_BadProblemType(self)
# Solve
status = ''
try:
base_name = next(tempfile._get_candidate_names())
input_filename = base_name+'.lp'
output_filename = base_name+'.sol'
self.problem.write_to_lp_file(input_filename)
cmd = ['cplex']
cmd += ['-c', 'read', input_filename]
if mipgap is not None:
cmd += ['set mip tolerances mipgap %.2e' %mipgap]
if feasibility is not None:
cmd += ['set simplex tolerances feasibility %.2e' %feasibility]
cmd += ['optimize']
cmd += ['write', output_filename]
cmd += ['quit']
if not quiet:
code = subprocess.call(cmd)
else:
code = subprocess.call(cmd,
stdout=open(os.devnull, 'w'),
stderr=subprocess.STDOUT)
assert(code == 0)
status, self.x, self.lam, self.nu, self.mu, self.pi = self.read_solution(output_filename, self.problem)
except Exception as e:
raise OptSolverError_CplexCMDCall(self)
finally:
if os.path.isfile(input_filename) and not debug:
os.remove(input_filename)
if os.path.isfile(output_filename) and not debug:
os.remove(output_filename)
if os.path.isfile('cplex.log') and not debug:
os.remove('cplex.log')
for i in range(cpu_count()):
if os.path.isfile('clone%d.log' %i) and not debug:
os.remove('clone%d.log' %i)
if 'optimal' in status.lower():
self.set_status(self.STATUS_SOLVED)
self.set_error_msg('')
else:
raise OptSolverError_CplexCMD(self)
| 2.140625
| 2
|
Python/testes/unittest-estudo/app/tests/test_validate_file.py
|
wendrewdevelop/Estudos
| 0
|
12783680
|
<reponame>wendrewdevelop/Estudos
import unittest
class TestFile(unittest.TestCase):
"""
Validando o arquivo
"""
def test_validando_path_arquivo(path_file):
'''
Verificando se o nosso arquivo
esta no path correto
'''
path_file='batch.csv'
assert path_file
if __name__ == '__main__':
unittest.main()
| 3.09375
| 3
|
sanitize_temperatures.py
|
mscalora/Plugin-Examples
| 14
|
12783681
|
# coding=utf-8
def sanitize_temperatures(comm, parsed_temps):
return dict((k, v) for k, v in parsed_temps.items()
if isinstance(v, tuple) and len(v) == 2 and is_sane(v[0]))
def is_sane(actual):
return 1.0 <= actual <= 300.0
__plugin_name__ = "Sanitize Temperatures"
__plugin_pythoncompat__ = ">=2.7,<4"
__plugin_hooks__ = {
"octoprint.comm.protocol.temperatures.received": sanitize_temperatures
}
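# Illustrative example (not part of the original plugin): readings outside the
# 1.0-300.0 sanity range, or entries that are not (actual, target) tuples, are
# dropped before OctoPrint processes them. The dict below is a hypothetical
# parsed_temps payload.
#
#     parsed = {"T0": (215.3, 210.0), "B": (0.0, 60.0), "chamber": "n/a"}
#     sanitize_temperatures(None, parsed)   # -> {"T0": (215.3, 210.0)}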
| 3.09375
| 3
|
nnet/model_org.py
|
trip2eee/nnet
| 3
|
12783682
|
<gh_stars>1-10
import csv
import time
import numpy as np
from nnet.loss.loss import Loss
from nnet.optim.optimizer import Optimizer
class Model:
def __init__(self, dim_input, dim_output, hidden_config, rnd_mean=0, rnd_std=0.0030):
self.is_training = False
self.dim_input = dim_input
self.dim_output = dim_output
self.rnd_mean = rnd_mean
self.rnd_std = rnd_std
self.hidden_config = hidden_config
np.random.seed(123)
def randomize(self):
        np.random.seed(int(time.time()))
def init_model_hiddens(self):
self.pm_hiddens = []
prev_dim = self.dim_input
for hidden_dim in self.hidden_config:
self.pm_hiddens.append(self.alloc_param_pair([prev_dim, hidden_dim]))
prev_dim = hidden_dim
self.pm_output = self.alloc_param_pair([prev_dim, self.dim_output])
def alloc_param_pair(self, shape):
weight = np.random.normal(self.rnd_mean, self.rnd_std, shape).astype(np.float32)
bias = np.zeros(shape[-1]).astype(np.float32)
return {'w':weight, 'b':bias}
def train(self, x, y, loss_obj : Loss, optim : Optimizer):
output, aux_nn = self.forward(x)
loss, aux_pp = loss_obj.forward(output, y)
G_loss = 1.0
G_output = loss_obj.backward(G_loss, aux_pp)
self.backward(G_output, aux_nn, optim)
return output, loss
def test(self, x, y):
output, _ = self.forward(x)
return output
def relu(self, x):
return np.maximum(x, 0)
def relu_derv(self, y):
# y = relu(x) is not differentiable at x = 0.
# if y > 0, derivative = 1
# otherwise, derivative = 0
return np.sign(y)
def forward(self, x):
hidden = x
aux_layers = []
hiddens = [x]
for pm_hidden in self.pm_hiddens:
hidden, aux = self.forward_layer(hidden, pm_hidden, 'relu')
aux_layers.append(aux)
output, aux_out = self.forward_layer(hidden, self.pm_output, None)
return output, [aux_out, aux_layers]
def forward_layer(self, x, pm, activation):
y = np.matmul(x, pm['w']) + pm['b']
if activation == 'relu':
y = self.relu(y)
return y, [x, y]
def backward(self, G_output, aux, optim : Optimizer):
aux_out, aux_layers = aux
G_hidden = optim.step(G_output, None, self.pm_output, aux_out)
for n in reversed(range(len(self.pm_hiddens))):
G_hidden = optim.step(G_hidden, 'relu', self.pm_hiddens[n], aux_layers[n])
def accuracy(self, output, target):
estimate = np.argmax(output, axis=1)
answer = np.argmax(target, axis=1)
correct = np.equal(estimate, answer)
return np.mean(correct)
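# Illustrative usage sketch (not part of the original module). The shapes below
# are arbitrary; training would additionally need concrete Loss and Optimizer
# implementations from nnet.loss / nnet.optim, which are assumed to exist.
#
#     model = Model(dim_input=4, dim_output=3, hidden_config=[16, 8])
#     model.init_model_hiddens()
#     x = np.random.rand(5, 4).astype(np.float32)
#     output, _ = model.forward(x)   # output has shape (5, 3)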
| 2.609375
| 3
|
Week6/Day5/dailychallenge.py
|
malharlakdawala/DevelopersInstitute
| 0
|
12783683
|
<reponame>malharlakdawala/DevelopersInstitute<filename>Week6/Day5/dailychallenge.py
import json
import random
import psycopg2
import requests
HOSTNAME = 'localhost'
USERNAME = 'postgres'
PASSWORD = '<PASSWORD>'
DATABASE = 'MenuPython1'
connection = psycopg2.connect(host=HOSTNAME, user=USERNAME, password=PASSWORD, dbname=DATABASE)
api_key = "<KEY>"
api_url = "http://api.countrylayer.com/v2/all"
parameters = {"access_key": api_key}
response = requests.get(api_url, params=parameters)
if response.status_code == 200:
response = response.json()
# print(response)
for n in range(10):
i=random.randint(0,100)
name = response[i]["name"]
capital = response[i]["capital"]
topleveldomain = response[i]["topLevelDomain"][0]
region = response[i]["region"]
query = f"INSERT INTO countries (name,capital,topleveldomain,region) values ('{name}','{capital}','{topleveldomain}','{region}');"
cursor = connection.cursor()
cursor.execute(query)
connection.commit()
connection.close()
| 3.578125
| 4
|
Calculator/Sqroot.py
|
rn44/statsCalculator
| 0
|
12783684
|
<gh_stars>0
def sqroot(a):
a = float(a)
b = a ** (1/2)
return b
| 2.5625
| 3
|
venv/Lib/site-packages/configurationutil/unittests/test_cfg_providers/test_configuration_object.py
|
avim2809/CameraSiteBlocker
| 0
|
12783685
|
# encoding: utf-8
import os
import unittest
from configurationutil.cfg_providers import base_provider
from fdutil.path_tools import pop_path
class TestConfigurationObject(unittest.TestCase):
def setUp(self):
self.cfg_file = os.path.join(pop_path(__file__), u'test_config_object.json')
self.template = os.path.join(pop_path(__file__), u'..', u'resources', u'upgrade_template.json')
self.missing_template = os.path.join(pop_path(__file__), u'test_config_object_template.json')
def tearDown(self):
pass
def test_instantiation(self):
base_provider.ConfigObject.DEFAULT_TEMPLATE = self.template
with self.assertRaises(NotImplementedError):
self.cfg = base_provider.ConfigObject(config_file=self.cfg_file,
create=True)
del base_provider.ConfigObject.DEFAULT_TEMPLATE
def test_instantiation_missing_default_template(self):
with self.assertRaises(NotImplementedError):
self.cfg = base_provider.ConfigObject(config_file=self.cfg_file,
create=True)
def test_instantiation_missing_default_file(self):
base_provider.ConfigObject.DEFAULT_TEMPLATE = self.missing_template
with self.assertRaises(IOError):
self.cfg = base_provider.ConfigObject(config_file=self.cfg_file,
create=True)
del base_provider.ConfigObject.DEFAULT_TEMPLATE
if __name__ == u'__main__':
unittest.main()
| 2.515625
| 3
|
tests/kmburst/mesh/test_case_2.py
|
gauenk/faiss_fork
| 0
|
12783686
|
<reponame>gauenk/faiss_fork
# -- python --
import sys
import pytest
from einops import rearrange
# -- pytorch --
import torch
# -- project --
from pyutils import save_image,get_img_coords
# -- faiss --
import faiss
sys.path.append("/home/gauenk/Documents/faiss/contrib/")
from kmb_search import jitter_search_ranges,tiled_search_frames,mesh_from_ranges
from kmb_search.testing.interface import exec_test,init_zero_tensors
from kmb_search.testing.mesh_utils import MESH_TYPE,mesh_setup
@pytest.mark.mesh_case2
def test_case_2():
# -- params --
k = 1
t = 4
h = 8
w = 8
c = 3
ps = 11
nsiters = 2 # num search iters
kmeansK = 3
nsearch_xy = 3
nfsearch = 3 # num of frames searched (per iter)
nbsearch = nsearch_xy**2 # num blocks searched (per frame)
nblocks = nbsearch**(kmeansK-1)
std = 20./255.
device = 'cuda:0'
coords = get_img_coords(t,1,h,w)[:,:,0].to(device)
verbose = False
seed = 123
# -- create tensors --
zinits = init_zero_tensors(k,t,h,w,c,ps,nblocks,nbsearch,
nfsearch,kmeansK,nsiters,device)
burst,offset_gt = mesh_setup(k,t,h,w,c,ps,std,device,seed)
block_gt = offset_gt + coords
search_ranges = jitter_search_ranges(nsearch_xy,t,h,w).to(device)
search_frames = tiled_search_frames(nfsearch,nsiters,t//2).to(device)
if verbose:
print(search_frames)
print(search_frames.shape)
print(search_ranges.shape)
# -- for testing --
blocks = zinits.blocks
block_eqs = (search_ranges == block_gt[:,:,None])
block_eqs = torch.all(block_eqs,dim=0)
block_eqs = block_eqs.type(torch.float)
init_blocks = torch.argmax(block_eqs,dim=1,keepdim=True)[:,0]
init_blocks = init_blocks.type(torch.int)
if verbose:
print("init_blocks.shape: ",init_blocks.shape)
print(init_blocks[-1,:,:])
# -- execute test --
exec_test(MESH_TYPE,1,k,t,h,w,c,ps,nblocks,nbsearch,nfsearch,kmeansK,std,
burst,init_blocks,search_frames,search_ranges,zinits.outDists,
zinits.outInds,zinits.modes,zinits.modes3d,zinits.km_dists,
zinits.self_dists,zinits.centroids,zinits.clusters,
zinits.cluster_sizes,blocks,zinits.ave,zinits.vals)
# -- compute using python --
blocks_gt = mesh_from_ranges(search_ranges,search_frames[0],block_gt,t//2)
blocks_gt = blocks_gt.to(device)
# -- visually compare blocks --
if verbose:
print("-- blocks --")
print(blocks.shape)
for s in range(nblocks):
numEq = blocks[:,:,s,:,:] == blocks_gt[:,:,s,:,:]
numEq = numEq.type(torch.float)
print(s,numEq.mean().item())
print("-"*30)
print(search_ranges.shape)
for i in range(search_ranges.shape[1]):
print(i)
print(search_ranges[:,i,:,4,4])
print("-"*30)
print(blocks[:,:,0,4,4])
print(blocks_gt[:,:,0,4,4])
print("-"*30)
print(blocks[:,:,7,4,4])
print(blocks_gt[:,:,7,4,4])
print("-"*30)
print(blocks[:,:,40,4,4])
print(blocks_gt[:,:,40,4,4])
print("-"*30)
# print(blocks[:,:,0,:,:] == blocks_gt[:,:,0,:,:])
# print(blocks[:,:,40,:,:] == blocks_gt[:,:,40,:,:])
# print(block_gt[:,:,0,0])
# print(search_ranges[:,0,:,0,0])
# -- compare results --
delta = torch.sum(torch.abs(blocks - blocks_gt)).item()
assert delta < 1e-8, "Difference must be smaller than tolerance."
| 1.632813
| 2
|
vnegmas/backend/src/nnegmas/monitor.py
|
YueNing/vnegmas
| 3
|
12783687
|
"""
some class that used for monitor the stats information of
negmas during the time of simulation
monitor methode:
1. detect the information of changing file
2. detect the information of shared memory
"""
from abc import ABCMeta, abstractmethod
import os, time
from typing import Optional
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from hachiko.hachiko import AIOWatchdog, AIOEventHandler
import asyncio
class NegmasMonitorFile(AIOEventHandler):
"""
Use this class to monitor the stats file
>>> nm = NegmasMonitorFile()
please see how to initial NegmasMonitorFile need set log_folder!
>>> nm2 = NegmasMonitorFile(log_folder='./log_folder')
{'log': ['log.txt', 'log_test'], 'stats': ['m_product', 'm_balance', 'm_breach', 'm_kkk']}
>>> nm2.run()
"""
def __init__(self, log_folder:Optional[str]=None):
super(NegmasMonitorFile, self).__init__()
self.mode = "debug"
self._watch_path = log_folder
NegmasMonitorFile._watch_path = log_folder
if log_folder is not None:
self._file_detect()
else:
print('please see how to initial NegmasMonitorFile need set log_folder!')
    # Detect log and stats files in a directory and set each file cursor to zero
    # (seek() is used later to read only the appended content).
    def _file_detect(self) -> None:
try:
self.worlds_stats = {}
def _detect_files(world):
stats_files = {}
logs_files = {}
all_files = {}
for f in os.listdir(self._watch_path+'/'+world):
if f.startswith("log"):
logs_files[f] = 0
elif f.startswith('m_'):
stats_files[f] = 0
all_files["log"] = logs_files
all_files["stats"] = stats_files
return all_files
worlds = next(os.walk(self._watch_path))[1]
for w in worlds:
self.worlds_stats[w] = _detect_files(w)
if self.mode == "debug":
print(self.worlds_stats)
except Exception as e:
            print(f'cannot find {self._watch_path}: {e}')
async def on_deleted(self, event):
print(event)
if not event.is_directory:
if self.mode == "debug":
print(f"delete {event.src_path}")
async def on_created(self, event):
print(event)
world_monitor = []
if not event.is_directory:
new_file = event.src_path.split("/")[-1]
world_name = event.src_path.split("/")[-2]
if world_name in self.worlds_stats:
if new_file.startswith("log"):
self.worlds_stats[world_name]["log"][new_file] = 0
elif new_file.startswith("m_"):
self.worlds_stats[world_name]["stats"][new_file] = 0
if self.mode == "debug":
print(f"create {event.src_path} files {self.worlds_stats}")
else:
self.worlds_stats[event.src_path.split("/")[-1]] = {"log":{}, "stats":{}}
print(self.worlds_stats)
async def on_moved(self, event):
print(event)
if not event.is_directory:
            if self.mode == "debug":
print(f"moved {event.src_path}")
async def on_modified(self, event):
print(event)
if not event.is_directory:
file_path = event.src_path
filename = file_path.split('/')[-1]
world_name = file_path.split('/')[-2]
new_content = ''
if world_name in self.worlds_stats:
if filename.startswith('m_'):
last_seek = self.worlds_stats[world_name]['stats'][filename]
f = open(file_path)
f.seek(last_seek,0)
new_content = f.read().strip().replace("\n", "")
self.worlds_stats[world_name]['stats'][filename] = f.tell()
print(self.worlds_stats[world_name]['stats'][filename])
f.close()
if self.mode == "debug":
print(f"changed {file_path} content {new_content}")
async def watch_fs(path):
watch = AIOWatchdog(path, event_handler=NegmasMonitorFile(log_folder=path))
watch.start()
import threading
    print('monitor threading is {}'.format(threading.current_thread()))
import os
print("monitor process id is {}".format(os.getpid()))
for _ in range(100):
await asyncio.sleep(1)
watch.stop()
print("Finish monitoring task")
class NegmasMonitorMemory():
pass
if __name__ == "__main__":
start = time.time()
paths = ['./log_folder']
tasks = [watch_fs(path) for path in paths]
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(asyncio.wait(tasks))
finally:
loop.close()
print("finished all monitoring tasks! time %.5f" % float(time.time()-start))
| 2.890625
| 3
|
Assignment_6/src/LRModel.py
|
JohanneBW/cds_language_assignments
| 0
|
12783688
|
<reponame>JohanneBW/cds_language_assignments
#!/usr/bin/env python
"""
---------- Import libraries ----------
"""
# system tools
import os
import sys
sys.path.append(os.path.join(".."))
# pandas, numpy, gensim
import pandas as pd
import numpy as np
import gensim.downloader
# import classifier utility functions
import utils.classifier_utils as clf
# Machine learning stuff
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import ShuffleSplit
from sklearn import metrics
from sklearn.metrics import classification_report
# tools from tensorflow
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Dense, Embedding,
Flatten, GlobalMaxPool1D, Conv1D)
from tensorflow.keras.optimizers import SGD, Adam
#from tensorflow.keras import backend as K
from tensorflow.keras.utils import plot_model
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.regularizers import L2
# matplotlib
import matplotlib.pyplot as plt
"""
---------- Functions ----------
"""
def plot_history(H, epochs):
"""
Utility function for plotting model history using matplotlib
H: model history
epochs: number of epochs for which the model was trained
"""
plt.style.use("fivethirtyeight")
    fig = plt.figure()
plt.plot(np.arange(0, epochs), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, epochs), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, epochs), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, epochs), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.tight_layout()
plt.show()
fig.savefig("../output/LR_performance.png")
def create_embedding_matrix(filepath, word_index, embedding_dim):
"""
A helper function to read in saved GloVe embeddings and create an embedding matrix
filepath: path to GloVe embedding
word_index: indices from keras Tokenizer
embedding_dim: dimensions of keras embedding layer
"""
    vocab_size = len(word_index) + 1  # add 1 because of the reserved 0 index
embedding_matrix = np.zeros((vocab_size, embedding_dim))
with open(filepath) as f:
for line in f:
word, *vector = line.split()
if word in word_index:
idx = word_index[word]
embedding_matrix[idx] = np.array(
vector, dtype=np.float32)[:embedding_dim]
return embedding_matrix
"""
---------- Main function ----------
"""
def main():
"""
---------- Read data ----------
"""
# Read the data into a pandas data frame
filepath = os.path.join("..", "data", "Game_of_Thrones_Script.csv")
df = pd.read_csv(filepath)
# Make a df with the two columns: season and sentence from the original data set
df = df[["Season", "Sentence"]]
sentence = df['Sentence'].values
season = df['Season'].values
"""
---------- Train, split and vectorize data ----------
"""
# Train and test split using sklearn
X_train, X_test, y_train, y_test = train_test_split(sentence,
season,
test_size=0.25,
random_state=42)
# Vectorize using sklearn
vectorizer = CountVectorizer()
# First we do it for our training data...
X_train_feats = vectorizer.fit_transform(X_train)
#... then we do it for our test data
X_test_feats = vectorizer.transform(X_test)
# We can also create a list of the feature names.
feature_names = vectorizer.get_feature_names()
"""
---------- Logistic regression classifier ----------
"""
# Logistic regression classifier
classifier = LogisticRegression(random_state=42).fit(X_train_feats, y_train)
# Define the y_predict
y_pred = classifier.predict(X_test_feats)
# Evaluate
classifier_metrics = metrics.classification_report(y_test, y_pred)
print(classifier_metrics)
# Plot the data
clf.plot_cm(y_test, y_pred, normalized=True)
# Vectorize full dataset
X_vect = vectorizer.fit_transform(sentence)
# Initialise cross-validation method
title = "Learning Curves (Logistic Regression)"
cv = ShuffleSplit(n_splits=100, test_size=0.2, random_state=0)
# Run on data
model = LogisticRegression(random_state=42)
# Plot the learning curve
clf.plot_learning_curve(model, title, X_vect, season, cv=cv, n_jobs=4)
# Save image in output folder
plt.savefig("../output/LR_CrossValidation.png")
#Define behaviour when called from command line
if __name__ == "__main__":
main()
| 2.34375
| 2
|
5 - synthetic-data-applications/time-series/missing-values-imputation/factors.py
|
ydataai/Blog
| 0
|
12783689
|
import json
def save_json(data, fpath):
"Stores data as a JSON in the provided filepath."
with open(fpath, 'w') as f:
json.dump(data, f)
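# Illustrative usage sketch (not part of the original module):
#
#     save_json({"column": "temperature", "imputation": "synthetic"}, "report.json")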
| 2.921875
| 3
|
hs_core/tests/api/native/test_hs_requests.py
|
tommac7/hydroshare
| 178
|
12783690
|
<gh_stars>100-1000
from django.test import TestCase
from hs_core.hydroshare import hs_requests
from django.conf import settings
class TestRewrite(TestCase):
""" Test local rewriting that bypasses firewalls and hits local nginx server """
def setUp(self):
self.prod_fqdn = getattr(settings, "PROD_FQDN_OR_IP", "www.hydroshare.org")
self.fqdn = getattr(settings, "FQDN_OR_IP", "www.hydroshare.org")
self.nginx_ip = hs_requests.get_nginx_ip()
def test_localize_outer(self):
""" rewrite requests to outer host"""
self.assertEqual(hs_requests.localize_url("https://{}/foo/bar/".format(self.fqdn)),
"https://{}/foo/bar/".format(self.nginx_ip))
self.assertEqual(hs_requests.localize_url("http://{}/foo/bar/".format(self.fqdn)),
"http://{}/foo/bar/".format(self.nginx_ip))
def test_localize_www(self):
""" rewrite requests to production host"""
self.assertEqual(hs_requests.localize_url("https://{}/foo/bar/".format(self.prod_fqdn)),
"https://{}/foo/bar/".format(self.nginx_ip))
self.assertEqual(hs_requests.localize_url("http://{}/foo/bar/".format(self.prod_fqdn)),
"http://{}/foo/bar/".format(self.nginx_ip))
def test_do_not_localize_others(self):
""" don't rewrite other host addresses """
self.assertEqual(hs_requests.localize_url("https://{}/foo/bar/".format("www.foo.com")),
"https://{}/foo/bar/".format("www.foo.com"))
self.assertEqual(hs_requests.localize_url("http://{}/foo/bar/".format("www.foo.com")),
"http://{}/foo/bar/".format("www.foo.com"))
| 2.390625
| 2
|
mooringlicensing/migrations/0204_stickerprintingresponse_email_from.py
|
jawaidm/mooringlicensing
| 0
|
12783691
|
<filename>mooringlicensing/migrations/0204_stickerprintingresponse_email_from.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-08-05 04:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mooringlicensing', '0203_stickerprintingresponse_email_date'),
]
operations = [
migrations.AddField(
model_name='stickerprintingresponse',
name='email_from',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
| 1.445313
| 1
|
SparkDemo/src/main/py/gen_wide.py
|
naah69/demo
| 16
|
12783692
|
<reponame>naah69/demo
#!/bin/python
# Copyright (c) 2021 <NAME> Technology Co., Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import random
import time
def genRand(s = 10000):
return random.randint(1,s)
def getLine(cols = 10):
tpl = "%s\t"
line = ""
for x in range(int(cols) -1):
line = line + tpl % genRand(x + 10)
line = line + str(genRand(int(cols) + 10))
return line
def getTable(lines = 10, cols = 10):
tpl = "%s\n"
table = ""
for x in range(int(lines) ):
table = table + tpl % getLine(cols)
return table.strip()
def main():
lines = sys.argv[1]
cols = sys.argv[2]
data = getTable(lines, cols)
print(data)
# f = file(fname, 'wr+')
# f.write(data)
# f.flush()
# f.close()
if __name__ == '__main__':
main()
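# Illustrative invocation (not part of the original script): generate a table
# with 5 rows of 3 tab-separated random integer columns.
#
#     python gen_wide.py 5 3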
| 3.125
| 3
|
packages/mcvine/tests/mcvine/run_script/test_instr_with_detsys.py
|
mcvine/mcvine
| 5
|
12783693
|
import os
import mcvine, mcvine.components
instrument = mcvine.instrument()
# add source
source = mcvine.components.sources.Source_simple('source')
instrument.append(source, position=(0,0,0))
# add sample
sample = mcvine.components.samples.V_sample('sample')
instrument.append(sample, position=(0,0,1))
# add detector system
from mcvine import resources
arcsxml = os.path.join(
resources.instrument('ARCS'), 'detsys', 'ARCS.xml.fornxs')
ds = mcvine.components.detectors.DetectorSystemFromXml('ds', instrumentxml=arcsxml, outfilename='events.dat')
instrument.append(ds, position=(0,0,1))
| 2
| 2
|
AdvancedConsole.py
|
jack-the-hack/AdvancedConsole
| 0
|
12783694
|
<filename>AdvancedConsole.py
import os
import keyboard
import platform
if(not platform.system() == "Windows"):
quit("Incompatible system, only compatible with windows")
os.system("@echo off")
colordict = {"black":"0","gray":"8","blue":"1","L_blue":"9","green":"2","L_green":"A","aqua":"3","L_aqua":"B","red":"4","L_red":"C","purple":"5","L_purple":"D","yellow":"6","L_yellow":"E","white":"7","B_white":"F"}
def fs():
keyboard.press_and_release('alt+enter')
def efs():
keyboard.press_and_release('alt+enter')
def addline(txt):
os.system("echo "+txt)
def clear():
os.system("cls")
def color(colorfg,colorbg):
    try:
        colorfgcode = colordict[colorfg]
    except KeyError:
        quit("Error: unknown color {}".format(colorfg))
    try:
        colorbgcode = colordict[colorbg]
    except KeyError:
        quit("Error: unknown color {}".format(colorbg))
os.system("color {}{}".format(colorbgcode,colorfgcode))
| 3.171875
| 3
|
hardware/ip/nt_recv_capture/test/nt_recv_capture_top_test.py
|
aoeldemann/fluent10g
| 20
|
12783695
|
"""Test bench for the Verilog module 'nt_recv_capture_top'."""
# The MIT License
#
# Copyright (c) 2017-2019 by the author(s)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Author(s):
# - <NAME> <<EMAIL>>
#
# Description:
#
# Test bench for the Verilog module 'nt_recv_capture_top'.
import cocotb
from lib.tb import clk_gen, rstn, wait_n_cycles, swp_byte_order
from lib.mem import Mem
from lib.axilite import AXI_Lite_Reader, AXI_Lite_Writer
from lib.axis import AXIS_Writer
from lib.net import gen_packet, packet_to_axis_data, axis_data_to_packet
import random
from nt_recv_capture_cpuregs_defines import *
# clock frequency in MHz
CLK_FREQ_MHZ = 200
# AXI data width
AXI_BIT_WIDTH = 512
# AXI lite data width
AXI_CTRL_BIT_WIDTH = 32
# AXI stream data width
AXIS_BIT_WIDTH = 64
# maximum byte size of a memory read
RD_TRANSFER_SIZE_MAX = 16384
# ring buffer size in bytes
RING_BUFF_SIZES = [32768, 65536, 131072, 262144]
# offset in memory where ring buffer shall be located
RING_BUFF_ADDRS = [0, 2**32-10*(AXI_BIT_WIDTH/8)]
# different capture lengths that shall be tested
MAX_CAPTURE_LENS = [0, 1514, random.randint(1, 1513)]
# number of packets, latency timestamps and inter-packet times to generate
N_PACKETS = 1331
@cocotb.coroutine
def packets_write(dut, axis_writer, axilite_writer, axilite_reader, pkts,
latencies, inter_packet_times):
"""Apply packets on DuT input."""
# start the module
yield axilite_writer.write(CPUREG_OFFSET_CTRL_ACTIVE, 0x1)
# wait a little bit
yield wait_n_cycles(dut.clk, 10)
# iterate over all packets
for i, pkt in enumerate(pkts):
# convert packet to AXI4-Stream data
(tdata, tkeep) = packet_to_axis_data(pkt, AXIS_BIT_WIDTH)
# include latency and inter-packet time in last TUSER word
tuser = len(tdata) * [0]
tuser[-1] = latencies[i] | (1 << 24) | (inter_packet_times[i] << 25)
# write data
yield axis_writer.write(tdata, tkeep, tuser)
# wait random number of cycles before applying the next packet
yield wait_n_cycles(dut.clk, random.randint(0, 10))
# stop the module
yield axilite_writer.write(CPUREG_OFFSET_CTRL_ACTIVE, 0x0)
def check_data(pkts_ref, latencies_ref, inter_packet_times_ref, data,
max_len_capture):
"""Check the received data for correctness.
The function ensures that the data read from the ring buffer (a list of
512 bit data words) matches the expected meta data (timestamps, wire +
capture length) and packet data.
"""
# data word index
i_data = 0
# iterate over all packets
for i_pkt, pkt_ref in enumerate(pkts_ref):
        # determine actual capture length
len_capture = min(len(pkt_ref), max_len_capture)
# data is captured at the granularity of 8 byte words. how many 8 byte
# words do we have?
if len_capture % 8 == 0:
len_capture_words = len_capture / 8
else:
len_capture_words = len_capture / 8 + 1
# initialize empty packet data list
packet_data = []
# iterate over captured data words (8 byte each)
for i in range(len_capture_words+1):
# get data word and increment data word index
d = data[i_data]
i_data += 1
# swap byte order
d = swp_byte_order(d, AXIS_BIT_WIDTH/8)
if i == 0:
# this is meta data
meta_latency = d & 0xFFFFFF
meta_latency_valid = (d >> 24) & 0x1
meta_interpackettime = (d >> 25) & 0xFFFFFFF
meta_len_wire = (d >> 53) & 0x7FF
# make sure the latency is marked valid
if meta_latency_valid != 0x1:
raise cocotb.result.TestFailure(("Packet #%d: " +
"Latency value not " +
"valid") % i)
# make sure latency matches reference value
if latencies_ref[i_pkt] != meta_latency:
raise cocotb.result.TestFailure(("Packet #%d: " +
"incorrect latency") %
i_pkt)
# make sure inter-packet time matches reference value
if inter_packet_times_ref[i_pkt] != meta_interpackettime:
raise cocotb.result.TestFailure(("Packet #%d: " +
"incorrect inter-" +
"packet time") % i_pkt)
# make sure wire length matches packet length
if len(pkt_ref) != meta_len_wire:
raise cocotb.result.TestFailure(("Packet #%d: " +
"invalid wire " +
"length") % i_pkt)
else:
# this is packet data
packet_data.append(d)
# create packet from captured data
if len_capture % 8 == 0:
pkt = axis_data_to_packet(packet_data, 2**8-1, 64)
else:
pkt = axis_data_to_packet(packet_data,
2**(len_capture % 8)-1, 64)
        # make sure packet data matches the expected packet data
if str(pkt)[0:len_capture] != \
str(pkt_ref)[0:len_capture]:
raise cocotb.result.TestFailure(("Packet #%d: " +
"invalid data") % i_pkt)
@cocotb.coroutine
def ring_buff_read(dut, axilite_writer, axilite_reader, ring_buff,
ring_buff_addr, max_len_capture, pkts_ref, latencies_ref,
inter_packet_times_ref):
"""Read data from the ring buffer and check it for correctness.
The coroutines monitors the ring buffer write pointer and reads data from
the buffer if sufficient data is available. It ensures that the read data
matches the expected one.
"""
# get ring buffer size
ring_buff_size = ring_buff.size()
# ring buffer must be larger than 16384 bytes
if ring_buff_size <= 16384:
raise cocotb.result.TestFailure("ring buffer size too small")
# ring buffer size must be a multiple of 16384 bytes
if ring_buff_size % 16384 != 0:
raise cocotb.result.TestFailure("ring buffer size invalid")
# transfer size must be smaller than ring buffer
if RD_TRANSFER_SIZE_MAX >= ring_buff_size:
raise cocotb.result.TestFailure("transfer size too large")
# determine the number of bytes that we are expecting to read in total
size_outstanding = 0
# iterate over packets
for pkt in pkts_ref:
# for each packet we need to read 8 byte of meta information
size_outstanding += 8
# determine data capture length
len_capture = min(len(pkt), max_len_capture)
# data is captured at the granularity of 8 byte words
if len_capture % 8 == 0:
size_outstanding += len_capture
else:
size_outstanding += 8 * (len_capture/8 + 1)
# total capture data is 64 byte aligned
if size_outstanding % 64 != 0:
size_outstanding = 64 * (size_outstanding/64 + 1)
# read pointer has been reset and currently is zero
rd = 0
data = []
while True:
# number of outstanding bytes that still need to be read must never be
# negative
assert size_outstanding >= 0
# abort if there is no more data to be read
if size_outstanding == 0:
break
# read error register
errs = yield axilite_reader.read(CPUREG_OFFSET_STATUS_ERRS)
# make sure there was no error
assert errs == 0x0
# get the write pointer
wr = yield axilite_reader.read(CPUREG_OFFSET_CTRL_ADDR_WR)
# get memory size from current read pointer position until the end of
# the ring buffer memory location
ring_buff_size_end = ring_buff_size - rd
# calculate the desired memory transfer size
transfer_size = min(ring_buff_size_end,
min(size_outstanding, RD_TRANSFER_SIZE_MAX))
# calculated memory transfer size must always be positive
assert transfer_size > 0
# ... and it must always be a multiple of 64 bytes
assert transfer_size % 64 == 0
if rd == wr:
# ring buffer is empty -> nothing to transfer
do_transfer = False
elif rd < wr:
# we can read if the difference between both pointers is at least
# the desired transfer size
do_transfer = (wr - rd) >= transfer_size
elif wr < rd:
# we can read until the end of the ring buffer
do_transfer = True
if not do_transfer:
# no data transfer shall take place now, do nothing
continue
# read data from the ring buffer
data_ring_buff = ring_buff.read(ring_buff_addr + rd, transfer_size)
# write data to list in 8 byte words
for i in range(transfer_size/8):
d = data_ring_buff >> ((transfer_size/8 - i - 1)*64) & 2**64-1
data.append(d)
# update read pointer
if (rd + transfer_size) == ring_buff_size:
# end of memory reached, wrap around
rd = 0
else:
assert (rd + transfer_size) < ring_buff_size
rd = rd + transfer_size
# write read pointer to DuT
yield axilite_writer.write(CPUREG_OFFSET_CTRL_ADDR_RD, rd)
# decrement number of bytes that still remain to be written to memory
size_outstanding -= transfer_size
# wait a little bit
yield wait_n_cycles(dut.clk, 100)
# check data for correctness
check_data(pkts_ref, latencies_ref, inter_packet_times_ref, data,
max_len_capture)
@cocotb.test()
def nt_recv_capture_top_test(dut):
"""Test bench main function."""
# start the clock
cocotb.fork(clk_gen(dut.clk, CLK_FREQ_MHZ))
# no software reset
dut.rst_sw <= 0
# reset DuT
yield rstn(dut.clk, dut.rstn)
# create AXI4-Lite writer, connect and reset it
axilite_writer = AXI_Lite_Writer()
axilite_writer.connect(dut, dut.clk, AXI_CTRL_BIT_WIDTH, "ctrl")
yield axilite_writer.rst()
# create AXI4-Lite reader, connect and reset it
axilite_reader = AXI_Lite_Reader()
axilite_reader.connect(dut, dut.clk, AXI_CTRL_BIT_WIDTH, "ctrl")
yield axilite_reader.rst()
# create AXI4-Stream writer, connect and reset it
axis_writer = AXIS_Writer()
axis_writer.connect(dut, dut.clk, AXIS_BIT_WIDTH)
yield axis_writer.rst()
# create a ring buffer memory (initially of size 0) and connect it to the
# DuT
ring_buff = Mem(0)
ring_buff.connect(dut, "ddr3")
# generate a couple of random Ethernet packets. For each packet, generate
# a 16 bit latency value and a 26 bit inter-packet time value
pkts = []
latencies = []
inter_packet_times = []
for _ in range(N_PACKETS):
pkts.append(gen_packet())
latencies.append(random.randint(0, 2**24-1))
inter_packet_times.append(random.randint(0, 2**28-1))
# start the ring buffer memory main routine
cocotb.fork(ring_buff.main())
# wait some more clock cycles
yield wait_n_cycles(dut.clk, 5)
# iterate over all ring buffer sizes
for i, ring_buff_size in enumerate(RING_BUFF_SIZES):
# set ring buffer size
ring_buff.set_size(ring_buff_size)
        # iterate over all addresses where ring buffer shall be located in
# memory
for j, ring_buff_addr in enumerate(RING_BUFF_ADDRS):
# print status
print("Test %d/%d (this will take a while)" %
(i*len(RING_BUFF_ADDRS) + j + 1,
len(RING_BUFF_ADDRS) * len(RING_BUFF_SIZES)))
# we have a total of 8 GByte of memory. Make sure the ring buffer
# fits at the desired address
if ring_buff_addr + ring_buff_size > 0x1FFFFFFFF:
raise cocotb.result.TestFailure("ring buffer is too large")
# to reduce the simulation memory footprint, provide the memory
# module the first memory address that we actually care about
ring_buff.set_offset(ring_buff_addr)
# write ring buffer memory location and address range
yield axilite_writer.write(CPUREG_OFFSET_CTRL_MEM_ADDR_HI,
ring_buff_addr >> 32)
yield axilite_writer.write(CPUREG_OFFSET_CTRL_MEM_ADDR_LO,
ring_buff_addr & 0xFFFFFFFF)
yield axilite_writer.write(CPUREG_OFFSET_CTRL_MEM_RANGE,
ring_buff_size - 1)
            # iterate over all capture lengths
for max_len_capture in MAX_CAPTURE_LENS:
# reset read address pointer
yield axilite_writer.write(CPUREG_OFFSET_CTRL_ADDR_RD, 0x0)
# set max capture length
yield axilite_writer.write(CPUREG_OFFSET_CTRL_MAX_LEN_CAPTURE,
max_len_capture)
                # start coroutine that applies packets at input
cocotb.fork(packets_write(dut, axis_writer, axilite_writer,
axilite_reader, pkts, latencies,
inter_packet_times))
# wait a bit
yield wait_n_cycles(dut.clk, 50)
# start the ring buffer read coroutine and wait until it
# completes
yield ring_buff_read(dut, axilite_writer, axilite_reader,
ring_buff, ring_buff_addr,
max_len_capture, pkts, latencies,
inter_packet_times)
# make sure no error occured
errs = yield axilite_reader.read(CPUREG_OFFSET_STATUS_ERRS)
assert errs == 0x0
# make sure packet count is correct
pkt_cnt = \
yield axilite_reader.read(CPUREG_OFFSET_STATUS_PKT_CNT)
assert pkt_cnt == len(pkts)
# make sure module is deactivated now
active = yield axilite_reader.read(CPUREG_OFFSET_STATUS_ACTIVE)
assert active == 0
# clear the ring buffer contents
ring_buff.clear()
| 1.320313
| 1
|
i3pystatus/clock.py
|
fkusei/i3pystatus
| 413
|
12783696
|
import errno
import os
import locale
from datetime import datetime
try:
import pytz
HAS_PYTZ = True
except ImportError:
HAS_PYTZ = False
from i3pystatus import IntervalModule
class Clock(IntervalModule):
"""
This class shows a clock.
.. note:: Optionally requires `pytz` for time zone data when using time
zones other than local time.
Format can be passed in four different ways:
- single string, no timezone, just the strftime-format
- one two-tuple, first is the format, second the timezone
- list of strings - no timezones
- list of two tuples, first is the format, second is timezone
Use mousewheel to cycle between formats.
For complete time format specification see:
::
man strftime
All available timezones are located in directory:
::
/usr/share/zoneinfo/
.. rubric:: Format examples
::
# one format, local timezone
format = '%a %b %-d %b %X'
# multiple formats, local timezone
format = [ '%a %b %-d %b %X', '%X' ]
# one format, specified timezone
format = ('%a %b %-d %b %X', 'Europe/Bratislava')
# multiple formats, specified timezones
format = [ ('%a %b %-d %b %X', 'America/New_York'), ('%X', 'Etc/GMT+9') ]
"""
settings = (
("format", "`None` means to use the default, locale-dependent format."),
("color", "RGB hexadecimal code color specifier, default to #ffffff"),
)
format = None
color = "#ffffff"
interval = 1
on_upscroll = ["scroll_format", 1]
on_downscroll = ["scroll_format", -1]
def init(self):
env_lang = os.environ.get('LC_TIME', None)
if env_lang is None:
env_lang = os.environ.get('LANG', None)
if env_lang is not None:
if env_lang.find('.') != -1:
lang = tuple(env_lang.split('.', 1))
else:
lang = (env_lang, None)
else:
lang = (None, None)
if lang != locale.getlocale(locale.LC_TIME):
# affects language of *.strftime() in whole program
locale.setlocale(locale.LC_TIME, lang)
if self.format is None:
if lang[0] == 'en_US':
# MDY format - United States of America
self.format = ["%a %b %-d %X"]
else:
# DMY format - almost all other countries
self.format = ["%a %-d %b %X"]
elif isinstance(self.format, str) or isinstance(self.format, tuple):
self.format = [self.format]
self.system_tz = self._get_system_tz()
self.format = [self._expand_format(fmt) for fmt in self.format]
self.current_format_id = 0
def _expand_format(self, fmt):
if isinstance(fmt, tuple):
if len(fmt) == 1:
return (fmt[0], None)
else:
if not HAS_PYTZ:
raise RuntimeError("Need `pytz` for timezone data")
return (fmt[0], pytz.timezone(fmt[1]))
return (fmt, self.system_tz)
def _get_system_tz(self):
'''
Get the system timezone for use when no timezone is explicitly provided
Requires pytz, if not available then no timezone will be set when not
explicitly provided.
'''
if not HAS_PYTZ:
return None
def _etc_localtime():
try:
with open('/etc/localtime', 'rb') as fp:
return pytz.tzfile.build_tzinfo('system', fp)
except OSError as exc:
if exc.errno != errno.ENOENT:
self.logger.error(
'Unable to read from /etc/localtime: %s', exc.strerror
)
except pytz.UnknownTimeZoneError:
self.logger.error(
'/etc/localtime contains unrecognized tzinfo'
)
return None
def _etc_timezone():
try:
with open('/etc/timezone', 'r') as fp:
tzname = fp.read().strip()
return pytz.timezone(tzname)
except OSError as exc:
if exc.errno != errno.ENOENT:
self.logger.error(
                        'Unable to read from /etc/timezone: %s', exc.strerror
)
except pytz.UnknownTimeZoneError:
self.logger.error(
'/etc/timezone contains unrecognized timezone \'%s\'',
tzname
)
return None
return _etc_localtime() or _etc_timezone()
def run(self):
time = datetime.now(self.format[self.current_format_id][1])
self.output = {
"full_text": time.strftime(self.format[self.current_format_id][0]),
"color": self.color,
"urgent": False,
}
def scroll_format(self, step=1):
self.current_format_id = (self.current_format_id + step) % len(self.format)
| 3.03125
| 3
|
emka_trans/accounts/tests/tests_view.py
|
mmamica/ERP-system
| 0
|
12783697
|
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
from django.test import LiveServerTestCase
from django.test import TestCase
from django.urls import reverse
from accounts.models import UserProfileInfo, User
from admin_app.models import Magazine, Truck
from accounts.forms import UserForm, UserProfileInfoForm
from django.test import Client
from django.contrib.auth.hashers import check_password
from random_word import RandomWords
class RegistrationTestCase(TestCase):
def setUp(self):
self.user1 = User.objects.create(username="user1", first_name="Name1", last_name="Last1",
email="<EMAIL>", password='<PASSWORD>')
self.user1_info = UserProfileInfo.objects.create(user=self.user1, company_name="company 1", phone_number="123456789", longitude=50.064824,
latitude=19.923944, is_client=True)
self.magazine=Magazine.objects.create(longitude=20.262038, latitude=49.819856, radius=50)
self.truck1=Truck.objects.create(id_truck=1,capacity=100, return_date='2018-12-25',start_longitude=20.031655 , start_latitude=49.47704,
end_longitude=19.964476, end_latitude=50.088287)
class RegisterViewTest(RegistrationTestCase):
def test_unique_username(self):
response=self.client.post(reverse('accounts:register_user'),data={'username':'user1','first_name':'Test1',
'last_name':'Test1','email':'<EMAIL>','password':'<PASSWORD>',
'company_name':'TestFirma','city':'Kraków','street':'Floriańska','house_number':27})
self.assertEqual(response.status_code, 200)
self.failUnless(response.context['user_form'])
self.assertFormError(response, 'user_form', field='username',
errors='A user with that username already exists.')
def test_too_long_distance(self):
response = self.client.post(reverse('accounts:register_user'), data={'username': 'test1', 'first_name': 'Test1',
'last_name': 'Test1', 'email': '<EMAIL>',
'password': '<PASSWORD>', 'company_name':'TestFirma',
'city': 'Krzeszowice', 'street': 'Krakowska',
'house_number': 30})
self.assertEqual(response.status_code,200)
self.failUnless(response.context['profile_form'])
self.failUnless(response.context['profile_form'].errors)
def test_success(self):
response = self.client.post(reverse('accounts:register_user'), data={'username': 'test1', 'first_name': 'Test1',
'last_name': 'Test1', 'email': '<EMAIL>',
'password': '<PASSWORD>', 'company_name':'TestFirma',
'city': 'Kraków', 'street': 'Adama Mickiewicza',
'house_number': 30})
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'accounts/register_user.html')
self.assertEqual(User.objects.count(), 2)
self.assertEqual(UserProfileInfo.objects.count(),2)
def test_get_success(self):
response=self.client.get(reverse('accounts:register_user'))
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response,
'accounts/register_user.html')
self.failUnless(isinstance(response.context['user_form'],
UserForm))
self.failUnless(isinstance(response.context['profile_form'],
UserProfileInfoForm))
def test_coordinates_calculation(self):
response=self.client.post(reverse('accounts:register_user'),data={'username': 'test1', 'first_name': 'Test1',
'last_name': 'Test1', 'email': '<EMAIL>',
'password': '<PASSWORD>', 'company_name':'TestFirma',
'city': 'Kraków', 'street': 'Adama Mickiewicza',
'house_number': 30})
created_user=User.objects.get(username='test1')
created_profile=UserProfileInfo.objects.get(user=created_user)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'accounts/register_user.html')
self.assertEqual(User.objects.count(), 2)
self.assertEqual(UserProfileInfo.objects.count(), 2)
self.assertEqual(19.92385,created_profile.longitude)
self.assertEqual(50.06445,created_profile.latitude)
def test_cluster_calculation(self):
response = self.client.post(reverse('accounts:register_user'), data={'username': 'test1', 'first_name': 'Test1',
'last_name': 'Test1', 'email': '<EMAIL>',
'password': '<PASSWORD>',
'company_name': 'TestFirma',
'city': 'Myślenice',
'street': '3 Maja',
'house_number': 20})
created_user = User.objects.get(username='test1')
created_profile = UserProfileInfo.objects.get(user=created_user)
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'accounts/register_user.html')
self.assertEqual(self.truck1.id_truck, created_profile.id_cluster.id_truck)
class ChangePasswordViewTest(TestCase):
def setUp(self):
self.user1 = User.objects.create_user(username="user1", first_name="Name1", last_name="Last1",
email="<EMAIL>", password='<PASSWORD>')
self.user1_info = UserProfileInfo.objects.create(user=self.user1, company_name="company 1", phone_number="123456789",
longitude=50.064824,
latitude=19.923944, is_client=True)
self.c = Client()
def test_password_change(self):
login = self.c.login(username='user1', password='<PASSWORD>')
response = self.c.post(reverse('accounts:change_password'), data={'old_password':'<PASSWORD>',
'new_password1':'<PASSWORD>',
'new_password2':'<PASSWORD>'})
self.assertEqual(login,True)
self.assertRedirects(response, reverse('accounts:edit_my_profile'), status_code=302)
self.user1.refresh_from_db()
self.assertTrue(check_password('<PASSWORD>', self.user1.password))
class MyProfileViewTest(TestCase):
def test_get(self):
user1 = User.objects.create_user(username="user1", first_name="Name1", last_name="Last1",
email="<EMAIL>", password='<PASSWORD>')
user1_info = UserProfileInfo.objects.create(user=user1, company_name="company 1", phone_number="123456789",
longitude=50.064824,
latitude=19.923944, is_client=True)
c = Client()
login = c.login(username='user1', password='<PASSWORD>')
response=c.get(reverse('accounts:my_profile'))
self.assertEqual(response.status_code, 200)
self.assertTrue(login)
self.assertEqual(response.context['user'],user1)
self.assertEqual(response.context['user_profile'],user1_info)
self.assertEqual(response.context['user'].first_name,"Name1")
self.assertEqual(response.context['user_profile'].company_name,"company 1")
class AuthViewTest(TestCase):
def setUp(self):
self.user1 = User.objects.create_user(username="user1", first_name="Name1", last_name="Last1",
email="<EMAIL>", password='<PASSWORD>')
self.user1_info = UserProfileInfo.objects.create(user=self.user1, company_name="company 1", phone_number="123456789",
longitude=50.064824, latitude=19.923944, is_client=True)
self.c = Client()
def test_login_success(self):
response = self.c.get(reverse('accounts:user_login'))
self.assertEquals(response.status_code, 200)
response=self.c.post(reverse('accounts:user_login'), data={'username':'user1','password':'<PASSWORD>'})
self.assertIn('_auth_user_id', self.c.session)
self.assertRedirects(response,reverse('index'))
def test_login_fail(self):
response = self.c.get(reverse('accounts:user_login'))
self.assertEquals(response.status_code, 200)
response = self.c.post(reverse('accounts:user_login'), data={'username': 'user1', 'password': '<PASSWORD>'})
self.assertFormError(response, 'form',field=None,
errors='Błąd logowania! Spróbuj ponownie')
def test_logout(self):
login=self.c.login(username='user1',password='<PASSWORD>')
self.assertTrue(login)
response = self.c.get(reverse('logout'))
self.assertEquals(response.status_code, 302)
self.assertRedirects(response,reverse('index'))
self.assertNotIn('_auth_user_id', self.c.session)
class ShowProfileTestView(TestCase):
def setUp(self):
self.user1 = User.objects.create_user(username="user1", first_name="Name1", last_name="Last1",
email="<EMAIL>", password='<PASSWORD>')
self.user1_info = UserProfileInfo.objects.create(user=self.user1, company_name="company 1", phone_number="123456789",
longitude=50.064824, latitude=19.923944, is_client=True)
self.c = Client()
def test_show_profile(self):
response=self.c.get(reverse("accounts:show_profile", kwargs={'username': 'user1'}))
self.assertEqual(response.status_code,200)
self.assertEqual(response.context['user'], self.user1)
self.assertEqual(response.context['user_profile'], self.user1_info)
# views (uses selenium)
class TestRegister(LiveServerTestCase):
def setUp(self):
self.selenium = webdriver.Firefox()
super(TestRegister, self).setUp()
self.randomUsernameClient = RandomWords().get_random_word()
self.randomUsernameDriver = RandomWords().get_random_word()
def tearDown(self):
self.selenium.quit()
super(TestRegister, self).tearDown()
def test_register_deliever_success(self):
selenium = self.selenium
selenium.get('http://127.0.0.1:8000/accounts/register_user/')
selenium.find_element_by_id('id_username').send_keys(self.randomUsernameDriver)
selenium.find_element_by_id('id_first_name').send_keys('testtest')
selenium.find_element_by_id('id_last_name').send_keys('test')
selenium.find_element_by_id('id_email').send_keys('<EMAIL>')
selenium.find_element_by_id('id_password').send_keys('<PASSWORD>')
selenium.find_element_by_id('id_company_name').send_keys('tmp')
selenium.find_element_by_id('id_phone_number').send_keys('123456789')
selenium.find_element_by_id('city').send_keys('Krakow')
selenium.find_element_by_id('street').send_keys('al.Mickiewicza')
selenium.find_element_by_id('house_number').send_keys('1')
selenium.find_element_by_id('id_is_client')
selenium.find_element_by_name('register').click()
selenium.implicitly_wait(40)
assert 'You have registered successfully' in selenium.page_source
def test_register_client_success(self):
selenium = self.selenium
selenium.get('http://127.0.0.1:8000/accounts/register_user/')
selenium.find_element_by_id('id_username').send_keys(
self.randomUsernameClient)
selenium.find_element_by_id('id_first_name').send_keys('test<PASSWORD>')
selenium.find_element_by_id('id_last_name').send_keys('test')
selenium.find_element_by_id('id_email').send_keys('<EMAIL>')
selenium.find_element_by_id('id_password').send_keys('<PASSWORD>')
selenium.find_element_by_id('id_company_name').send_keys('tmp')
selenium.find_element_by_id('id_phone_number').send_keys('123456789')
selenium.find_element_by_id('city').send_keys('Krakow')
selenium.find_element_by_id('street').send_keys('al.Mickiewicza')
selenium.find_element_by_id('house_number').send_keys('1')
selenium.find_element_by_id('id_is_client').click()
selenium.find_element_by_name('register').click()
selenium.implicitly_wait(20)
assert 'You have registered successfully' in selenium.page_source
class TestLogin(LiveServerTestCase):
def setUp(self):
self.selenium = webdriver.Firefox()
super(TestLogin, self).setUp()
def tearDown(self):
self.selenium.quit()
super(TestLogin, self).tearDown()
def test_login_success(self):
selenium = self.selenium
selenium.get('http://127.0.0.1:8000/accounts/user_login/')
selenium.find_element_by_name('username').send_keys('testClient')
selenium.find_element_by_name('password').send_keys('<PASSWORD>')
selenium.find_element_by_name('login').click()
selenium.implicitly_wait(20)
assert 'LOGOUT' in selenium.page_source
def test_login_wrong_password_error(self):
selenium = self.selenium
selenium.get('http://127.0.0.1:8000/accounts/user_login/')
selenium.find_element_by_name('username').send_keys('testtest')
selenium.find_element_by_name('password').send_keys('<PASSWORD>')
selenium.find_element_by_name('login').click()
selenium.implicitly_wait(20)
assert 'LOGIN' in selenium.page_source
def test_login_user_not_exists_error(self):
selenium = self.selenium
selenium.get('http://127.0.0.1:8000/accounts/user_login/')
selenium.find_element_by_name('username').send_keys(
RandomWords().get_random_word())
selenium.find_element_by_name('password').send_keys('<PASSWORD>')
selenium.find_element_by_name('login').click()
assert 'LOGIN' in selenium.page_source
class TestLogout(LiveServerTestCase):
def setUp(self):
self.selenium = webdriver.Firefox()
super(TestLogout, self).setUp()
self.selenium.get('http://127.0.0.1:8000/accounts/user_login/')
self.selenium.find_element_by_name('username').send_keys('testClient')
self.selenium.find_element_by_name('password').send_keys('<PASSWORD>')
self.selenium.find_element_by_name('login').click()
def tearDown(self):
self.selenium.quit()
super(TestLogout, self).tearDown()
def test_logout(self):
selenium = self.selenium
selenium.get('http://127.0.0.1:8000')
self.selenium.find_element_by_name('logout_nav').click()
assert 'LOGIN' in selenium.page_source
class TestEditProfile(LiveServerTestCase):
def setUp(self):
self.selenium = webdriver.Firefox()
super(TestEditProfile, self).setUp()
self.selenium.get('http://127.0.0.1:8000/accounts/user_login/')
self.selenium.find_element_by_name('username').send_keys('Deliever')
self.selenium.find_element_by_name('password').send_keys('<PASSWORD>')
self.selenium.find_element_by_name('login').click()
def tearDown(self):
self.selenium.quit()
super(TestEditProfile, self).tearDown()
def test_edit_profile_info_success(self):
selenium = self.selenium
selenium.get('http://127.0.0.1:8000/accounts/profile/edit/')
selenium.find_element_by_id('id_first_name').send_keys('test<PASSWORD>')
selenium.find_element_by_id('id_last_name').send_keys('test')
selenium.find_element_by_id('id_company_name').send_keys('test')
selenium.find_element_by_id('id_phone_number').send_keys('123456789')
selenium.find_element_by_name('zapisz').click()
assert 'My profile' in selenium.page_source
class TestButtons(LiveServerTestCase):
def setUp(self):
self.selenium = webdriver.Firefox()
super(TestButtons, self).setUp()
def tearDown(self):
self.selenium.quit()
super(TestButtons, self).tearDown()
def test_index_button(self):
selenium = self.selenium
selenium.get('http://127.0.0.1:8000/')
selenium.find_element_by_name('index').click()
assert 'INDEX' in selenium.page_source
def test_admin_button(self):
selenium = self.selenium
selenium.get('http://127.0.0.1:8000/')
selenium.find_element_by_name('admin').click()
assert 'Django administration' in selenium.page_source
def test_login_button(self):
selenium = self.selenium
selenium.get('http://127.0.0.1:8000/')
selenium.find_element_by_name('login_nav').click()
assert 'Username:' in selenium.page_source
| 2.21875
| 2
|
my_university_api/application/api/mongodb/models.py
|
LittleBitProgrammer/myUniversity
| 1
|
12783698
|
from flask_restx import fields
from application.api.mongodb import *
freshman_model = mongodb.model('freshman', {
'matricola': fields.String
})
id_conversation_model = mongodb.model('id_conversation_model', {
'id_conversation': fields.String
})
send_message_model = mongodb.model('insert_message', {
'id_conversation': fields.String,
'matricola_mittente': fields.String,
'matricola_destinatario': fields.String,
'messaggio': fields.String
})
get_message_model = mongodb.model('get_messages', {
'matricola_mittente': fields.String,
'matricola_destinatario': fields.String,
'messaggio': fields.String,
'data_invio': fields.String
})
conversation_model = mongodb.model('conversation_model', {
'matricola1': fields.String,
'matricola2': fields.String
})
full_conversation_model = mongodb.model('full_conversation_model', {
'id_conversation': fields.String,
'matricola1': fields.String,
'matricola2': fields.String,
'messages': fields.List(fields.Nested(get_message_model))
})
discipline_color_model = mongodb.model('discipline_color_model', {
'codice_corso': fields.String,
'codice_disciplina': fields.String,
'colore_esadecimale': fields.String
})
| 2.265625
| 2
|
PiVideoCapture.py
|
ostev/argo
| 0
|
12783699
|
<filename>PiVideoCapture.py
from typing import Tuple
from picamera.array import PiRGBArray
from picamera import PiCamera
class PiVideoCapture(object):
def __init__(self, camera: PiCamera, size=(192, 108)):
self.camera = camera
self.rawCapture = PiRGBArray(self.camera, size=size)
def read(self, format="bgr"):
"""
Grab a `numpy` array of frame data from the camera
"""
self.camera.capture(self.rawCapture, format)
frame = self.rawCapture.array
self.rawCapture.truncate(0)
return frame
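# A minimal usage sketch (not part of the original file), assuming it runs on
# a Raspberry Pi with the `picamera` package installed: open the camera at the
# same resolution as the capture buffer and grab a few frames.
if __name__ == "__main__":
    with PiCamera(resolution=(192, 108)) as camera:
        cap = PiVideoCapture(camera, size=(192, 108))
        for _ in range(3):
            frame = cap.read()  # numpy array of shape (height, width, 3), BGR by default
            print(frame.shape)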
| 3.140625
| 3
|
faster_rcnn/rpn_gt_opr.py
|
tendence/faster_rcnn
| 1
|
12783700
|
<filename>faster_rcnn/rpn_gt_opr.py<gh_stars>1-10
#!/usr/bin/python3
# Copyright 2017, <NAME> <<EMAIL>>
import mxnet as mx
from .anchor_generator import generate_anchors, map_anchors
import numpy as np
from .utils import bbox_overlaps, bbox_transform
from .config import cfg
def rpn_gt_opr(reg_shape, label, ctx, img_h, img_w, return_anchors=False):
_fn, _fc, feature_height, feature_width = reg_shape
label_count = label.shape[1]
anchor_counts = _fc // 4
# only batch size=1 is supported
ref_anchors = generate_anchors(base_size=16, ratios=cfg.anchor_ratios, scales=cfg.anchor_scales)
anchors = map_anchors(ref_anchors, reg_shape, img_h, img_w, ctx)
anchors = anchors.reshape((1, -1, 4, feature_height, feature_width))
# anchors is [1, H, W, A, 4]
anchors = mx.nd.transpose(anchors, (0, 3, 4, 1, 2))
# anchors is [(1, H, W, A), 4]
anchors = anchors.reshape((-1, 4))
# So until now, anchors are N * 4, the order is [(H, W, A), 4]
overlaps = bbox_overlaps(anchors, label[:, :, :4].reshape((-1, 4)))
# Any gt has its own bbox, gt_assignment in [(1, H, W, A), #{gt}]
gt_assignment = mx.nd.argmax(overlaps, axis=0)
max_overlaps = mx.nd.max(overlaps, axis=1)
bbox_cls_gt = (max_overlaps >= cfg.iou_positive_thresh)
bbox_cls_gt = bbox_cls_gt - ((max_overlaps > cfg.iou_negative_thresh) * (max_overlaps < cfg.iou_positive_thresh))
bbox_assignment = mx.nd.argmax(overlaps, axis=1)
for i in gt_assignment.asnumpy():
bbox_cls_gt[int(i)] = 1
num_fg = int(cfg.rpn_fg_fraction * cfg.rpn_batchsize)
num_bg = cfg.rpn_batchsize - num_fg
bbox_cls_gt_np = bbox_cls_gt.asnumpy()
# sample fg if there are too many positive examples
if (np.sum(bbox_cls_gt_np == 1) > num_fg):
fg_inds = np.where(bbox_cls_gt_np==1)[0]
disable_inds = np.random.choice(fg_inds, size=(len(fg_inds) - num_fg), replace=False)
bbox_cls_gt_np[disable_inds] = -1
# sample bg if there are too many negative examples
if (np.sum(bbox_cls_gt_np == 0) > num_bg):
bg_inds = np.where(bbox_cls_gt_np==0)[0]
disable_inds = np.random.choice(bg_inds, size=(len(bg_inds) - num_bg), replace=False)
bbox_cls_gt_np[disable_inds] = -1
bbox_cls_gt = mx.nd.array(bbox_cls_gt_np, bbox_cls_gt.context)
bbox_cls_gt = bbox_cls_gt.reshape((1, feature_height, feature_width, anchor_counts))
bbox_cls_gt = mx.nd.transpose(bbox_cls_gt, (0, 3, 1, 2))
bbox_assignment = bbox_assignment.reshape((1, feature_height, feature_width, anchor_counts))
bbox_assignment = mx.nd.transpose(bbox_assignment, (0, 3, 1, 2))
# reshape anchors to [1, A, H, W, 4]
anchors = anchors.reshape((1, feature_height, feature_width, anchor_counts, 4))
anchors = mx.nd.transpose(anchors, (0, 3, 1, 2, 4))
reg_label_extend = label[:,:,:4].reshape(
(1, 1, 1, 1, label_count, 4)).broadcast_to(
(1, anchor_counts, feature_height, feature_width, label_count, 4))
# Because of limitations in the MXNet slice operator, the regression target
# is gathered coordinate by coordinate with mx.nd.pick, as follows.
bbox_reg_gt = mx.nd.concatenate(
[mx.nd.pick(reg_label_extend[0][:,:,:,:,0], bbox_assignment[0]).reshape((1, anchor_counts, feature_height, feature_width)),
mx.nd.pick(reg_label_extend[0][:,:,:,:,1], bbox_assignment[0]).reshape((1, anchor_counts, feature_height, feature_width)),
mx.nd.pick(reg_label_extend[0][:,:,:,:,2], bbox_assignment[0]).reshape((1, anchor_counts, feature_height, feature_width)),
mx.nd.pick(reg_label_extend[0][:,:,:,:,3], bbox_assignment[0]).reshape((1, anchor_counts, feature_height, feature_width))], axis=0)
bbox_reg_gt = mx.nd.transpose(bbox_reg_gt, (1, 2, 3, 0)).reshape((1, anchor_counts, feature_height, feature_width, 4))
bbox_reg_gt = bbox_transform(anchors.reshape((-1, 4)), bbox_reg_gt.reshape((-1, 4))).reshape((1, anchor_counts, feature_height, feature_width, 4))
if not return_anchors:
return bbox_cls_gt, bbox_reg_gt
else:
return bbox_cls_gt, bbox_reg_gt, anchors
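# A standalone numpy sketch (not part of the original function) of the fg/bg
# subsampling step above: when there are more positive (or negative) anchors
# than the per-image quota allows, a random subset is disabled by setting its
# label to -1 so the loss ignores it.  The quotas below are illustrative; the
# real ones come from cfg.rpn_fg_fraction and cfg.rpn_batchsize.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    labels = rng.choice([-1.0, 0.0, 1.0], size=100, p=[0.2, 0.6, 0.2])
    num_fg, num_bg = 8, 24  # example quotas
    fg_inds = np.where(labels == 1)[0]
    if len(fg_inds) > num_fg:
        labels[rng.choice(fg_inds, size=len(fg_inds) - num_fg, replace=False)] = -1
    bg_inds = np.where(labels == 0)[0]
    if len(bg_inds) > num_bg:
        labels[rng.choice(bg_inds, size=len(bg_inds) - num_bg, replace=False)] = -1
    print((labels == 1).sum(), (labels == 0).sum())  # at most num_fg / num_bg remain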
| 1.6875
| 2
|
scripts/announcement.py
|
vishalbelsare/jina
| 15,179
|
12783701
|
import re
import sys
meetup_svg = '.github/images/meetup.svg'
readme_md = 'README.md'
conf_py = 'docs/conf.py'
def rm_announce():
# remove all announcement
with open(readme_md) as fp:
_old = fp.read()
_new = re.sub(
r'(<!--startmsg-->\s*?\n).*(\n\s*?<!--endmsg-->)',
rf'\g<1>\g<2>',
_old,
flags=re.DOTALL,
)
with open(readme_md, 'w') as fp:
fp.write(_new)
with open(conf_py) as fp:
_old = fp.read()
_new = re.sub(
r'(# start-announce\s*?\n).*(\n\s*?# end-announce)',
rf'\g<1>\g<2>',
_old,
flags=re.DOTALL,
)
with open(conf_py, 'w') as fp:
fp.write(_new)
if len(sys.argv) < 3:
rm_announce()
else:
text = sys.argv[1]
url = sys.argv[2]
if not text or not url:
rm_announce()
else:
announce_url = f'''
"announcement": \'\'\'
<a href="{url}">{text}</a>
\'\'\',
'''
meetup_svg_url = f'<a href="{url}"><img src="https://github.com/jina-ai/jina/blob/master/{meetup_svg}?raw=true"></a>'
# update meetup_svg
with open(meetup_svg) as fp:
_old = fp.read()
_new = re.sub(r'(<a href=").*(")', rf'\g<1>{url}\g<2>', _old)
_new = re.sub(
r'(<!--startmsg-->\s*?\n).*(\n\s*?<!--endmsg-->)',
rf'\g<1>{text}\g<2>',
_new,
flags=re.DOTALL,
)
with open(meetup_svg, 'w') as fp:
fp.write(_new)
# update readme_md
with open(readme_md) as fp:
_old = fp.read()
_new = re.sub(
r'(<!--startmsg-->\s*?\n).*(\n\s*?<!--endmsg-->)',
rf'\g<1>{meetup_svg_url}\g<2>',
_old,
flags=re.DOTALL,
)
with open(readme_md, 'w') as fp:
fp.write(_new)
# update conf
with open(conf_py) as fp:
_old = fp.read()
_new = re.sub(
r'(# start-announce\s*?\n).*(\n\s*?# end-announce)',
rf'\g<1>{announce_url}\g<2>',
_old,
flags=re.DOTALL,
)
with open(conf_py, 'w') as fp:
fp.write(_new)
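# A small standalone sketch (not part of the original script) of the marker
# replacement pattern used above: everything between <!--startmsg--> and
# <!--endmsg--> is swapped out with re.sub while the markers themselves are
# preserved through the two capture groups.  Kept as comments so it does not
# run when the script itself is executed.
#
#   sample = "intro\n<!--startmsg-->\nold\n<!--endmsg-->\noutro"
#   updated = re.sub(
#       r'(<!--startmsg-->\s*?\n).*(\n\s*?<!--endmsg-->)',
#       r'\g<1>new announcement\g<2>',
#       sample,
#       flags=re.DOTALL,
#   )
#   # updated == "intro\n<!--startmsg-->\nnew announcement\n<!--endmsg-->\noutro"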
| 2.359375
| 2
|
dockit/forms/widgets.py
|
zbyte64/django-dockit
| 5
|
12783702
|
<filename>dockit/forms/widgets.py
from django.forms.widgets import Widget, Media, HiddenInput
from django.utils.safestring import mark_safe
import django.utils.copycompat as copy
from django import forms
from django.forms.util import flatatt
from django.forms.formsets import formset_factory, ORDERING_FIELD_NAME, DELETION_FIELD_NAME
class PrimitiveListWidget(Widget):
'''
Wraps around a subfield.
The widget receives the subfield and is responsible for rendering multiple iterations of the subfield and collecting the submitted data.
'''
def __init__(self, subfield, attrs=None):
self.subfield = subfield
super(PrimitiveListWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
if not isinstance(value, list):
value = self.decompress(value)
field_count = len(value)
final_attrs = self.build_attrs(attrs)
field = self.subfield
widget = field.widget
parts = ['<div class="list-row form-row"></div>' % widget.render(name, value[i], None) for i in range(field_count)]
output = u'<fieldset%s style="float: left;" class="primitivelistfield" name="%s">%s</fieldset>' % (flatatt(final_attrs), name, u''.join(parts))
return mark_safe(output)
def value_from_datadict(self, data, files, name):
value = list()
if hasattr(data, 'getlist'):
source = data.getlist(name)
else:
source = data.get(name, [])
for i, entry in enumerate(source):
val = dict()
val['value'] = entry
val['ORDER'] = i
value.append(val)
return value
def _has_changed(self, initial, data):
if initial is None:
initial = [u'' for x in range(0, len(data))]
else:
if not isinstance(initial, list):
initial = self.decompress(initial)
#for widget, initial, data in zip(self.widgets, initial, data):
# if widget._has_changed(initial, data):
# return True
return True #CONSIDER where is my name?
return False
def decompress(self, value):
"""
Returns a list of decompressed values for the given compressed value.
The given value can be assumed to be valid, but not necessarily
non-empty.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _get_media(self):
"Media for a multiwidget is the combination of all media of the subwidgets"
media = Media()
media += self.subfield.media
definition = getattr(self, 'Media', None)
if definition:
media += Media(definition)
return media
media = property(_get_media)
def __deepcopy__(self, memo):
obj = super(PrimitiveListWidget, self).__deepcopy__(memo)
obj.subfield = copy.deepcopy(self.subfield)
return obj
class PrimitiveFormsetListWidget(Widget):
'''
Wraps around a subfield.
The widget receives the subfield and is responsible for rendering multiple iterations of the subfield and collecting the submitted data.
'''
'''
def __init__(self, subfield, attrs=None):
self.subfield = subfield
super(PrimitiveFormsetListWidget, self).__init__(attrs)
def get_base_form_class(self):
class BaseForm(forms.Form):
value = self.subfield
return BaseForm
def get_formset_class(self, **kwargs):
form_cls = self.get_base_form_class()
kwargs.setdefault('can_order', True)
kwargs.setdefault('can_delete', True)
formset = formset_factory(form_cls, **kwargs)
return formset
def render(self, name, value, attrs=None):
if not isinstance(value, list):
value = self.decompress(value)
final_attrs = self.build_attrs(attrs)
formset_class = self.get_formset_class()
initial=[{'value':val} for val in value]
formset = formset_class(initial=initial, prefix=name)
parts = ['<div class="list-row form-row"><table>%s</table></div>' % form.as_table() for form in formset]
parts.append('<div id="%s-empty" class="list-row form-row empty-row"><table>%s</table></div>' % (name, formset.empty_form.as_table()))
output = u'<div%s style="float: left;" class="primitiveformsetlistfield" data-prefix="%s">%s %s</div>' % (flatatt(final_attrs), name, formset.management_form, u''.join(parts))
return mark_safe(output)
def value_from_datadict(self, data, files, name):
formset_class = self.get_formset_class()
formset = formset_class(data=data, files=files, prefix=name)
value = list()
for form in formset.forms:
val = dict()
for key in ('value', ORDERING_FIELD_NAME, DELETION_FIELD_NAME):
val[key] = form.fields[key].widget.value_from_datadict(data, files, form.add_prefix(key))
value.append(val)
return value
def _has_changed(self, initial, data):
if initial is None:
initial = [u'' for x in range(0, len(data))]
else:
if not isinstance(initial, list):
initial = self.decompress(initial)
#for widget, initial, data in zip(self.widgets, initial, data):
# if widget._has_changed(initial, data):
# return True
return True #CONSIDER where is my name?
return False
def decompress(self, value):
"""
Returns a list of decompressed values for the given compressed value.
The given value can be assumed to be valid, but not necessarily
non-empty.
"""
raise NotImplementedError('Subclasses must implement this method.')
def _get_media(self):
"Media for a multiwidget is the combination of all media of the subwidgets"
media = Media()
media += self.subfield.media
definition = getattr(self, 'Media', None)
if definition:
media += Media(definition)
return media
media = property(_get_media)
def __deepcopy__(self, memo):
obj = super(PrimitiveFormsetListWidget, self).__deepcopy__(memo)
obj.subfield = copy.deepcopy(self.subfield)
return obj
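# A minimal sketch of a concrete subclass (hypothetical, not part of the
# original module): decompress() only has to turn the stored compressed value
# into the list of per-row values that render() iterates over.
class ExamplePrimitiveListWidget(PrimitiveListWidget):
    def decompress(self, value):
        # Treat a missing value as an empty list; assume anything else is
        # already an iterable of primitive values.
        if value is None:
            return []
        return list(value)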
| 2.078125
| 2
|
useful_scripts/mod_pkl.py
|
UILXELA/Cooperative-3D-Object-Detection-Using-Shared-Raw-LIDAR-Data
| 6
|
12783703
|
<gh_stars>1-10
import pickle
import numpy as np
filename=input("type in the pkl name\n")
with open(filename , 'rb') as f:
data = pickle.load(f)
for i in range(len(data)):
original=data[i]['point_cloud']['velodyne_path']
modified=[]
for j in range(len(data[i]['annos']['index'])):
label_str=str(int(original[-10:-4]))+" "+str(data[i]['annos']['index'][j])
modified.append(label_str)
data[i]['annos']['dcount']=modified
#print(data[i]['annos']['dcount'])
with open("mod_"+filename, 'wb') as f:
pickle.dump(data,f)
| 2.375
| 2
|
src/training/evaluator.py
|
yutake27/P3CMQA
| 0
|
12783704
|
import copy
import os
from functools import reduce
from pathlib import Path
import chainer
import chainer.functions as F
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import six
from chainer import configuration, cuda, function
from chainer import reporter as reporter_module
from chainer.dataset import convert
from chainer.training.extensions import Evaluator
from chainermn import CommunicatorBase
from sklearn import metrics
from tqdm import tqdm
def _to_list(a):
"""convert value `a` to list
Args:
a: value to be convert to `list`
Returns (list):
"""
if isinstance(a, (int, float)):
return [a, ]
else:
# expected to be list or some iterable class
return a
def plot_roc(y_true, y_score, out_name):
fpr, tpr, thresholds = metrics.roc_curve(y_true=y_true, y_score=y_score)
auc = metrics.auc(fpr, tpr)
plt.clf()
plt.plot(fpr, tpr, label='ROC curve (area = %.3f)' % auc)
plt.legend()
plt.title('ROC curve', fontsize=16)
plt.xlabel('False Positive Rate', fontsize=16)
plt.ylabel('True Positive Rate', fontsize=16)
plt.grid(True)
plt.savefig(out_name)
class Classification_Evaluator(Evaluator):
"""Evaluator which calculates auc and correlation
Note that this Evaluator is only applicable to binary classification task.
Args:
iterator: Dataset iterator for the dataset to calculate pearson.
It can also be a dictionary of iterators. If this is just an
iterator, the iterator is registered by the name ``'main'``.
target: Link object or a dictionary of links to evaluate. If this is
just a link object, the link is registered by the name ``'main'``.
converter: Converter function to build input arrays and true label.
:func:`~chainer.dataset.concat_examples` is used by default.
It is expected to return input arrays of the form
`[x_0, ..., x_n, t]`, where `x_0, ..., x_n` are the inputs to
the evaluation function and `t` is the true label.
device: Device to which the training data is sent. Negative value
indicates the host memory (CPU).
eval_hook: Function to prepare for each evaluation process. It is
called at the beginning of the evaluation. The evaluator extension
object is passed at each call.
eval_func: Evaluation function called at each iteration. The target
link to evaluate as a callable is used by default.
name (str): name of this extension. When `name` is None,
`default_name='validation'` which is defined in super class
`Evaluator` is used as extension name. This name affects to the
reported key name.
pos_labels (int or list): labels of the positive class, other classes
are considered as negative.
ignore_labels (int or list or None): labels to be ignored.
`None` is used to not ignore all labels.
Attributes:
converter: Converter function.
device: Device to which the training data is sent.
eval_hook: Function to prepare for each evaluation process.
eval_func: Evaluation function called at each iteration.
pos_labels (list): labels of the positive class
ignore_labels (list): labels to be ignored.
"""
def __init__(self, iterator, target, comm, label_name, converter=convert.concat_examples,
device=None, eval_hook=None, eval_func=None, name=None,
pos_labels=1, ignore_labels=None, path_data=None):
super(Classification_Evaluator, self).__init__(
iterator, target, converter=converter, device=device,
eval_hook=eval_hook, eval_func=eval_func)
self.rank = comm.rank
self.name = name
self.pos_labels = _to_list(pos_labels)
self.ignore_labels = _to_list(ignore_labels)
self.comm = comm
self.label_name = label_name
self.path_data = path_data
def __call__(self, trainer=None):
"""Executes the evaluator extension.
Unlike usual extensions, this extension can be executed without passing
a trainer object. This extension reports the performance on validation
dataset using the :func:`~chainer.report` function. Thus, users can use
this extension independently from any trainer by manually configuring
a :class:`~chainer.Reporter` object.
Args:
trainer (~chainer.training.Trainer): Trainer object that invokes
this extension. It can be omitted in case of calling this
extension manually.
Returns:
dict: Result dictionary that contains mean statistics of values
reported by the evaluation function.
"""
# set up a reporter
reporter = reporter_module.Reporter()
if self.name is not None:
prefix = self.name + '/'
else:
prefix = ''
for name, target in six.iteritems(self._targets):
reporter.add_observer(prefix + name, target)
reporter.add_observers(prefix + name, target.namedlinks(skipself=True))
with reporter:
with configuration.using_config('train', False):
result = self.evaluate_roc_corr(trainer=trainer)
reporter_module.report(result)
return result
def evaluate_roc_corr(self, trainer):
iterator = self._iterators['main']
eval_func = self.eval_func or self._targets['main']
if self.eval_hook:
self.eval_hook(self)
if hasattr(iterator, 'reset'):
iterator.reset()
it = iterator
else:
it = copy.copy(iterator)
y_total = np.array([]).reshape([0, len(self.label_name)])
t_total = np.array([]).reshape([0, len(self.label_name)])
protein_id_total = np.array([]).reshape([0, len(self.label_name)])
for batch in it:
in_arrays = self.converter(batch, self.device)
with chainer.no_backprop_mode(), chainer.using_config('train', False):
y = eval_func(*in_arrays[:-2])
t = in_arrays[-2]
protein_id = in_arrays[-1]
# y = F.sigmoid(y)
y_data = cuda.to_cpu(y.data)
t_data = cuda.to_cpu(t)
protein_id = cuda.to_cpu(protein_id)
y_total = np.vstack([y_total, y_data])
t_total = np.vstack([t_total, t_data])
protein_id_total = np.vstack([protein_id_total, protein_id])
updater = trainer.updater
epoch = str(updater.epoch)
out_dir = Path(trainer.out)
observation = {}
for label_index, label in enumerate(self.label_name):
y = y_total[:, label_index]
t = t_total[:, label_index]
protein_id = protein_id_total[:, label_index]
index = np.where(t != -1)[0]
y = y[index]
t = t[index]
protein_id = protein_id[index]
gather_data = self.comm.gather(np.vstack([t, y, protein_id]))
if self.rank == 0:
gather_data = np.concatenate(gather_data, axis=1)
gather_t = np.array(gather_data[0], dtype=np.int)
gather_y = np.array(gather_data[1], dtype=np.float32)
gather_protein_id = np.array(gather_data[2], dtype=np.int)
global_score = []
global_label = []
target_name = []
model_path = []
for row, item in self.path_data.iterrows():
model_index = np.where(gather_protein_id==row)[0]
if len(model_index) > 0:
global_score.append(np.mean(F.sigmoid(gather_y[model_index]).data))
global_label.append(item['gdtts'])
target_name.append(item['dir_name'])
model_path.append(item['path'])
df = pd.DataFrame({'global_score':global_score, 'global_label':global_label, 'target_name':target_name, 'model_path': model_path})
pearson = df.groupby('target_name').corr(method='pearson')['global_score'].mean(level=1)['global_label']
spearman = df.groupby('target_name').corr(method='spearman')['global_score'].mean(level=1)['global_label']
csv_out_name = out_dir/(epoch+label+'_df.csv')
df.to_csv(csv_out_name)
roc_out_name = out_dir/(epoch+'iteration_'+label+'_roc.png')
y_score = F.sigmoid(gather_y).data
plot_roc(y_true=gather_t, y_score=y_score, out_name=roc_out_name)
roc_auc = metrics.roc_auc_score(gather_t, y_score)
np.savez((out_dir/epoch).with_suffix('.npz'), local_label=gather_t, local_score=y_score, protein_id=gather_protein_id)
with reporter.report_scope(observation):
reporter.report({'roc_auc_'+label: roc_auc}, self._targets['main'])
reporter.report({'loss': F.sigmoid_cross_entropy(gather_y, gather_t).data},
self._targets['main'])
reporter.report({'accuracy': F.binary_accuracy(gather_y, gather_t).data}, self._targets['main'])
reporter.report({'pearson': pearson}, self._targets['main'])
reporter.report({'spearman': spearman}, self._targets['main'])
return observation
class MultiClassification_Evaluator(Evaluator):
"""Evaluator which calculates auc and correlation
Note that this Evaluator is only applicable to binary classification task.
Args:
iterator: Dataset iterator for the dataset to calculate pearson.
It can also be a dictionary of iterators. If this is just an
iterator, the iterator is registered by the name ``'main'``.
target: Link object or a dictionary of links to evaluate. If this is
just a link object, the link is registered by the name ``'main'``.
converter: Converter function to build input arrays and true label.
:func:`~chainer.dataset.concat_examples` is used by default.
It is expected to return input arrays of the form
`[x_0, ..., x_n, t]`, where `x_0, ..., x_n` are the inputs to
the evaluation function and `t` is the true label.
device: Device to which the training data is sent. Negative value
indicates the host memory (CPU).
eval_hook: Function to prepare for each evaluation process. It is
called at the beginning of the evaluation. The evaluator extension
object is passed at each call.
eval_func: Evaluation function called at each iteration. The target
link to evaluate as a callable is used by default.
name (str): name of this extension. When `name` is None,
`default_name='validation'` which is defined in super class
`Evaluator` is used as extension name. This name affects to the
reported key name.
pos_labels (int or list): labels of the positive class, other classes
are considered as negative.
ignore_labels (int or list or None): labels to be ignored.
`None` is used to not ignore all labels.
Attributes:
converter: Converter function.
device: Device to which the training data is sent.
eval_hook: Function to prepare for each evaluation process.
eval_func: Evaluation function called at each iteration.
pos_labels (list): labels of the positive class
ignore_labels (list): labels to be ignored.
"""
def __init__(self, iterator, target, comm, label_name, class_num,
converter=convert.concat_examples,
device=None, eval_hook=None, eval_func=None, name=None,
pos_labels=1, ignore_labels=None, path_data=None):
super(MultiClassification_Evaluator, self).__init__(
iterator, target, converter=converter, device=device,
eval_hook=eval_hook, eval_func=eval_func)
self.rank = comm.rank
self.class_num = class_num
self.name = name
self.pos_labels = _to_list(pos_labels)
self.ignore_labels = _to_list(ignore_labels)
self.comm = comm
self.label_name = label_name
self.path_data = path_data
def __call__(self, trainer=None):
"""Executes the evaluator extension.
Unlike usual extensions, this extension can be executed without passing
a trainer object. This extension reports the performance on validation
dataset using the :func:`~chainer.report` function. Thus, users can use
this extension independently from any trainer by manually configuring
a :class:`~chainer.Reporter` object.
Args:
trainer (~chainer.training.Trainer): Trainer object that invokes
this extension. It can be omitted in case of calling this
extension manually.
Returns:
dict: Result dictionary that contains mean statistics of values
reported by the evaluation function.
"""
# set up a reporter
reporter = reporter_module.Reporter()
if self.name is not None:
prefix = self.name + '/'
else:
prefix = ''
for name, target in six.iteritems(self._targets):
reporter.add_observer(prefix + name, target)
reporter.add_observers(prefix + name, target.namedlinks(skipself=True))
with reporter:
with configuration.using_config('train', False):
result = self.evaluate_corr(trainer=trainer)
reporter_module.report(result)
return result
def evaluate_corr(self, trainer):
iterator = self._iterators['main']
eval_func = self.eval_func or self._targets['main']
if self.eval_hook:
self.eval_hook(self)
if hasattr(iterator, 'reset'):
iterator.reset()
it = iterator
else:
it = copy.copy(iterator)
y_total = np.array([]).reshape([0, self.class_num])
t_total = np.array([], dtype=np.int)
protein_id_total = np.array([], dtype=np.int)
for batch in it:
in_arrays = self.converter(batch, self.device)
with chainer.no_backprop_mode(), chainer.using_config('train', False):
y = eval_func(*in_arrays[:-2])
t = in_arrays[-2]
protein_id = in_arrays[-1]
# y = F.sigmoid(y)
y_data = cuda.to_cpu(y.data)
t_data = cuda.to_cpu(t)
protein_id = cuda.to_cpu(protein_id)
y_total = np.vstack([y_total, y_data])
t_total = np.concatenate([t_total, t_data])
protein_id_total = np.concatenate([protein_id_total, protein_id])
updater = trainer.updater
epoch = str(updater.epoch)
out_dir = Path(trainer.out)
observation = {}
gather_data = self.comm.gather(np.hstack([t_total.reshape(-1,1), y_total, protein_id_total.reshape(-1,1)]))
if self.rank == 0:
gather_data = np.concatenate(gather_data)
gather_t = gather_data[:, 0].astype(np.int)
gather_y = gather_data[:, 1:-1].astype(np.float32)
gather_protein_id = gather_data[:, -1].astype(np.int)
global_score = []
global_label = []
target_name = []
model_path = []
for row, item in self.path_data.iterrows():
model_index = np.where(gather_protein_id==row)[0]
if len(model_index) > 0:
local_score = np.argmax(gather_y[model_index], axis=1)/self.class_num
global_score.append(np.mean(local_score))
global_label.append(item['gdtts'])
target_name.append(item['dir_name'])
model_path.append(item['path'])
df = pd.DataFrame({'global_score':global_score, 'global_label':global_label, 'target_name':target_name, 'model_path': model_path})
pearson = df.groupby('target_name').corr(method='pearson')['global_score'].mean(level=1)['global_label']
spearman = df.groupby('target_name').corr(method='spearman')['global_score'].mean(level=1)['global_label']
csv_out_name = out_dir/(epoch+'_df.csv')
df.to_csv(csv_out_name)
# save the per-residue predicted score (argmax class scaled to [0, 1)), matching local_score above
np.savez((out_dir/epoch).with_suffix('.npz'), local_label=gather_t, local_score=np.argmax(gather_y, axis=1) / self.class_num, protein_id=gather_protein_id)
with reporter.report_scope(observation):
reporter.report({'loss': F.softmax_cross_entropy(gather_y, gather_t).data},
self._targets['main'])
reporter.report({'accuracy': F.accuracy(gather_y, gather_t).data}, self._targets['main'])
reporter.report({'pearson': pearson}, self._targets['main'])
reporter.report({'spearman': spearman}, self._targets['main'])
return observation
class Regression_Evaluator(Evaluator):
"""Evaluator which calculates correlation
Args:
iterator: Dataset iterator for the dataset to calculate pearson.
It can also be a dictionary of iterators. If this is just an
iterator, the iterator is registered by the name ``'main'``.
target: Link object or a dictionary of links to evaluate. If this is
just a link object, the link is registered by the name ``'main'``.
converter: Converter function to build input arrays and true label.
:func:`~chainer.dataset.concat_examples` is used by default.
It is expected to return input arrays of the form
`[x_0, ..., x_n, t]`, where `x_0, ..., x_n` are the inputs to
the evaluation function and `t` is the true label.
device: Device to which the training data is sent. Negative value
indicates the host memory (CPU).
eval_hook: Function to prepare for each evaluation process. It is
called at the beginning of the evaluation. The evaluator extension
object is passed at each call.
eval_func: Evaluation function called at each iteration. The target
link to evaluate as a callable is used by default.
name (str): name of this extension. When `name` is None,
`default_name='validation'` which is defined in super class
`Evaluator` is used as extension name. This name affects to the
reported key name.
pos_labels (int or list): labels of the positive class, other classes
are considered as negative.
ignore_labels (int or list or None): labels to be ignored.
`None` is used to not ignore all labels.
Attributes:
converter: Converter function.
device: Device to which the training data is sent.
eval_hook: Function to prepare for each evaluation process.
eval_func: Evaluation function called at each iteration.
pos_labels (list): labels of the positive class
ignore_labels (list): labels to be ignored.
"""
def __init__(self, iterator, target, comm, label_name, converter=convert.concat_examples,
device=None, eval_hook=None, eval_func=None, name=None,
pos_labels=1, ignore_labels=None, path_data=None):
super(Regression_Evaluator, self).__init__(
iterator, target, converter=converter, device=device,
eval_hook=eval_hook, eval_func=eval_func)
self.rank = comm.rank
self.name = name
self.pos_labels = _to_list(pos_labels)
self.ignore_labels = _to_list(ignore_labels)
self.comm = comm
self.label_name = label_name
self.path_data = path_data
def __call__(self, trainer=None):
"""Executes the evaluator extension.
Unlike usual extensions, this extension can be executed without passing
a trainer object. This extension reports the performance on validation
dataset using the :func:`~chainer.report` function. Thus, users can use
this extension independently from any trainer by manually configuring
a :class:`~chainer.Reporter` object.
Args:
trainer (~chainer.training.Trainer): Trainer object that invokes
this extension. It can be omitted in case of calling this
extension manually.
Returns:
dict: Result dictionary that contains mean statistics of values
reported by the evaluation function.
"""
# set up a reporter
reporter = reporter_module.Reporter()
if self.name is not None:
prefix = self.name + '/'
else:
prefix = ''
for name, target in six.iteritems(self._targets):
reporter.add_observer(prefix + name, target)
reporter.add_observers(prefix + name, target.namedlinks(skipself=True))
with reporter:
with configuration.using_config('train', False):
result = self.evaluate_corr(trainer=trainer)
reporter_module.report(result)
return result
def evaluate_corr(self, trainer):
iterator = self._iterators['main']
eval_func = self.eval_func or self._targets['main']
if self.eval_hook:
self.eval_hook(self)
if hasattr(iterator, 'reset'):
iterator.reset()
it = iterator
else:
it = copy.copy(iterator)
y_total = np.array([]).reshape([0, len(self.label_name)])
t_total = np.array([]).reshape([0, len(self.label_name)])
protein_id_total = np.array([]).reshape([0, len(self.label_name)])
for batch in it:
in_arrays = self.converter(batch, self.device)
with chainer.no_backprop_mode(), chainer.using_config('train', False):
y = eval_func(*in_arrays[:-2])
t = in_arrays[-2]
protein_id = in_arrays[-1]
# y = F.sigmoid(y)
y_data = cuda.to_cpu(y.data)
t_data = cuda.to_cpu(t)
protein_id = cuda.to_cpu(protein_id)
y_total = np.vstack([y_total, y_data])
t_total = np.vstack([t_total, t_data])
protein_id_total = np.vstack([protein_id_total, protein_id])
updater = trainer.updater
epoch = str(updater.epoch)
out_dir = Path(trainer.out)
observation = {}
for label_index, label in enumerate(self.label_name):
y = y_total[:, label_index]
t = t_total[:, label_index]
protein_id = protein_id_total[:, label_index]
index = np.where(t != -1)[0]
y = y[index]
t = t[index]
protein_id = protein_id[index]
gather_data = self.comm.gather(np.vstack([t, y, protein_id]))
if self.rank == 0:
gather_data = np.concatenate(gather_data, axis=1)
gather_t = np.array(gather_data[0], dtype=np.float32)
gather_y = np.array(gather_data[1], dtype=np.float32)
gather_protein_id = np.array(gather_data[2], dtype=np.int)
global_score = []
global_label = []
target_name = []
model_path = []
for row, item in self.path_data.iterrows():
model_index = np.where(gather_protein_id==row)[0]
if len(model_index) > 0:
global_score.append(np.mean(gather_y[model_index]))
global_label.append(item['gdtts'])
target_name.append(item['dir_name'])
model_path.append(item['path'])
df = pd.DataFrame({'global_score':global_score, 'global_label':global_label, 'target_name':target_name, 'model_path': model_path})
pearson = df.groupby('target_name').corr(method='pearson')['global_score'].mean(level=1)['global_label']
spearman = df.groupby('target_name').corr(method='spearman')['global_score'].mean(level=1)['global_label']
csv_out_name = out_dir/(epoch+label+'_df.csv')
df.to_csv(csv_out_name)
np.savez((out_dir/epoch).with_suffix('.npz'), local_label=gather_t, local_score=gather_y, protein_id=gather_protein_id)
with reporter.report_scope(observation):
reporter.report({'loss': F.mean_squared_error(gather_y, gather_t).data},
self._targets['main'])
reporter.report({'accuracy': F.r2_score(gather_y, gather_t).data}, self._targets['main'])
reporter.report({'pearson': pearson}, self._targets['main'])
reporter.report({'spearman': spearman}, self._targets['main'])
return observation
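# A standalone pandas sketch (not part of the original evaluators) of the
# per-target correlation reported above: predictions are grouped by target,
# the correlation between predicted and true global scores is computed per
# group, and the mean over targets is reported.  The numbers are made up, and
# the mean(level=1) idiom matches the code above (it requires pandas < 2.0).
if __name__ == '__main__':
    _df = pd.DataFrame({
        'target_name': ['T1', 'T1', 'T1', 'T2', 'T2', 'T2'],
        'global_score': [0.2, 0.5, 0.9, 0.1, 0.4, 0.8],
        'global_label': [0.3, 0.6, 0.8, 0.2, 0.5, 0.7],
    })
    _per_target = _df.groupby('target_name')[['global_score', 'global_label']].corr(method='pearson')
    print(_per_target['global_score'].mean(level=1)['global_label'])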
| 2.46875
| 2
|
src/IoT_Client.py
|
manuandru/Reti2021-Smart-Meter-IoT
| 2
|
12783705
|
# -*- coding: utf-8 -*-
"""
@author: Manuel
"""
import socket
import time
import sys
import json
import sensor_version.config as config
from IoT_Client_functions import read_data_from_sensor
from sensor_version.data_message import message
udp_timeout = 2
udp_delay = 1
if len(sys.argv) != 2:
print('Error: need client number')
sys.exit(1)
client_number = int(sys.argv[1])
client_ip = config.arp_table[client_number]
server_address = (config.gateway_UDP_ip, config.gateway_UDP_port)
while True:
print('Reading data from sensor...')
hour, temperature, humidity = read_data_from_sensor()
data = message(client_number, hour, temperature, humidity)
data.set_ip_address(client_ip)
OK = False
while not OK:
try:
# socket create
udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
print(f'Sending data to {server_address}...')
data.sending_time(time.time_ns())
data_bytes = json.dumps(data.__dict__)
t0 = time.time_ns()
udp_socket.sendto(data_bytes.encode('utf8'), server_address)
t = time.time_ns()
dt = t - t0
print('Socket time:', dt, 'ns')
print('Waiting for response...')
udp_socket.settimeout(udp_timeout)
server_response, server = udp_socket.recvfrom(1024)
if server_response.decode() == 'OK':
OK = True
else:
raise Exception('Wrong Response')
except Exception as error:
print(f'Error: {error}')
print('Try sending again...')
time.sleep(udp_delay)
finally:
udp_socket.close()
print('Data are correctly sent\n')
time.sleep(5)
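# A minimal counterpart sketch (not part of this client; the real gateway in
# this project may differ): a UDP server that receives the JSON payload sent
# above and acknowledges it with 'OK', which is the reply the retry loop waits
# for.  Kept as comments; the bind address and port are assumptions.
#
#   import socket, json
#   sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   sock.bind(('0.0.0.0', 5005))
#   while True:
#       payload, addr = sock.recvfrom(1024)
#       print(json.loads(payload.decode('utf8')))
#       sock.sendto(b'OK', addr)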
| 3.03125
| 3
|
src/phase-3/old_test/test_postgres.py
|
spininertia/graph-mining-rdbms
| 0
|
12783706
|
<reponame>spininertia/graph-mining-rdbms
import sys
import time
import psycopg2
if __name__ == "__main__" :
conn = psycopg2.connect(database="mydb", host="127.0.0.1", user="postgres")
print "Testing postgres."
| 2.140625
| 2
|
rin/models/message/components/cache.py
|
an-dyy/Rin
| 13
|
12783707
|
<filename>rin/models/message/components/cache.py
from __future__ import annotations
from typing import TYPE_CHECKING
from ...cacheable import Cache, Cacheable
if TYPE_CHECKING:
from .actionrow import Component
__all__ = ("ComponentCache",)
class ComponentCache(Cacheable):
"""A components cache."""
cache: Cache[Component]
| 1.8125
| 2
|
src/calculations_diff_to_int_test.py
|
danOSU/emulator-validation
| 4
|
12783708
|
<reponame>danOSU/emulator-validation
#!/usr/bin/env python3
#
import numpy as np
from collections.abc import Iterable
import h5py
import sys, os, glob
from configurations import *
from calculations_file_format_single_event import result_dtype, Qn_species, Qn_diff_pT_cuts
filename=sys.argv[1]
data = np.fromfile(filename, dtype=result_dtype)
system = 'Pb-Pb-2760'
Qn_rap_range=2.
mid_pT_bins=[(Qn_diff_pT_cuts[i]+Qn_diff_pT_cuts[i+1])/2. for i in range(0,len(Qn_diff_pT_cuts)-1)]
print(data['ALICE'].dtype)
Qn_diff=data['d_flow_pid']
#print(v1)
#
##print(Qn_diff['pion'])
##
##print(np.add(Qn_diff['pion'],Qn_diff['kaon']))
#
Qn_diff_ch=np.zeros(Qn_diff['pion'].shape,dtype=Qn_diff['pion'].dtype)
## Get charged hadron Q_n
#for species, pid in Qn_species:
# weight=1
# if (species== 'Sigma'):
# weight=2
# tmp_Qn_id=Qn_diff[species]
## print(tmp_Qn_id['N'])
## Qn_diff_ch['N']=Qn_diff_ch['N']+weight*tmp_Qn_id['N']
# Qn_diff_ch['N']+=weight*tmp_Qn_id['N']
# Qn_diff_ch['Qn']+=weight*tmp_Qn_id['Qn']
# Get charged hadron Q_n between pseudorapidity cuts
def integrated_jacobian(m, pT, etaCut):
m2=m*m
pT2=pT*pT
cosh2EtaCut=np.cosh(2*etaCut)
sinhEtaCut=np.sinh(etaCut)
return np.log((np.sqrt(2*m2 + pT2 + pT2*cosh2EtaCut) + np.sqrt(2)*pT*sinhEtaCut)/(np.sqrt(2*m2 + pT2 + pT2*cosh2EtaCut) - np.sqrt(2)*pT*sinhEtaCut)) #/(2.*etaCut)
masses={
'pion':0.138,
'kaon':0.494,
'proton':0.938,
'Sigma':1.189,
'Xi':1.318
}
etaCut=0.8
pTminCut=0.2
#print(Qn_diff['pion']['N'][0][:][0])
#print(Qn_diff['pion']['N'][0][:,0])
Qn_ch=np.zeros(1,dtype=[('N', '<f8', 4), ('Qn', '<c16', (4,5))])
#print(Qn_ch['Qn'])
#
#print(Qn_diff['pion']['Qn'][0][:,0])
#
#exit(1)
#print(Qn_diff['pion']['N'][0][:][0].shape)
for species, pid in Qn_species:
weight=1
if (species== 'Sigma'):
weight=2
for i, pT in enumerate(mid_pT_bins):
if (pT<pTminCut):
continue
rapidity_to_pseudorapidity_jacobian=integrated_jacobian(masses[species],pT,etaCut)
Qn_ch['N']+=Qn_diff[species]['N'][0][:,i]/Qn_rap_range*weight*rapidity_to_pseudorapidity_jacobian
Qn_ch['Qn']+=Qn_diff[species]['Qn'][0][:,i]/Qn_rap_range*weight*rapidity_to_pseudorapidity_jacobian
#print(i,pT,Qn_diff[species]['Qn'][0,:,i])
# tmp_Qn_id=Qn_diff[species]
## print(tmp_Qn_id['N'])
## Qn_diff_ch['N']=Qn_diff_ch['N']+weight*tmp_Qn_id['N']
# Qn_diff_ch['N']+=weight*tmp_Qn_id['N']
# Qn_diff_ch['Qn']+=weight*tmp_Qn_id['Qn']
print("Q0_ch",np.divide(data['ALICE']['flow']['N'],Qn_ch['N']))
#print("Qn_ch",data['ALICE']['flow']['Qn'][0,:,0:5],Qn_ch['Qn'])
print("Qn_ch",np.divide(data['ALICE']['flow']['Qn'][0,:,0:5],Qn_ch['Qn']))
for species, pid in Qn_species:
alt=species
if (species== 'Sigma'):
alt='Sigma0'
print('mult ',species,": ",data['ALICE']['dN_dy'][alt],np.divide(np.sum(Qn_diff[species]['N'],axis=2)/Qn_rap_range,data['ALICE']['nsamples']))
print('mean pT ', species,": ",data['ALICE']['mean_pT'][alt],
np.divide(np.sum(np.multiply(Qn_diff[species]['N'],mid_pT_bins),axis=2),np.sum(Qn_diff[species]['N'],axis=2)))
# np.sum(np.multiply(Qn_diff[species]['N'],mid_pT_bins),axis=2)/np.sum(Qn_diff[species]['N'],axis=2) )
#[np.average(mid_pT_bins, axis=2, weights=Qn_diff[species]['N'][i]) for i in range(0,4)])
# print('test ', species, ": ", Qn_diff[species]['N'], mid_pT_bins)
## Loop over data structure
## Assumes that "data" is a numpy array with dtype given
## by the array "structure" (though the latter is not a dtype object)
#def print_data_structure(data, structure):
#
# n_items=len(structure)
#
# if (n_items > 0):
# for n, item in enumerate(structure):
# tmp_struct=structure[n]
# # If the item has substructure, recurse on it
# if (not isinstance(tmp_struct[1], str)) and (isinstance(tmp_struct[1], Iterable)):
# print(tmp_struct[0])
# print_data_structure(data[tmp_struct[0]],tmp_struct[1])
# # If no substructure, just output the result
# else:
# print(tmp_struct[0],data[tmp_struct[0]])
#
#print_data_structure(data, result_dtype)
#
| 1.789063
| 2
|
food/addforms.py
|
SandeshGhi/FoodApp
| 0
|
12783709
|
from django import forms
from django.db import models
from django.forms import fields
from .models import Item
class ItemForm(forms.ModelForm):
class Meta:
model = Item
fields = ['itemName', 'itemDescription', 'itemPrice', 'itemImage']
| 2.140625
| 2
|
lib-dynload/lz4r07/setup.py
|
tabulon-ext/dedupsqlfs
| 22
|
12783710
|
<gh_stars>10-100
import sys
from distutils.command.build_ext import build_ext
from distutils.core import setup
from distutils.extension import Extension
from distutils import ccompiler
__version__ = "0.7.0.1"
LZ4_VERSION = "r131"
if sys.version_info < (2,6):
sys.stderr.write("ERROR: Python 2.5 and older are not supported, and probably never will be.\n")
sys.exit(1)
EXTRA_OPT=0
if "--extra-optimization" in sys.argv:
# Support legacy output format functions
EXTRA_OPT=1
sys.argv.remove("--extra-optimization")
if ccompiler.get_default_compiler() == "msvc":
extra_compile_args = ["/Wall"]
if EXTRA_OPT:
extra_compile_args.insert(0, "/O2")
else:
extra_compile_args.insert(0, "/Ot")
else:
extra_compile_args = ["-std=c99", "-Wall"]
if EXTRA_OPT:
extra_compile_args.insert(0, "-march=native")
extra_compile_args.insert(0, "-O3")
else:
extra_compile_args.insert(0, "-O2")
if ccompiler.get_default_compiler() == "msvc":
define_macros = [("LZ4_VERSION","\\\"%s\\\"" % LZ4_VERSION)]
else:
extra_compile_args.extend(["-W", "-Wundef", "-DFORTIFY_SOURCE=2", "-fstack-protector",])
define_macros = [("LZ4_VERSION","\"%s\"" % LZ4_VERSION)]
lz4mod = Extension(
'_lz4r07',
[
'src/lz4.c',
'src/lz4hc.c',
'src/python-lz4.c'
],
extra_compile_args=extra_compile_args,
define_macros=define_macros,
)
setup(
name='_lz4r07',
version=__version__,
description="LZ4 Bindings for Python",
long_description=open('README.rst', 'r').read(),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/steeve/python-lz4',
packages=[],
package_dir={'': 'src'},
ext_modules=[lz4mod,],
cmdclass = {
'build_ext': build_ext,
},
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Programming Language :: C',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
)
| 2.015625
| 2
|
random/stocks_simulator.py
|
Dmendoza3/Phyton
| 0
|
12783711
|
import time
import random
value = random.randint(0, 1000)
while True:
if random.randint(0, 100) > 50:
print("lose everything")
| 3.375
| 3
|
server/api/resources/personality/__init__.py
|
NUS-CS-MComp/cs-cloud-computing-music-personality
| 2
|
12783712
|
from .insights import PersonalityScore
from .inference import Inference
__all__ = ["PersonalityScore", "Inference"]
| 1.054688
| 1
|
run.py
|
TyroneZeka/Flask-Blog
| 0
|
12783713
|
<reponame>TyroneZeka/Flask-Blog
from app import app
from db import db
db.init_app(app)
@app.before_first_request
def create_tables():
db.create_all(app=app)
| 2.03125
| 2
|
apiApp/models.py
|
ToneHome/fetchYapi
| 0
|
12783714
|
from django.db import models
# Create your models here.
class ProjectCat(models.Model):
ProjectId = models.IntegerField()
CarId = models.IntegerField()
CartName = models.CharField(max_length=225)
class ApiName(models.Model):
ProjectId = models.IntegerField()
CarId = models.IntegerField()
ApiId = models.IntegerField()
ApiName = models.CharField(max_length=225)
| 2.234375
| 2
|
deps/ts_proto_deps.bzl
|
heartless-clown/rules_proto
| 249
|
12783715
|
<reponame>heartless-clown/rules_proto
"""
GENERATED FILE - DO NOT EDIT (created via @build_stack_rules_proto//cmd/depsgen)
"""
load("@build_bazel_rules_nodejs//:index.bzl", "npm_install", "yarn_install")
def _maybe(repo_rule, name, **kwargs):
if name not in native.existing_rules():
repo_rule(name = name, **kwargs)
def ts_proto_deps():
npm_ts_proto() # via <TOP>
npm_tsc() # via <TOP>
def npm_ts_proto():
_maybe(
npm_install,
name = "npm_ts_proto",
package_json = "@build_stack_rules_proto//plugin/stephenh/ts-proto:package.json",
package_lock_json = "@build_stack_rules_proto//plugin/stephenh/ts-proto:package-lock.json",
symlink_node_modules = False,
)
def npm_tsc():
_maybe(
yarn_install,
name = "npm_tsc",
package_json = "@build_stack_rules_proto//rules/ts:package.json",
yarn_lock = "@build_stack_rules_proto//rules/ts:yarn.lock",
frozen_lockfile = True,
)
| 1.476563
| 1
|
tfm_server/political_clasification/forms.py
|
ricardocancar/TFM
| 0
|
12783716
|
# -*- coding: utf-8 -*-
from django import forms
from .models import PoliticalClasification
class PoliticalClasificationForm(forms.ModelForm):
class Meta:
model = PoliticalClasification
fields = [
'political',
'content',
'numbers'
]
class RawPoliticalClasificationForm(forms.Form):
political = forms.CharField()
content = forms.CharField()
numbers = forms.DecimalField()
| 2.0625
| 2
|
ippon/models/cup_fight_test.py
|
morynicz/ippon_back
| 0
|
12783717
|
import datetime
import django
from django.db import IntegrityError
from django.test import TestCase
import ippon.models
import ippon.models.club as cl
import ippon.models.cup_fight
import ippon.models.cup_fight as cfm
import ippon.models.fight
import ippon.models.player as plm
import ippon.models.team_fight as tfm
import ippon.models.tournament as tm
class TestCupFights(TestCase):
def setUp(self):
self.tournament = tm.Tournament.objects.create(
name='T1',
webpage='http://w1.co',
description='d1',
city='c1',
date=datetime.date(year=2021, month=1, day=1),
address='a1',
team_size=1,
group_match_length=3,
ko_match_length=3,
final_match_length=3,
finals_depth=0,
age_constraint=5,
age_constraint_value=20,
rank_constraint=5,
rank_constraint_value=7,
sex_constraint=1)
self.tournament.save()
self.cup_phase = self.tournament.cup_phases.create(name="CP",
fight_length=3,
final_fight_length=5)
self.t1 = self.tournament.teams.create(name='t1')
self.t2 = self.tournament.teams.create(name='t2')
self.team_fight1 = self.tournament.team_fights.create(aka_team=self.t1,
shiro_team=self.t2)
self.cup_fight = self.cup_phase.cup_fights.create(team_fight=self.team_fight1)
class CupFightFollowingFightTests(TestCupFights):
def setUp(self):
super(CupFightFollowingFightTests, self).setUp()
    def test_fight_throws_no_such_fight_when_get_following_called_on_final(self):
with self.assertRaises(cfm.NoSuchFightException):
self.cup_fight.get_following_fight()
def test_cup_fight_which_is_previous_on_aka_side_returns_following_fight(self):
following_aka = self.cup_phase.cup_fights.create(team_fight=self.team_fight1, previous_aka_fight=self.cup_fight)
self.assertEqual(self.cup_fight.get_following_fight(), following_aka)
class CupFightSiblingTests(TestCupFights):
def setUp(self):
super(CupFightSiblingTests, self).setUp()
self.t3 = self.tournament.teams.create(name='t3')
self.t4 = self.tournament.teams.create(name='t4')
self.tf_aka = self.tournament.team_fights.create(aka_team=self.t3,
shiro_team=self.t4)
self.cf_aka = self.cup_phase.cup_fights.create(team_fight=self.tf_aka)
self.cf_parent = self.cup_phase.cup_fights.create(previous_aka_fight=self.cf_aka,
previous_shiro_fight=self.cup_fight)
def test_cup_fight_when_winner_is_set_and_sibling_has_winner_already_set_creates_team_fight_in_parent(self):
self.cf_aka.team_fight.winner = 1
self.cf_aka.team_fight.save()
self.cup_fight.team_fight.winner = 2
self.cup_fight.team_fight.save()
self.cf_parent.refresh_from_db()
self.assertIsNotNone(self.cf_parent.team_fight)
self.assertEqual(self.cf_parent.team_fight.aka_team, self.t3)
self.assertEqual(self.cf_parent.team_fight.shiro_team, self.t2)
def test_when_aka_fight_winner_is_set_and_shiro_sibling_doesnt_have_winner_yet_doesnt_change_parent(self):
self.cf_aka.team_fight.winner = 1
self.cf_aka.team_fight.save()
self.cf_parent.refresh_from_db()
self.assertIsNone(self.cf_parent.team_fight)
def test_when_shiro_fight_winner_is_set_and_aka_sibling_doesnt_have_winner_yet_doesnt_change_parent(self):
self.cup_fight.team_fight.winner = 1
self.cup_fight.team_fight.save()
self.cf_parent.refresh_from_db()
self.assertIsNone(self.cf_parent.team_fight)
    def test_when_shiro_fight_winner_is_changed_and_parent_was_already_created_but_still_in_prep_change_parent(self):
self.cf_aka.team_fight.winner = 1
self.cf_aka.team_fight.save()
self.cup_fight.team_fight.winner = 2
self.cup_fight.team_fight.save()
self.cf_parent.refresh_from_db()
old_parent_tf_id = self.cf_parent.team_fight.id
self.cf_aka.team_fight.winner = 2
self.cf_aka.team_fight.save()
self.cf_parent.refresh_from_db()
current_parent_tf = self.cf_parent.team_fight
self.assertEqual(old_parent_tf_id, current_parent_tf.id)
self.assertEqual(current_parent_tf.aka_team, self.t4)
self.assertEqual(current_parent_tf.shiro_team, self.t2)
class CupPhaseTests(TestCase):
def setUp(self) -> None:
self.tournament = tm.Tournament.objects.create(
name='T1',
webpage='http://w1.co',
description='d1',
city='c1',
date=datetime.date(year=2021, month=1, day=1),
address='a1',
team_size=1,
group_match_length=3,
ko_match_length=3,
final_match_length=3,
finals_depth=0,
age_constraint=5,
age_constraint_value=20,
rank_constraint=5,
rank_constraint_value=7,
sex_constraint=1)
self.tournament.save()
c = cl.Club.objects.create(
name='cn1',
webpage='http://cw1.co',
description='cd1',
city='cc1')
self.cup_phase = self.tournament.cup_phases.create(name="CP",
fight_length=3,
final_fight_length=5)
self.t1 = self.tournament.teams.create(name='t1')
self.t2 = self.tournament.teams.create(name='t2')
self.team_fight1 = self.tournament.team_fights.create(aka_team=self.t1,
shiro_team=self.t2)
self.cf1 = self.cup_phase.cup_fights.create(team_fight=self.team_fight1)
self.p1 = plm.Player.objects.create(name='pn1', surname='ps1', rank=7,
birthday=datetime.date(year=2001, month=1, day=1), sex=1, club_id=c)
self.p2 = plm.Player.objects.create(name='pn2', surname='ps2', rank=7,
birthday=datetime.date(year=2001, month=1, day=1), sex=1, club_id=c)
self.p3 = plm.Player.objects.create(name='pn3', surname='ps3', rank=7,
birthday=datetime.date(year=2001, month=1, day=1), sex=1, club_id=c)
self.p4 = plm.Player.objects.create(name='pn4', surname='ps4', rank=7,
birthday=datetime.date(year=2001, month=1, day=1), sex=1, club_id=c)
self.p5 = plm.Player.objects.create(name='pn5', surname='ps5', rank=7,
birthday=datetime.date(year=2001, month=1, day=1), sex=1, club_id=c)
self.p6 = plm.Player.objects.create(name='pn6', surname='ps6', rank=7,
birthday=datetime.date(year=2001, month=1, day=1), sex=1, club_id=c)
self.p7 = plm.Player.objects.create(name='pn7', surname='ps6', rank=7,
birthday=datetime.date(year=2001, month=1, day=1), sex=1, club_id=c)
self.p8 = plm.Player.objects.create(name='pn8', surname='ps6', rank=7,
birthday=datetime.date(year=2001, month=1, day=1), sex=1, club_id=c)
self.t1.team_members.create(player=self.p1)
self.t1.team_members.create(player=self.p2)
self.t1.team_members.create(player=self.p3)
self.t1.team_members.create(player=self.p7)
self.t2.team_members.create(player=self.p4)
self.t2.team_members.create(player=self.p5)
self.t2.team_members.create(player=self.p6)
self.t2.team_members.create(player=self.p8)
self.f1 = self.team_fight1.fights.create(aka=self.p1, shiro=self.p4)
self.f2 = self.team_fight1.fights.create(aka=self.p2, shiro=self.p5)
self.f3 = self.team_fight1.fights.create(aka=self.p3, shiro=self.p6)
self.f4 = self.team_fight1.fights.create(aka=self.p7, shiro=self.p8)
def test_destruction_of_cup_phase_is_impossible_when_there_are_some_fights_in_it(self):
with self.assertRaises(django.db.models.ProtectedError) as pe:
self.cup_phase.delete()
self.assertTrue(tfm.TeamFight.objects.filter(cup_fight=self.cf1).count())
self.assertTrue(ippon.models.cup_fight.CupFight.objects.filter(cup_phase=self.cup_phase).count())
self.assertTrue(ippon.models.fight.Fight.objects.filter(team_fight=self.team_fight1).count())
| 2.390625
| 2
|
src/project_euler/P003_largest_prime_factor/solution_01_tests.py
|
lakshmikanth-tesla/ProgrammingProblems
| 1
|
12783718
|
<reponame>lakshmikanth-tesla/ProgrammingProblems<gh_stars>1-10
import unittest
from src.project_euler.P003_largest_prime_factor.solution_01 import answer
class TestSolution(unittest.TestCase):
def testcase_001(self):
N = 600851475143
expected = 6857
self.assertEqual(answer(N), expected)
if __name__ == '__main__':
unittest.main()
| 2.578125
| 3
|
var/spack/repos/builtin/packages/libssh/package.py
|
mrzv/spack
| 2
|
12783719
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libssh(CMakePackage):
"""libssh: the SSH library"""
homepage = "https://www.libssh.org"
url = "https://red.libssh.org/attachments/download/218/libssh-0.7.5.tar.xz"
version('0.7.5', 'd3fc864208bf607ad87cdee836894feb')
depends_on('openssl')
depends_on('zlib')
| 1.109375
| 1
|
src/bdbd/src/bdbd/test/lagtest.py
|
rkent/BDBD
| 0
|
12783720
|
<filename>src/bdbd/src/bdbd/test/lagtest.py<gh_stars>0
#!/usr/bin/env python
# ROS node that measures the lag between odometry message timestamps and wall-clock time
import time
import rospy
from bdbd_common.utils import fstr
from nav_msgs.msg import Odometry
def odom_cb(odometry):
global ros_start
global sys_start
now = float(odometry.header.stamp.secs + 1.0e-9 * odometry.header.stamp.nsecs)
if ros_start is None:
ros_start = now
sys_start = time.time()
else:
lag = (time.time() - sys_start) - (now - ros_start)
print(fstr({'lag ms': lag * 1000}))
# measure the lag, driven by odometry message receipt
ros_start = None
sys_start = None
rospy.init_node('lagtest')
odom_sub = rospy.Subscriber('/t265/odom/sample', Odometry, odom_cb)
while (not rospy.is_shutdown()):
time.sleep(.1)
| 2.21875
| 2
|
src/attention_neuro/attention_module.py
|
matln/Attentive-Filtering-Network
| 49
|
12783721
|
<filename>src/attention_neuro/attention_module.py
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from basic_layers import ResidualBlock, L1Penalty
class AttentionModule_stg0(nn.Module):
"""
attention module with softmax branch and trunk branch, Residual Attention Network CVPR 2017
--(trunk)-->RB(d=1)-->RB(d=1)-------------------------------------------------------------------------------------------------------------------------|
/ |
x -->RB(d=1)-| |
\ |
--(softmax)-->mp-->RB(d=2)-|-->mp-->RB(d=4)-|-->mp-->RB(d=8)-|-->mp-->RB(d=16)-->RB(d=1)-->up-+-->RB(d=1)-->up-+-->RB(d=1)-->up-+-->RB(d=1)-->up--| |
| | |--------------RB(d=1)-----------| | | | |
| |-------------------------------RB(d=1)----------------------------| | | |
|------------------------------------------------RB(d=1)---------------------------------------------| | |
| |
|---sigmoid<--conv1*1<--conv1*1<--| |
out<--RB(d=1)<--+--<--* |
|-----|-------------------------------------|
"""
def __init__(self, in_channels, out_channels, size1=(128,545), size2=(120,529), size3=(104,497), size4=(72,186), l1weight=0.2):
super(AttentionModule_stg0, self).__init__()
self.l1weight = l1weight
self.pre = ResidualBlock(in_channels, 1)
## trunk branch
self.trunk = nn.Sequential(
ResidualBlock(in_channels, 1),
ResidualBlock(in_channels, 1)
)
## softmax branch: bottom-up
self.mp1 = nn.MaxPool2d(kernel_size=3, stride=(1,1))
self.sm1 = ResidualBlock(in_channels, (4,8))
self.skip1 = ResidualBlock(in_channels, 1)
self.mp2 = nn.MaxPool2d(kernel_size=3, stride=(1,1))
self.sm2 = ResidualBlock(in_channels, (8,16))
self.skip2 = ResidualBlock(in_channels, 1)
self.mp3 = nn.MaxPool2d(kernel_size=3, stride=(1,2))
self.sm3 = ResidualBlock(in_channels, (16,32))
self.skip3 = ResidualBlock(in_channels, 1)
self.mp4 = nn.MaxPool2d(kernel_size=3, stride=(2,2))
self.sm4 = nn.Sequential(
ResidualBlock(in_channels, (16,32)),
ResidualBlock(in_channels, 1)
)
## softmax branch: top-down
self.up4 = nn.UpsamplingBilinear2d(size=size4)
self.sm5 = ResidualBlock(in_channels, 1)
self.up3 = nn.UpsamplingBilinear2d(size=size3)
self.sm6 = ResidualBlock(in_channels, 1)
self.up2 = nn.UpsamplingBilinear2d(size=size2)
self.sm7 = ResidualBlock(in_channels, 1)
self.up1 = nn.UpsamplingBilinear2d(size=size1)
# 1*1 convolution blocks
self.conv1 = nn.Sequential(
nn.BatchNorm2d(in_channels),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels, in_channels, kernel_size=1, stride=1, bias=False),
nn.BatchNorm2d(in_channels),
nn.ReLU(inplace=True),
nn.Conv2d(in_channels, in_channels , kernel_size=1, stride=1, bias=False),
#nn.Sigmoid()
nn.Softmax2d()
)
self.post = ResidualBlock(in_channels, 1)
def forward(self, x):
#print('attention!')
x = self.pre(x)
#print('pre', x.size())
out_trunk = self.trunk(x)
#print('trunk', out_trunk.size())
out_mp1 = self.mp1(x)
#print('mp1', out_mp1.size())
out_sm1 = self.sm1(out_mp1)
#print('sm1', out_sm1.size())
out_skip1 = self.skip1(out_sm1)
#print('skip1', out_skip1.size())
out_mp2 = self.mp2(out_sm1)
#print('mp2', out_mp2.size())
out_sm2 = self.sm2(out_mp2)
#print('sm2', out_sm2.size())
out_skip2 = self.skip2(out_sm2)
#print('skip2', out_skip2.size())
out_mp3 = self.mp3(out_sm2)
#print('mp3', out_mp3.size())
out_sm3 = self.sm3(out_mp3)
#print('sm3', out_sm3.size())
out_skip3 = self.skip3(out_sm3)
#print('skip3', out_skip3.size())
out_mp4 = self.mp4(out_sm3)
#print('mp4', out_mp4.size())
out_sm4 = self.sm4(out_mp4)
#print('sm4', out_sm4.size())
out_up4 = self.up4(out_sm4)
#print('up4', out_up4.size())
out = out_up4 + out_skip3
#print('out', out.size())
out_sm5 = self.sm5(out)
#print('sm5', out_sm5.size())
out_up3 = self.up3(out_sm5)
#print('up3', out_up3.size())
out = out_up3 + out_skip2
#print('out', out.size())
out_sm6 = self.sm6(out)
#print('sm6', out_sm6.size())
out_up2 = self.up2(out_sm6)
#print('up2', out_up2.size())
out = out_up2 + out_skip1
#print('out', out.size())
out_sm7 = self.sm7(out)
#print('sm7', out_sm7.size())
out_up1 = self.up1(out_sm7)
#print('up1', out_up1.size())
out_conv1 = self.conv1(out_up1)
#print('conv1', out_conv1.size())
#out = (out_conv1) * out_trunk
out = (1 + out_conv1) * out_trunk
#print('out', out.size())
out_post = self.post(out)
#print('post', out_post.size())
return out_post
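# Illustrative smoke test (an assumption, not part of the original module): the default
# size1..size4 arguments suggest an input of spatial size (128, 545); the channel count of 16
# and the exact behaviour of basic_layers.ResidualBlock are assumed here, not verified.
if __name__ == "__main__":
    module = AttentionModule_stg0(in_channels=16, out_channels=16)
    dummy = torch.randn(1, 16, 128, 545)
    print(module(dummy).size())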
| 2.359375
| 2
|
protobot_dog_bringup/scripts/bring_up.py
|
robottime/protobot_dog
| 3
|
12783722
|
#!/usr/bin/env python
import rospy
import sys
import math
import time
import serial
from std_msgs.msg import Float64
from geometry_msgs.msg import Point
from threading import Lock
lock = Lock()
serial_com = None
def callback(data,callback_args):
global serial_com
lock.acquire()
#print data,callback_args
if(serial_com!=None):
p = data.data
if(callback_args==2) or (callback_args==3) or (callback_args==4) or (callback_args==5):
p = p*-1
serial_com.write('m '+str(callback_args)+' '+str(int(p*100)/100.0)+'\r\n')
serial_com.flushInput()
#time.sleep(0.05)
#print '1:',serial_com.readline()
#print '2:',serial_com.readline()
#print '3:',serial_com.readline()
#print '4:',serial_com.readline()
lock.release()
def shutdown():
global serial_com
lock.acquire()
if(serial_com!=None):
for i in range(8):
serial_com.write('m '+str(i)+' 0\r\n')
time.sleep(0.01)
serial_com.write('m '+str(i)+' 0\r\n')
time.sleep(0.01)
serial_com.close()
serial_com = None
lock.release()
def main():
global serial_com
rospy.init_node('protobot_dog',anonymous=True)
rospy.on_shutdown(shutdown)
port = rospy.get_param('~serial_port','/dev/ttyACM0')
baud = rospy.get_param('~serial_baudrate',9600)
try:
serial_com = serial.Serial(port,baud,timeout=0.02)
print 'serial ',port, ' open succeed at ',baud
except Exception,e:
print 'serial ',port, ' open failed at ',baud
print e
raise Exception
time.sleep(2)
serial_com.write('begin\r\n')
time.sleep(2)
serial_com.write('cd zen\r\n')
time.sleep(2)
print serial_com.readline()
print serial_com.readline()
print serial_com.readline()
print serial_com.readline()
serial_com.flushInput()
rf_upper_leg = rospy.Subscriber('rf_upper_joint_position_controller/command',Float64,callback,callback_args=0,queue_size=1)
rf_lower_leg = rospy.Subscriber('rf_lower_joint_position_controller/command',Float64,callback,callback_args=1,queue_size=1)
lf_upper_leg = rospy.Subscriber('lf_upper_joint_position_controller/command',Float64,callback,callback_args=2,queue_size=1)
lf_lower_leg = rospy.Subscriber('lf_lower_joint_position_controller/command',Float64,callback,callback_args=3,queue_size=1)
rb_upper_leg = rospy.Subscriber('rb_upper_joint_position_controller/command',Float64,callback,callback_args=6,queue_size=1)
rb_lower_leg = rospy.Subscriber('rb_lower_joint_position_controller/command',Float64,callback,callback_args=7,queue_size=1)
lb_upper_leg = rospy.Subscriber('lb_upper_joint_position_controller/command',Float64,callback,callback_args=4,queue_size=1)
lb_lower_leg = rospy.Subscriber('lb_lower_joint_position_controller/command',Float64,callback,callback_args=5,queue_size=1)
rospy.spin()
if __name__ == '__main__':
try:
print 'bring up a dog'
main()
except rospy.ROSInterruptException:
sys.exit()
| 2.5
| 2
|
train.py
|
don6105/OCR-Captcha-Recognition
| 0
|
12783723
|
<filename>train.py
#!/usr/bin/python3
import cv2
import numpy as np
import os
import pickle
import process_img
import download_img
img_area = 40 * 40
download_img.run('https://www.yiqifa.com/front/common/getcode')
process_img.run('img', 'char')
filenames = os.listdir("label")
samples = np.empty((0, img_area))
labels = []
for filename in filenames:
filepath = os.path.join("label", filename)
label = filename.split(".")[0].split("_")[-1]
labels.append(label)
im = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
sample = im.reshape((1, img_area)).astype(np.float32)
samples = np.append(samples, sample, 0)
samples = samples.astype(np.float32)
unique_labels = list(set(labels))
unique_ids = list(range(len(unique_labels)))
label_id_map = dict(zip(unique_labels, unique_ids))
id_label_map = dict(zip(unique_ids, unique_labels))
label_ids = list(map(lambda x: label_id_map[x], labels))
label_ids = np.array(label_ids).reshape((-1, 1)).astype(np.float32)
model = cv2.ml.KNearest_create()
model.train(samples, cv2.ml.ROW_SAMPLE, label_ids)
model.save('model.xml')
print('training finish. save as model.xml')
# Serialize the id-to-label mapping so it can be reused at prediction time
with open('id_label_map.txt', 'wb') as f:
    pickle.dump(id_label_map, f)
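# Illustrative sketch (not part of the original script): how the trained `model` and
# `id_label_map` above could be reused to classify a single 40x40 character image;
# 'char/sample.png' is a hypothetical example path.
def predict_char(image_path='char/sample.png'):
    im = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
    sample = im.reshape((1, img_area)).astype(np.float32)
    _, results, _, _ = model.findNearest(sample, k=3)
    return id_label_map[int(results[0][0])]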
| 2.96875
| 3
|
tests/test_instrumentation/test_trace_back.py
|
piotrmaslanka/satella
| 12
|
12783724
|
import io
import pickle
import sys
import unittest
from satella.instrumentation import Traceback
class TestTraceback(unittest.TestCase):
def test_no_exc(self):
tb = Traceback()
byte = io.BytesIO()
byte2 = tb.pickle()
tb.pickle_to(byte)
byte.seek(0)
tb2 = Traceback.from_pickle(byte)
tb3 = Traceback.from_pickle(byte2)
self.assertEqual(tb, tb2)
self.assertEqual(tb2, tb3)
def test_json(self):
try:
raise ValueError(u'hello')
except ValueError:
tb = Traceback()
js = tb.to_json()
self.assertEqual(tb, Traceback.from_json(js))
def test_unserialize_error(self):
a = 'test'
b = pickle.dumps(a)
self.assertRaises(ValueError, lambda: Traceback.from_pickle(b))
def test_tb(self):
try:
loc = u'hello world'
raise ValueError(u'hello')
except ValueError:
tb = Traceback()
p_fmt = tb.pretty_format()
else:
self.fail('exception not raised')
self.assertTrue(p_fmt)
def test_issue_21(self):
try:
loc = u'hello world'
raise ValueError(u'hello')
except ValueError:
tb = Traceback()
a = tb.pickle()
self.assertIsInstance(pickle.loads(a), Traceback)
def test_normal_stack_frames(self):
tb = Traceback(list(sys._current_frames().values())[0])
tb.pretty_format()
def test_compression_happens(self):
try:
loc = ' ' * (10 * 1024 * 1024)
raise ValueError('hello')
except ValueError:
tb = Traceback()
self.assertLess(len(pickle.dumps(tb, -1)), 9 * 1024 * 1024)
| 2.4375
| 2
|
dAAMs/lda.py
|
yuxiang-zhou/DenseDeformableModel
| 0
|
12783725
|
<gh_stars>0
from numpy import linalg
from menpo.visualize import print_dynamic
import itertools
import functools
import warnings
from sklearn.utils import check_X_y, check_array
from sklearn.utils.extmath import safe_sparse_dot
import numpy as np
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
    means : array-like, shape (n_classes, n_features)
        Class means.
"""
means = []
classes = np.unique(y)
for group in classes:
Xg = X[y == group, :]
means.append(Xg.mean(0))
return np.asarray(means)
def lda(X, y, tol=0.00001, n_components=None):
"""SVD solver.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
X,y = check_X_y(X,y)
_, y_t = np.unique(y, return_inverse=True)
    priors_ = np.bincount(y_t) / float(len(y))
classes_ = np.unique(y)
n_samples, n_features = X.shape
n_classes = len(classes_)
print_dynamic('Calculate Class Mean')
means_ = _class_means(X, y)
Xc = []
for idx, group in enumerate(classes_):
Xg = X[y == group, :]
Xc.append(Xg - means_[idx])
xbar_ = np.dot(priors_, means_)
Xc = np.concatenate(Xc, axis=0)
print_dynamic('# 1) within (univariate) scaling by with classes std-dev')
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
print_dynamic('# 2) Within variance scaling')
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, V = linalg.svd(X, full_matrices=False)
rank = np.sum(S > tol)
if rank < n_features:
warnings.warn("Variables are collinear.")
# Scaling of within covariance is: V' 1/S
scalings = (V[:rank] / std).T / S[:rank]
print_dynamic('# 3) Between variance scaling')
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * priors_) * fac)) *
(means_ - xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, V = linalg.svd(X, full_matrices=0)
rank = np.sum(S > tol * S[0])
scalings_ = np.dot(scalings, V.T[:, :rank])
coef = np.dot(means_ - xbar_, scalings_)
intercept_ = (-0.5 * np.sum(coef ** 2, axis=1)
+ np.log(priors_))
coef_ = np.dot(coef, scalings_.T)
intercept_ -= np.dot(xbar_, coef_.T)
return intercept_, coef_, classes_
def n_fold_generate(data, n_fold=4):
it = itertools.groupby(data, lambda x: x[0])
folded_data = [[] for i in range(n_fold)]
for grp in it:
for j,d in enumerate(chunk(list(grp[1]), n_fold)):
folded_data[j].append(d)
fdata = [functools.reduce(lambda x,y: x+y, f) for f in folded_data]
return fdata
def chunk(seq, num):
np.random.shuffle(seq)
avg = len(seq) / float(num)
out = []
last = 0.0
while last < len(seq):
out.append(seq[int(last):int(last + avg)])
last += avg
return out
def decision_function(X, intercept_, coef_):
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
scores = safe_sparse_dot(X, coef_.T, dense_output=True) + intercept_
scores = scores.ravel() if scores.shape[1] == 1 else scores
return scores
def predict(X, intercept_, coef_, classes_):
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
scores = safe_sparse_dot(X, coef_.T, dense_output=True) + intercept_
scores = scores.ravel() if scores.shape[1] == 1 else scores
if len(scores.shape) == 1:
        indices = (scores > 0).astype(int)
else:
indices = scores.argmax(axis=1)
return classes_[indices]
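if __name__ == "__main__":
    # Minimal synthetic sanity check (illustrative only, not part of the original module):
    # two well-separated Gaussian blobs should be classified back correctly by predict().
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(20, 5) + 3.0, rng.randn(20, 5) - 3.0])
    y = np.array([0] * 20 + [1] * 20)
    intercept, coef, classes = lda(X, y)
    print(predict(X, intercept, coef, classes))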
| 2.375
| 2
|
export_util.py
|
abhinavsp0730/medium-to-fastpages
| 0
|
12783726
|
def save_md(text, path, filename):
with open(path + '/'+filename+'.md','w') as file:
file.write(text)
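# Example (illustrative): save_md("# My post", "_posts", "2020-01-01-my-post")
# writes the text to _posts/2020-01-01-my-post.md (the directory must already exist).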
| 2.71875
| 3
|
example/messages/conf.py
|
jpush/jmessage-api-python-client
| 12
|
12783727
|
<filename>example/messages/conf.py
# please put your app_key and master_secret here
app_key = u'6be9204c30b9473e87bad4dc'
master_secret = u'<KEY>'
| 1.039063
| 1
|
fastapi/api/v1/async.py
|
zhangnian/fastapi
| 33
|
12783728
|
<gh_stars>10-100
from flask import Blueprint
from fastapi.utils.http_util import render_ok
from fastapi.worker.tasks import async_add
bp = Blueprint("async", __name__, url_prefix='/async')
@bp.route('/add')
def add():
future = async_add.delay(1, 2)
return render_ok({'task_id': future.id})
@bp.route('/status/<task_id>')
def status(task_id):
future = async_add.AsyncResult(task_id)
if not future.ready():
return render_ok({'status': 'pending'})
return render_ok({'result': future.result})
| 2.40625
| 2
|
main.py
|
waterpepene/SpotiBar
| 0
|
12783729
|
<reponame>waterpepene/SpotiBar<filename>main.py
from functions import *
from GUI import *
from math import floor
from datetime import timedelta
sp = authentication()
visibility = True # visibility of the window
def spotiBar():
def songDuration():
durationms = int(track["item"]["duration_ms"])
return str(timedelta(0, 0, 0, durationms))[2:].split(".")[0]
def progressTime(): # These functions convert time from ms to
progress = int(track["progress_ms"]) # M:S format to be shown on the start and
# end labels
return str(timedelta(0, 0, 0, progress))[2:].split(".")[0]
def barProgress():
song_seconds = floor(int(track["item"]["duration_ms"]) / 1000)
current_seconds = floor(int(track["progress_ms"]) / 1000) # This function updates the progress bar
# but it is crappy so i'll look for
updateProgressBar(song_seconds, current_seconds) # better ways
while True:
event, values = overlayW.read(timeout=500, timeout_key="update")
toggleWindowVisibility(visibility)
try:
track = sp.currently_playing()
imageurl = track["item"]["album"]["images"][1]["url"] # variables about currently playing song
songImage(imageurl)
songname, artistname = updateSongName(track), track['item']['album']['artists'][0]['name']
except TypeError:
pass
if event in "update":
try:
barProgress()
updateText("songname", songname + "\n" + artistname) # All these update the elements on the
updateText("-start-", progressTime()) # screen
updateText("-end-", songDuration())
updateSound(sp.current_playback()["device"]["volume_percent"])
sp.volume(slider(values))
overlayW["chosenplaylists"].update(values=readChosenPlaylists())
playUserChosenPlaylist(sp, values)
is_liked = checkIfCurrentSongIsLiked(sp)
updatePlayButton(track, sp)
except TypeError:
updateText("songname", "Start a song on\nSpotify to start.")
if event == "-next-":
sp.next_track()
updateText("songname", songname + "\n" + artistname)
updateText("-end-", songDuration())
if event == "-previous-":
sp.previous_track()
updateText("songname", songname + "\n" + artistname)
if event == "-play-":
sp.start_playback()
if event == "-pause-":
sp.pause_playback()
if event == "-sound1-" or event == "-soundmax-":
current_volume = sp.current_playback()["device"]["volume_percent"]
sp.volume(0)
updateVisibility("-soundmuted-", True, "-sound1-", "-soundmax-")
if event == "-soundmuted-":
sp.volume(current_volume)
if current_volume >= 70:
updateVisibility("-soundmax-", True)
else:
updateVisibility("-sound1-", True, "-soundmax-")
updateVisibility("-soundmuted-", False)
if event == "settings":
uris = playlistsToJSON(sp)
playlist_names = readPlJson()
names_chosen = playlistWin(playlist_names) # This updates the playlist names
chosen_playlists = {}
for item in names_chosen:
if item in playlist_names:
chosen_playlists[item] = uris[item]
writeChosenPlaylists(chosen_playlists)
if event == "liked":
addOrRemoveLikedSong(sp)
if event == "notliked":
addOrRemoveLikedSong(sp)
hideSpotify()
spotiBar()
| 2.84375
| 3
|
1390/main.py
|
pauvrepetit/leetcode
| 0
|
12783730
|
<gh_stars>0
# 1390. Four Divisors
#
# 20200729
# huao
# Check each number; if it has exactly four divisors, add the sum of those divisors to the total.
from typing import List
from math import sqrt
class Solution:
def sumFourDivisors(self, nums: List[int]) -> int:
sum = 0
for num in nums:
sum += self.check(num)
return sum
    def check(self, num: int) -> int:
count = 2
sum = 1 + num
for i in range(2, int(sqrt(num)) + 1):
if num % i == 0 and num // i != i:
count += 2
sum += i + num // i
if count > 4:
return 0
elif num % i == 0 and num // i == i:
return 0
if count == 4:
return sum
else:
return 0
sol = Solution()
print(sol.sumFourDivisors([21, 1, 2, 3, 4]))
| 3.328125
| 3
|
data_ingestion_service/migrations/0002_storedfiles_category.py
|
girleffect/core-data-ingestion-service
| 0
|
12783731
|
# Generated by Django 2.1.1 on 2019-01-16 09:49
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data_ingestion_service', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='storedfiles',
name='category',
field=models.CharField(blank=True, choices=[('offline_survey_data', 'Offline Survey Data')], max_length=255, null=True),
),
]
| 1.734375
| 2
|
ampgo --username andrea.gavana@gmail.com/go_amp.py
|
andyfaff/amgo
| 9
|
12783732
|
from __future__ import print_function
import numpy
OPENOPT = SCIPY = True
try:
from openopt import NLP
except ImportError:
OPENOPT = False
try:
from scipy.optimize import minimize
except ImportError:
SCIPY = False
SCIPY_LOCAL_SOLVERS = ['Nelder-Mead', 'Powell', 'L-BFGS-B', 'TNC', 'SLSQP']
OPENOPT_LOCAL_SOLVERS = ['bobyqa', 'ptn', 'slmvm2', 'ralg', 'mma', 'auglag', 'sqlcp']
def AMPGO(objfun, x0, args=(), local='L-BFGS-B', local_opts=None, bounds=None, maxfunevals=None,
totaliter=20, maxiter=5, glbtol=1e-5, eps1=0.02, eps2=0.1, tabulistsize=5,
tabustrategy='farthest', fmin=-numpy.inf, disp=None):
"""
Finds the global minimum of a function using the AMPGO (Adaptive Memory Programming for
Global Optimization) algorithm.
:param `objfun`: Function to be optimized, in the form ``f(x, *args)``.
:type `objfun`: callable
:param `args`: Additional arguments passed to `objfun`.
:type `args`: tuple
:param `local`: The local minimization method (e.g. ``"L-BFGS-B"``). It can be one of the available
`scipy` local solvers or `OpenOpt` solvers.
:type `local`: string
:param `bounds`: A list of tuples specifying the lower and upper bound for each independent variable
[(`xl0`, `xu0`), (`xl1`, `xu1`), ...]
:type `bounds`: list
:param `maxfunevals`: The maximum number of function evaluations allowed.
:type `maxfunevals`: integer
:param `totaliter`: The maximum number of global iterations allowed.
:type `totaliter`: integer
:param `maxiter`: The maximum number of `Tabu Tunnelling` iterations allowed during each global iteration.
:type `maxiter`: integer
:param `glbtol`: The optimization will stop if the absolute difference between the current minimum objective
function value and the provided global optimum (`fmin`) is less than `glbtol`.
:type `glbtol`: float
:param `eps1`: A constant used to define an aspiration value for the objective function during the Tunnelling phase.
:type `eps1`: float
:param `eps2`: Perturbation factor used to move away from the latest local minimum at the start of a Tunnelling phase.
:type `eps2`: float
:param `tabulistsize`: The size of the tabu search list (a circular list).
:type `tabulistsize`: integer
:param `tabustrategy`: The strategy to use when the size of the tabu list exceeds `tabulistsize`. It can be
'oldest' to drop the oldest point from the tabu list or 'farthest' to drop the element farthest from
the last local minimum found.
:type `tabustrategy`: string
:param `fmin`: If known, the objective function global optimum value.
:type `fmin`: float
:param `disp`: If zero or defaulted, then no output is printed on screen. If a positive number, then status
messages are printed.
:type `disp`: integer
:returns: A tuple of 5 elements, in the following order:
1. **best_x** (`array_like`): the estimated position of the global minimum.
2. **best_f** (`float`): the value of `objfun` at the minimum.
3. **evaluations** (`integer`): the number of function evaluations.
4. **msg** (`string`): a message describes the cause of the termination.
5. **tunnel_info** (`tuple`): a tuple containing the total number of Tunnelling phases performed and the
successful ones.
:rtype: `tuple`
The detailed implementation of AMPGO is described in the paper
"Adaptive Memory Programming for Constrained Global Optimization" located here:
http://leeds-faculty.colorado.edu/glover/fred%20pubs/416%20-%20AMP%20(TS)%20for%20Constrained%20Global%20Opt%20w%20Lasdon%20et%20al%20.pdf
Copyright 2014 <NAME>
"""
if local not in SCIPY_LOCAL_SOLVERS + OPENOPT_LOCAL_SOLVERS:
raise Exception('Invalid local solver selected: %s'%local)
if local in SCIPY_LOCAL_SOLVERS and not SCIPY:
raise Exception('The selected solver %s is not available as there is no scipy installation'%local)
if local in OPENOPT_LOCAL_SOLVERS and not OPENOPT:
raise Exception('The selected solver %s is not available as there is no OpenOpt installation'%local)
x0 = numpy.atleast_1d(x0)
n = len(x0)
if bounds is None:
bounds = [(None, None)] * n
if len(bounds) != n:
raise ValueError('length of x0 != length of bounds')
low = [0]*n
up = [0]*n
for i in range(n):
if bounds[i] is None:
l, u = -numpy.inf, numpy.inf
else:
l, u = bounds[i]
if l is None:
low[i] = -numpy.inf
else:
low[i] = l
if u is None:
up[i] = numpy.inf
else:
up[i] = u
if maxfunevals is None:
maxfunevals = max(100, 10*len(x0))
if tabulistsize < 1:
raise Exception('Invalid tabulistsize specified: %s. It should be an integer greater than zero.'%tabulistsize)
if tabustrategy not in ['oldest', 'farthest']:
raise Exception('Invalid tabustrategy specified: %s. It must be one of "oldest" or "farthest"'%tabustrategy)
iprint = 50
if disp is None or disp <= 0:
disp = 0
iprint = -1
low = numpy.asarray(low)
up = numpy.asarray(up)
tabulist = []
best_f = numpy.inf
best_x = x0
global_iter = 0
all_tunnel = success_tunnel = 0
evaluations = 0
if glbtol < 1e-8:
local_tol = glbtol
else:
local_tol = 1e-8
while 1:
if disp > 0:
print('\n')
print('='*72)
print('Starting MINIMIZATION Phase %-3d'%(global_iter+1))
print('='*72)
if local in OPENOPT_LOCAL_SOLVERS:
problem = NLP(objfun, x0, lb=low, ub=up, maxFunEvals=max(1, maxfunevals), ftol=local_tol, iprint=iprint)
problem.args = args
results = problem.solve(local)
xf, yf, num_fun = results.xf, results.ff, results.evals['f']
else:
options = {'maxiter': max(1, maxfunevals), 'disp': disp}
if local_opts is not None:
options.update(local_opts)
res = minimize(objfun, x0, args=args, method=local, bounds=bounds, tol=local_tol, options=options)
xf, yf, num_fun = res['x'], res['fun'], res['nfev']
maxfunevals -= num_fun
evaluations += num_fun
if yf < best_f:
best_f = yf
best_x = xf
if disp > 0:
print('\n\n ==> Reached local minimum: %s\n'%yf)
if best_f < fmin + glbtol:
if disp > 0:
print('='*72)
return best_x, best_f, evaluations, 'Optimization terminated successfully', (all_tunnel, success_tunnel)
if maxfunevals <= 0:
if disp > 0:
print('='*72)
return best_x, best_f, evaluations, 'Maximum number of function evaluations exceeded', (all_tunnel, success_tunnel)
tabulist = drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy)
tabulist.append(xf)
i = improve = 0
while i < maxiter and improve == 0:
if disp > 0:
print('-'*72)
print('Starting TUNNELLING Phase (%3d-%3d)'%(global_iter+1, i+1))
print('-'*72)
all_tunnel += 1
r = numpy.random.uniform(-1.0, 1.0, size=(n, ))
beta = eps2*numpy.linalg.norm(xf)/numpy.linalg.norm(r)
if numpy.abs(beta) < 1e-8:
beta = eps2
x0 = xf + beta*r
x0 = numpy.where(x0 < low, low, x0)
x0 = numpy.where(x0 > up , up , x0)
aspiration = best_f - eps1*(1.0 + numpy.abs(best_f))
tunnel_args = tuple([objfun, aspiration, tabulist] + list(args))
if local in OPENOPT_LOCAL_SOLVERS:
problem = NLP(tunnel, x0, lb=low, ub=up, maxFunEvals=max(1, maxfunevals), ftol=local_tol, iprint=iprint)
problem.args = tunnel_args
results = problem.solve(local)
xf, yf, num_fun = results.xf, results.ff, results.evals['f']
else:
options = {'maxiter': max(1, maxfunevals), 'disp': disp}
if local_opts is not None:
options.update(local_opts)
res = minimize(tunnel, x0, args=tunnel_args, method=local, bounds=bounds, tol=local_tol, options=options)
xf, yf, num_fun = res['x'], res['fun'], res['nfev']
maxfunevals -= num_fun
evaluations += num_fun
yf = inverse_tunnel(xf, yf, aspiration, tabulist)
if yf <= best_f + glbtol:
oldf = best_f
best_f = yf
best_x = xf
improve = 1
success_tunnel += 1
if disp > 0:
print('\n\n ==> Successful tunnelling phase. Reached local minimum: %s < %s\n'%(yf, oldf))
if best_f < fmin + glbtol:
return best_x, best_f, evaluations, 'Optimization terminated successfully', (all_tunnel, success_tunnel)
i += 1
if maxfunevals <= 0:
return best_x, best_f, evaluations, 'Maximum number of function evaluations exceeded', (all_tunnel, success_tunnel)
tabulist = drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy)
tabulist.append(xf)
if disp > 0:
print('='*72)
global_iter += 1
x0 = xf.copy()
if global_iter >= totaliter:
return best_x, best_f, evaluations, 'Maximum number of global iterations exceeded', (all_tunnel, success_tunnel)
if best_f < fmin + glbtol:
return best_x, best_f, evaluations, 'Optimization terminated successfully', (all_tunnel, success_tunnel)
def drop_tabu_points(xf, tabulist, tabulistsize, tabustrategy):
if len(tabulist) < tabulistsize:
return tabulist
if tabustrategy == 'oldest':
tabulist.pop(0)
else:
distance = numpy.sqrt(numpy.sum((tabulist-xf)**2, axis=1))
index = numpy.argmax(distance)
tabulist.pop(index)
return tabulist
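# The tunnelling objective below implements T(x) = (f(x) - aspiration)^2 / prod_i ||x - tabu_i||:
# the denominator shrinks near previously visited (tabu) minima, inflating T and steering the
# local solver away from regions that have already been explored.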
def tunnel(x0, *args):
objfun, aspiration, tabulist = args[0:3]
fun_args = ()
if len(args) > 3:
fun_args = tuple(args[3:])
numerator = (objfun(x0, *fun_args) - aspiration)**2
denominator = 1.0
for tabu in tabulist:
denominator = denominator*numpy.sqrt(numpy.sum((x0 - tabu)**2))
ytf = numerator/denominator
return ytf
def inverse_tunnel(xtf, ytf, aspiration, tabulist):
denominator = 1.0
for tabu in tabulist:
denominator = denominator*numpy.sqrt(numpy.sum((xtf - tabu)**2))
numerator = ytf*denominator
yf = aspiration + numpy.sqrt(ytf*denominator)
return yf
if __name__ == '__main__':
import os
import go_benchmark
os.system('cls')
for tests in ['Bird']:
klass = getattr(go_benchmark, tests)()
x0 = klass.generator()
fmin = klass.fglob
bounds = klass.bounds
tolfun = 1e-6
xf, yf, fun_evals, msg, tt = AMPGO(klass.evaluator, x0, args=(), local='L-BFGS-B', bounds=bounds,
maxfunevals=20000, totaliter=2000, maxiter=5, eps1=0.02, eps2=0.1,
tabulistsize=5, tabustrategy='farthest', fmin=fmin, disp=1, glbtol=tolfun)
xb = numpy.asarray(klass.global_optimum)
if xb.ndim == 2:
xb = xb[0, :]
print('\n\n')
print('F_glob :', klass.evaluator(xb))
print('F_best :', yf)
print('X_best :', xf)
print('F_evals:', fun_evals)
print('Message:', msg)
print('Tunnels:', tt)
| 2.375
| 2
|
setup.py
|
harmsm/topiary
| 0
|
12783733
|
<filename>setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import os
import sys
from shutil import rmtree
from setuptools import find_packages, setup, Command, Extension
# Package meta-data.
NAME = 'topiary'
DESCRIPTION = 'A lightweight python package using pandas dataframes for phylogenetics.'
URL = 'https://github.com/harmslab/topiary'
EMAIL = '<EMAIL>'
AUTHOR = '<NAME>'
REQUIRES_PYTHON = '>=3.7.0'
VERSION = None
# What packages are required for this module to be executed?
REQUIRED = [
"biopython>=1.79",
"ete3>=3.1.2",
"opentree>=1.0.1",
"tqdm>=4.61.2",
"dendropy>=4.5.2",
"numpy>=1.21.1",
"pandas>=1.3.1",
"matplotlib>=3.4.2",
"pastml>=1.9.34",
]
here = os.path.abspath(os.path.dirname(__file__))
# Import the README and use it as the long-description.
# Note: this will only work if 'README.md' is present in your MANIFEST.in file!
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
# Load the package's __version__.py module as a dictionary.
about = {}
if not VERSION:
with open(os.path.join(here, NAME, '__version__.py')) as f:
exec(f.read(), about)
else:
about['__version__'] = VERSION
# Where the magic happens:
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type='text/markdown',
author=AUTHOR,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
packages=find_packages(exclude=('tests',)),
install_requires=REQUIRED,
extras_require = {
'test': ['pytest'],
},
scripts=['bin/run-raxml','bin/setup-generax'],
include_package_data=True,
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
keywords='phylogenetics ASR'
)
| 1.96875
| 2
|
hello/migrations/0001_initial.py
|
fadeawaygod/DjangoTemplate
| 0
|
12783734
|
<reponame>fadeawaygod/DjangoTemplate
# Generated by Django 3.2.3 on 2021-05-21 01:48
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=20, verbose_name='title')),
('content', models.CharField(max_length=200, verbose_name='content')),
],
),
]
| 1.640625
| 2
|
car-controller/src/mainController/Controller/TrajectoryPlanning/TrajectoryPlanning.py
|
iisys-hof/autonomous-driving
| 0
|
12783735
|
# @PascalPuchtler
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import math
import sys
import time
import numpy as np
from planar import Polygon
from Controller.AreaMap.MapPoint import MapPoint
from Controller.MoveController.CarModel import CarModel
from .NearestNeighbor import NearestNeighbor
from .SupportPointChain import SupportPointChain
class TrajectoryPlanning:
def __init__(self, areaMap, emergencyStopQueue):
self.areaMap = areaMap
self.emergencyStopQueue = emergencyStopQueue
self.carModel = CarModel()
self.nearestNeighbor = NearestNeighbor()
self.supportPointChain = SupportPointChain()
self.reset()
self.maxDriveableSlope = 3
self.normalMode = True
self.minImageCount = 6
self.imageCount = 0
def reset(self):
self.newestSupportChain = []
self.callculatedNextMove = None
def nextMove(self):
self.imageCount +=1
if self.minImageCount > self.imageCount:
self.callculatedNextMove = {'x': 0, 'y': 0, 'm': 0}
return self.callculatedNextMove
nextMove = self.handleNextMove()
if not self.emergencyStopQueue.empty():
            print('emergency mode')
self.emergencyStopQueue.get()
self.normalMode = False
elif self.normalMode is False and nextMove is not None:
self.normalMode = True
print('reset Mode')
return {'command': 'resetSavety'}
if self.normalMode:
if nextMove is not None:
self.callculatedNextMove = nextMove
return nextMove
self.callculatedNextMove = {'x': 0, 'y': 0, 'm': 0}
return {'x': 0, 'y': 0, 'm': 0}
else:
self.callculatedNextMove = {'x': 0, 'y': 0, 'm': 0}
self.areaMap.reset()
self.imageCount=0
return {'x': 0, 'y': 0, 'm': 0}
def handleNextMove(self):
if not self.areaMap.isBorderAvailable():
# print('no border available')
return None
supportPoints = self.nearestNeighbor.getNearestNeighbor(self.areaMap.left, self.areaMap.right)
supportPointChain = self.supportPointChain.getSupportPointChain(supportPoints, self.areaMap.robotPosition)
self.newestSupportChain = supportPointChain
if len(supportPointChain)<=1:
print('no possible target in drive direction')
return None
nextMove = self.callculateNextTarget(self.areaMap.robotPosition, supportPointChain)
return nextMove
def callculateNextTarget(self,robotPosition, supportPointChain):
nextPoint = supportPointChain[1]
offsetNextPoint = robotPosition.getRelativeOffsetsToPoint(nextPoint[0],nextPoint[1])
if len(supportPointChain) >= 3:
secondPoint = supportPointChain[2]
offsetSecondPoint = robotPosition.getRelativeOffsetsToPoint(secondPoint[0],secondPoint[1])
slope = self.slope(offsetNextPoint, offsetSecondPoint)
if offsetNextPoint[1] < offsetSecondPoint[1]:
slope = -slope
else:
slope = 0
return {'x': offsetNextPoint[1], 'y': -offsetNextPoint[0], 'm': slope/2}
def slope(self, point1, point2):
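        # Slope (point2[0] - point1[0]) / (point2[1] - point1[1]) between the two relative
        # offsets, clipped to +/- maxDriveableSlope so the resulting steering command stays feasible.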
m = (point2[0]-point1[0])/(point2[1]-point1[1])
m= np.clip(m, -self.maxDriveableSlope,self.maxDriveableSlope)
return m
| 2.140625
| 2
|
detect_mask_video.py
|
nehabheemisetty/Face-mask-detection-
| 0
|
12783736
|
<filename>detect_mask_video.py
# USAGE
# python detect_mask_video.py
# import the necessary packages
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model
from imutils.video import VideoStream
import numpy as np
import argparse
import imutils
import time
import cv2
import os
from skimage.metrics import structural_similarity as ssim
import matplotlib.pyplot as plt
import pandas as pd
def match():
original = cv2.imread("data/original.JPG")
original = cv2.resize(original, (200,200))
# convert the images to grayscale
original = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
# In[5]:
dataset= pd.read_csv('data.csv',sep=',')
data = dataset.iloc[:, :]
data
x = data.iloc[:, :-1].values
d = dataset.iloc[:, 2]
#print(d[2])
# In[6]:
values = [];
for i in range(0,len(d)):
image = cv2.imread(str(d[i]))
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
values.append(compare_images(original, image, "Original vs."+str(d[i])) )
    # index of the most similar reference image (smallest MSE)
    min_index = 0
    for i in range(1, len(d)):
        if values[i] < values[min_index]:
            min_index = i
    print(x[min_index])
fig = plt.figure("Match")
#plt.suptitle("MSE: %.2f, SSIM: %.2f" % (m, s))
#plt.suptitle(x[min])
    plt.suptitle(str(x[min_index]))
#print(title);
# show first image
ax = fig.add_subplot(1, 2, 1)
plt.imshow(original, cmap = plt.cm.gray)
plt.axis("off")
# show the second image
ax = fig.add_subplot(1, 2, 2)
    image = cv2.imread(str(d[min_index]))
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
plt.imshow(image, cmap = plt.cm.gray)
plt.axis("off")
# show the images
plt.show()
    return str(x[min_index])
# In[2]:
def mse(imageA, imageB):
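    # Mean squared error between two equally sized grayscale images: sum of squared
    # pixel differences divided by the image area; lower means more similar.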
err = np.sum((imageA.astype("float") - imageB.astype("float")) ** 2)
err /= float(imageA.shape[0] * imageA.shape[1])
return err
# In[3]:
def compare_images(imageA, imageB, title):
m = mse(imageA, imageB)
s = ssim(imageA, imageB)
# setup the figure
# fig = plt.figure(title)
#plt.suptitle("MSE: %.2f, SSIM: %.2f" % (m, s))
#print(title);
# show first image
# ax = fig.add_subplot(1, 2, 1)
#plt.imshow(imageA, cmap = plt.cm.gray)
#plt.axis("off")
# show the second image
#ax = fig.add_subplot(1, 2, 2)
#plt.imshow(imageB, cmap = plt.cm.gray)
#plt.axis("off")
# show the images
# plt.show()
return m
def detect_and_predict_mask(frame, faceNet, maskNet):
# grab the dimensions of the frame and then construct a blob
# from it
(h, w) = frame.shape[:2]
blob = cv2.dnn.blobFromImage(frame, 1.0, (300, 300),
(104.0, 177.0, 123.0))
# pass the blob through the network and obtain the face detections
faceNet.setInput(blob)
detections = faceNet.forward()
# initialize our list of faces, their corresponding locations,
# and the list of predictions from our face mask network
faces = []
locs = []
preds = []
# loop over the detections
for i in range(0, detections.shape[2]):
# extract the confidence (i.e., probability) associated with
# the detection
confidence = detections[0, 0, i, 2]
# filter out weak detections by ensuring the confidence is
# greater than the minimum confidence
if confidence > args["confidence"]:
# compute the (x, y)-coordinates of the bounding box for
# the object
box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
(startX, startY, endX, endY) = box.astype("int")
# ensure the bounding boxes fall within the dimensions of
# the frame
(startX, startY) = (max(0, startX), max(0, startY))
(endX, endY) = (min(w - 1, endX), min(h - 1, endY))
# extract the face ROI, convert it from BGR to RGB channel
# ordering, resize it to 224x224, and preprocess it
face = frame[startY:endY, startX:endX]
face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
face = cv2.resize(face, (224, 224))
face = img_to_array(face)
face = preprocess_input(face)
# add the face and bounding boxes to their respective
# lists
faces.append(face)
locs.append((startX, startY, endX, endY))
    # only make predictions if at least one face was detected
if len(faces) > 0:
# for faster inference we'll make batch predictions on *all*
# faces at the same time rather than one-by-one predictions
# in the above `for` loop
faces = np.array(faces, dtype="float32")
preds = maskNet.predict(faces, batch_size=32)
    # return a 2-tuple of the face locations and their corresponding
    # predictions
return (locs, preds)
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--face", type=str,
default="face_detector",
help="path to face detector model directory")
ap.add_argument("-m", "--model", type=str,
default="mask_detector.model",
help="path to trained face mask detector model")
ap.add_argument("-c", "--confidence", type=float, default=0.5,
help="minimum probability to filter weak detections")
args = vars(ap.parse_args())
# load our serialized face detector model from disk
print("[INFO] loading face detector model...")
prototxtPath = os.path.sep.join([args["face"], "deploy.prototxt"])
weightsPath = os.path.sep.join([args["face"],
"res10_300x300_ssd_iter_140000.caffemodel"])
faceNet = cv2.dnn.readNet(prototxtPath, weightsPath)
# load the face mask detector model from disk
print("[INFO] loading face mask detector model...")
maskNet = load_model(args["model"])
# initialize the video stream and allow the camera sensor to warm up
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
# loop over the frames from the video stream
while True:
# grab the frame from the threaded video stream and resize it
# to have a maximum width of 400 pixels
frame = vs.read()
frame = imutils.resize(frame, width=400)
# detect faces in the frame and determine if they are wearing a
# face mask or not
(locs, preds) = detect_and_predict_mask(frame, faceNet, maskNet)
# loop over the detected face locations and their corresponding
# locations
    i = 0
    capture = 1  # default to "Mask" so the check below does not fail when no face is detected
for (box, pred) in zip(locs, preds):
# unpack the bounding box and predictions
(startX, startY, endX, endY) = box
(mask, withoutMask) = pred
# determine the class label and color we'll use to draw
# the bounding box and text
label = "Mask" if mask > withoutMask else "No Mask"
color = (0, 255, 0) if label == "Mask" else (0, 0, 255)
capture = 1 if label=="Mask" else 0
# include the probability in the label
label = "{}: {:.2f}%".format(label, max(mask, withoutMask) * 100)
# display the label and bounding box rectangle on the output
# frame
cv2.putText(frame, label, (startX, startY - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)
cv2.rectangle(frame, (startX, startY), (endX, endY), color, 2)
# show the output frame
cv2.imshow("Frame", frame)
if capture== 0 :
print("The details of the person who is not wearing the mask are:")
name = './data/original'+'.JPG'
i+=1
cv2.imwrite(name,frame)
person=match()
print(person)
''' # In[4]:
# load the images -- the original, the original + contrast,
# and the original + photoshop
original = cv2.imread("data/original.JPG")
original = cv2.resize(original, (200,200))
# convert the images to grayscale
original = cv2.cvtColor(original, cv2.COLOR_BGR2GRAY)
# In[5]:
dataset= pd.read_csv('data.csv',sep=',')
data = dataset.iloc[:, :]
data
x = data.iloc[:, :-1].values
d = dataset.iloc[:, 2]
print(d[2])
# In[6]:
values = [];
for i in range(0,len(d)):
image = cv2.imread(str(d[i]))
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
values.append(compare_images(original, image, "Original vs."+str(d[i])) )
min=values[0]
for i in range(1,len(d)):
if min>values[i]:
min=i
print(x[min])
fig = plt.figure("Match")
#plt.suptitle("MSE: %.2f, SSIM: %.2f" % (m, s))
#plt.suptitle(x[min])
plt.suptitle(str(x[min]))
#print(title);
# show first image
ax = fig.add_subplot(1, 2, 1)
plt.imshow(original, cmap = plt.cm.gray)
plt.axis("off")
# show the second image
ax = fig.add_subplot(1, 2, 2)
image = cv2.imread(str(d[min]))
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
plt.imshow(image, cmap = plt.cm.gray)
plt.axis("off")
# show the images
plt.show()
#print(dict[min]);'''
#if(label=="No Mask"):
# cv2.imwrite("C:\Users\Neha\Desktop\Face-Mask-Detection-master\original.JPG")
key = cv2.waitKey(1) & 0xFF
# if the `q` key was pressed, break from the loop
if key == ord("q"):
break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
| 2.796875
| 3
|
automator_v2.py
|
Zolhungaj/AMQ-List-Training-Tool
| 1
|
12783737
|
"""
This module downloads a lot of songs from anime music quiz
Dependencies:
ffmpeg
selenium
Firefox
geckodriver
"""
import os
import re
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
import time
import json
from pathlib import Path
import subprocess
import sqlite3
class Database:
def __init__(self, database_file):
self.database_file = database_file
conn = self.conn = sqlite3.connect(database_file)
conn.execute("""
CREATE TABLE IF NOT EXISTS downloaded(
source TEXT,
annSongId INTEGER
);""")
conn.commit()
def is_downloaded(self, song, source):
c = self.conn.cursor()
c.execute("""
SELECT source
FROM downloaded
WHERE source=(?) AND annSongId = (?)
""", (source, song["annSongId"],))
return c.fetchone() is not None
def add_downloaded(self, song, source):
self.conn.execute("""
INSERT INTO downloaded VALUES(?,?)
""", (source, song["annSongId"]))
self.conn.commit()
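# Example (illustrative): db = Database("downloaded.db");
# db.is_downloaded(song, url) is False until db.add_downloaded(song, url) records the
# (source, annSongId) pair, which is how save() below skips songs that were already fetched.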
def update_anime_lists(driver, anilist="", kitsu=""):
driver.execute_script('document.getElementById("mpNewsContainer").innerHTML = "Updating AniList...";')
status = driver.find_element_by_id("mpNewsContainer")
driver.execute_script("""new Listener("anime list update result", function (result) {
if (result.success) {
document.getElementById("mpNewsContainer").innerHTML = "Updated Successful: " + result.message;
} else {
document.getElementById("mpNewsContainer").innerHTML = "Update Unsuccessful: " + result.message;
}
}).bindListener()""")
driver.execute_script("""
socket.sendCommand({
type: "library",
command: "update anime list",
data: {
newUsername: arguments[0],
listType: 'ANILIST'
}
});""", anilist)
while True:
if status.text != "Updating AniList...":
break
time.sleep(0.5)
driver.execute_script('document.getElementById("mpNewsContainer").innerHTML = "Updating Kitsu...";')
driver.execute_script("""
socket.sendCommand({
type: "library",
command: "update anime list",
data: {
newUsername: arguments[0],
listType: 'KITSU'
}
});""", kitsu)
while True:
if status.text != "Updating Kitsu...":
break
time.sleep(0.5)
def get_question_list(driver):
driver.execute_script('document.getElementById("mpNewsContainer").innerHTML = "Loading Expand...";')
script ="""new Listener("expandLibrary questions", function (payload) {
expandLibrary.tackyVariable = (JSON.stringify(payload.questions));
document.getElementById("mpNewsContainer").innerHTML = "Expand Loaded!"
}).bindListener();
socket.sendCommand({
type: "library",
command: "expandLibrary questions"
});"""
driver.execute_script(script)
status = driver.find_element_by_id("mpNewsContainer")
while True:
if status.text != "Loading Expand...":
break
time.sleep(0.5)
time.sleep(3)
pure_string = driver.execute_script('return expandLibrary.tackyVariable')
driver.execute_script('expandLibrary.tackyVariable = ""')
ret = json.loads(pure_string)
driver.execute_script('document.getElementById("mpNewsContainer").innerHTML = "";')
return ret
ffmpeg = "ffmpeg"
def main():
"""
the main function, where the magic happens
"""
with open("automator.config") as file:
data = file.readlines()
username = data[0][:-1]
password = data[1][:-1]
anilist = data[2][:-1]
kitsu = data[3][:-1]
global ffmpeg
ffmpeg = data[4][:-1]
outpath = data[5][:-1]
path = Path(__file__).parent.absolute()
if not outpath:
outpath = path.joinpath(Path('out'))
else:
outpath = Path(outpath)
driver = webdriver.Firefox(executable_path='geckodriver/geckodriver')
driver.get('https://animemusicquiz.com')
driver.find_element_by_id("loginUsername").send_keys(username)
driver.find_element_by_id("loginPassword").send_keys(password)
driver.find_element_by_id("loginButton").click()
time.sleep(10)
update_anime_lists(driver, anilist, kitsu)
questions = get_question_list(driver)
driver.execute_script("options.logout();")
driver.close()
database = Database("downloaded.db")
for question in questions:
annId = question["annId"]
name = question["name"]
songs = question["songs"]
for song in songs:
save(annId, name, song, outpath, database)
def save(annId, anime, song, outpath, database):
source_mp3 = song["examples"].get("mp3", None)
if not source_mp3:
return
if database.is_downloaded(song, source_mp3):
return
title = song["name"]
artist = song["artist"]
type = ["Unknown", "Opening", "Ending", "Insert"][song["type"]]
number = song["number"]
annSongId = song["annSongId"]
command = [
'"%s"' % ffmpeg,
"-y",
"-i", source_mp3,
"-vn",
"-c:a", "copy",
"-map_metadata", "-1",
"-metadata", 'title="%s"' % title,
"-metadata", 'artist="%s"' % artist,
"-metadata", 'track="%d"' % number,
"-metadata", 'disc="%d"' % song["type"],
"-metadata", 'genre="%s"' % type,
"-metadata", 'album="%s"' % anime,
'"%s"' % create_file_name(anime, type, number, title, artist, annId, annSongId, outpath)
]
execute_command(" ".join(command))
database.add_downloaded(song, source_mp3)
return True
def execute_command(command):
    # The command string quotes its paths for the shell, so run it through the shell.
    subprocess.call(command, shell=True)
def create_file_name_Windows(animeTitle, songType, songNumber, songTitle, songArtist, annId, annSongId, path, allowance=255):
"""
Creates a windows-compliant filename by removing all bad characters
and maintaining the windows path length limit (which by default is 255)
"""
allowance -= len(str(path)) + 1 # by default, windows is sensitive to long total paths.
bad_characters = re.compile(r"\\|/|<|>|:|\"|\||\?|\*|&|\^|\$|" + '\0')
return create_file_name_common(animeTitle, songType, songNumber, songTitle, songArtist, annId, annSongId, path, bad_characters, allowance)
def create_file_name_POSIX(animeTitle, songType, songNumber, songTitle, songArtist, annId, annSongId, path, allowance=32767):
    """
    Creates a POSIX-compliant filename by removing all bad characters
    and keeping the name within the filesystem's path length limit
    """
    bad_characters = re.compile(r"/|" + '\0')
return create_file_name_common(animeTitle, songType, songNumber, songTitle, songArtist, annId, annSongId, path, bad_characters, allowance)
def create_file_name_common(animeTitle, songType, songNumber, songTitle, songArtist, annId, annSongId, path, bad_characters, allowance=255):
if allowance > 255:
        allowance = 255  # on most common filesystems, including NTFS, a filename cannot exceed 255 characters
# assign allowance for things that must be in the file name
allowance -= len(str(annId))
allowance -= len(str(annSongId))
allowance -= len("_-.mp3") # accounting for separators (-_) for annId annSongId, and .mp3
if allowance < 0:
raise ValueError("""It is not possible to give a reasonable file name, due to length limitations.
Consider changing location to somewhere with a shorter path.""")
# make sure that user input doesn't contain bad characters
animeTitle = bad_characters.sub("", animeTitle)
songType = bad_characters.sub('', songType)
songTitle = bad_characters.sub('', songTitle)
songArtist = bad_characters.sub('', songArtist)
song_number_string = ""
if songNumber:
song_number_string = "_" + str(songNumber)
ret = ""
for string in [animeTitle, songType + song_number_string, songTitle, songArtist]:
length = len(string)
if allowance - length < 0:
string = string[:allowance]
length = len(string)
ret += string
allowance -= length
if allowance - 1 > 1:
ret += "-"
else:
break
else:
ret = ret[:-1] # removes last "-"
ret = path.joinpath(Path(ret + "_" + str(annId) + "-" + str(annSongId) + ".mp3"))
return str(ret)
if os.name == "nt":
create_file_name = create_file_name_Windows
elif os.name == "posix":
create_file_name = create_file_name_POSIX
if __name__ == "__main__":
main()
| 2.84375
| 3
|
lib/sender/auth.py
|
JRS83/Osmedeus
| 8
|
12783738
|
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from lib.sender import send
from lib.core import utils
def login(options):
url = options.get('remote_api') + "/auth/api/token/"
body = {
"username": options.get('credentials')[0],
"password": options.get('credentials')[1]
}
r = send.send_post(url, body, is_json=True)
if r.json().get('access'):
utils.print_good("Authentication success")
jwt = '<PASSWORD> ' + r.json().get('access')
options['JWT'] = jwt
return options
utils.print_bad("Authentication failed")
return False
| 2.0625
| 2
|
project/editorial/views/platforms.py
|
cojennin/facet
| 25
|
12783739
|
""" Platform views for editorial app.
editorial/views/platformviews.py
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from braces.views import LoginRequiredMixin
from django.conf import settings
from django.core.urlresolvers import reverse
from django.views.generic import FormView
from editorial.forms import (
PlatformAccountFormSet,
)
# ----------------------------------------------------------------------#
# Platform Views
# ----------------------------------------------------------------------#
# ACCESS: Any user can edit their own platforms.
class UserPlatformAccountCreateView(LoginRequiredMixin, FormView):
"""Display formset to add social accounts to a user, organization or project."""
form_class = PlatformAccountFormSet
template_name = "editorial/platformaccounts_form.html"
def get_initial(self):
"""Pass user/organization to subform."""
# Older versions of Django don't have a nice way to pass things to forms within
# formsets except using initial data, so we shoe-horn it into here
return [{"user": self.request.user, 'organization': self.request.user.organization}]
def form_valid(self, form):
"""Save data."""
# One day, this may want to grow to handle deleting platform accounts, using a
# tick-to-delete. Or, with a newer Django version, this could move to extra_views,
# which has a nice built-in for formset editing.
for subform in form:
if subform.cleaned_data:
subform.save()
return super(UserPlatformAccountCreateView, self).form_valid(form)
def get_success_url(self):
"""Return to user profile."""
return reverse("user_edit", kwargs={"pk": self.request.user.id})
| 2.234375
| 2
|
quoraInsincere/read_data.py
|
JobQiu/Kaggle-NLP-Summary
| 1
|
12783740
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 27 00:52:19 2018
@author: xavier.qiu
"""
from common.load import *
from common.pd_util import *
from common.preprocess import *
from common.util import *
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np
import gc
import pickle
from tqdm import tqdm
class DataSet:
def __init__(self, embedding='glove', voc_len=105000, max_ques_len=72, cache=True):
"""
:param embedding:
"""
self.config = load_config()
self.embedding_type = embedding
self.voc_len = voc_len
self.max_ques_len = max_ques_len
if cache and os.path.exists(os.path.join(self.config["data_dir"], "y_train.pickle")):
with open(os.path.join(self.config["data_dir"], "x_train.pickle"), 'rb') as handle:
self.x_train = pickle.load(handle)
with open(os.path.join(self.config["data_dir"], "x_test.pickle"), 'rb') as handle:
self.x_test = pickle.load(handle)
with open(os.path.join(self.config["data_dir"], "y_train.pickle"), 'rb') as handle:
self.y_train = pickle.load(handle)
with open(os.path.join(self.config["data_dir"], "embedding_matrix.pickle"), 'rb') as handle:
self.embedding_matrix = pickle.load(handle)
return
print("Loading Train df")
self.train_df = pd.read_csv(os.path.join(self.config["data_dir"], "train.csv"))
print("Loading Test df")
self.test_df = pd.read_csv(os.path.join(self.config["data_dir"], "test.csv"))
self.preprocess("train")
self.preprocess("test")
self.word_index = None
# convert question_text to question_ids_list
self.word2indices()
print("Loading Embedding - {}".format(embedding))
        self.embedding_index = load_embedding(self.embedding_type, word_index=self.word_index, voc_len=self.voc_len)
if self.embedding_type != "mix":
self.embedding_matrix = self.make_embed_matrix(self.embedding_index, self.word_index, self.voc_len)
else:
self.embedding_matrix = self.embedding_index
del self.word_index
del self.embedding_index
send_msg("Load Done")
gc.collect()
with open(os.path.join(self.config["data_dir"], "x_train.pickle"), 'wb') as handle:
pickle.dump(self.x_train, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.config["data_dir"], "x_test.pickle"), 'wb') as handle:
pickle.dump(self.x_test, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.config["data_dir"], "y_train.pickle"), 'wb') as handle:
pickle.dump(self.y_train, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(os.path.join(self.config["data_dir"], "embedding_matrix.pickle"), 'wb') as handle:
pickle.dump(self.embedding_matrix, handle, protocol=pickle.HIGHEST_PROTOCOL)
def make_embed_matrix(self, embeddings_index, word_index, len_voc):
all_embs = np.stack(embeddings_index.values())
emb_mean, emb_std = all_embs.mean(), all_embs.std()
embed_size = all_embs.shape[1]
word_index = word_index
embedding_matrix = np.random.normal(emb_mean, emb_std, (len_voc, embed_size))
for word, i in tqdm(word_index.items()):
if i >= len_voc:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
embedding_matrix[i] = embedding_vector
return embedding_matrix
def word2indices(self):
t = Tokenizer(num_words=self.voc_len, filters='')
x_train = self.train_df["treated_question"].fillna("_na_").values
x_test = self.test_df["treated_question"].fillna("_na_").values
t.fit_on_texts(list(x_train))
self.word_index = t.word_index
# Tokenize the sentences
x_train = t.texts_to_sequences(x_train)
x_test = t.texts_to_sequences(x_test)
# Pad the sentences
x_train = pad_sequences(x_train, maxlen=self.max_ques_len)
x_test = pad_sequences(x_test, maxlen=self.max_ques_len)
# Get the target values
y_train = self.train_df['target'].values
self.x_train = x_train
self.x_test = x_test
self.y_train = y_train
def preprocess(self, data_set, filters=["punct", "contraction", "special characters", "misspell"]):
"""
:param filters:
:param data_set:
:return:
"""
if data_set == "train":
df = self.train_df
else:
df = self.test_df
print("Pre-processing {}".format(data_set))
df["treated_question"] = df["question_text"]
if "numbers" in filters:
print("Clean number ing ... ")
df["treated_question"] = df["treated_question"].apply(lambda x: deal_with_numbers(x))
if "punct" in filters:
print("Clean punct ing ... ")
df['treated_question'] = df['treated_question'].apply(lambda x: deal_with_punct(x))
if "lower" in filters:
print("Lowering ... ")
df['treated_question'] = df['treated_question'].apply(lambda x: x.lower())
if "special characters" in filters:
print("Clean special chars ing ... ")
df['treated_question'] = df['treated_question'].apply(lambda x: deal_with_special_characters(x))
if "misspell" in filters:
print("Clean misspell ing ...")
df['treated_question'] = df['treated_question'].apply(lambda x: deal_with_misspell(x))
| 2.03125
| 2
|
desec_dns_api.py
|
gerhard-tinned/desec-dns-api-cli
| 3
|
12783741
|
<filename>desec_dns_api.py
#!/usr/bin/python
"""
Author <NAME>
Version: 0.2.0
"""
from __future__ import print_function
import json
import requests
class deSEC_DNS_API(object):
"""
    Class to handle the deSEC DNS API requests
Requires: api_url, api_token
"""
def __init__(self, api_url, api_token, debug=False):
"""
Initially set the base url and the auth header
Keyword arguments:
        api_url -- The API url used to connect to
        api_token -- The API token used to authenticate on the API
debug -- Enable / Disable debug output (default False)
"""
super(deSEC_DNS_API, self).__init__()
self.url_base = api_url
self.header = {'Authorization': 'Token ' + api_token}
self.debug = debug
self.http_body = None
self.http_code = None
self.http_errmsg = None
self.single_result = False
def http_request(self, url, header, method='GET', data=None):
"""
Function performing http requests
Keyword arguments:
url -- The api url to send the request to
header -- Headers to send with the HTTP request
method -- The HTTP method used for the request (default 'GET')
data -- The request data to be sent with the request (default None)
"""
self.http_errmsg = ''
if self.debug:
print("*** DEBUG: http-request : http-url : " + url)
print("*** DEBUG: http-request : http-method : " + method)
print("*** DEBUG: http-request : http-header : " + str(header))
print("*** DEBUG: http-request : http-data : " + str(data))
if data is None:
req_data = None
else:
# encode data if passed to the function
req_data = data.encode('utf-8')
# Set the request type (GET, POST, PATCH, DELETE)
try:
            ret = requests.request(method=method, url=url, data=req_data, headers=header)
        except requests.exceptions.RequestException as err:
            # requests exceptions do not expose code/msg/read(); fall back to the
            # attached response when one exists (connection errors have none)
            response = getattr(err, 'response', None)
            self.http_code = response.status_code if response is not None else 599  # 599: no HTTP response received
            self.http_errmsg = str(err)
            self.http_body = response.text if response is not None else ''
            if self.debug:
                print("*** DEBUG: http-response: http-code : " + str(self.http_code))
                print("*** DEBUG: http-response: http-error : '" + self.http_errmsg + "'")
                print("*** DEBUG: http-response: http-body :\n" + str(self.http_body) + "\n")
            return False
self.http_body = ret.text
self.http_code = ret.status_code
if self.debug:
print("*** DEBUG: http-request : url : " + ret.geturl())
print("*** DEBUG: http-response: http-code : " + str(self.http_code))
print("*** DEBUG: http-response: http-header :\n" + str(ret.info()))
print("*** DEBUG: http-response: http-body :\n" + self.http_body + "\n")
return True
def get_response_dict(self):
"""
Function to get json response parsed
Return: array of dicts
"""
# decode http_body from json to dict
ret_dict = json.loads(self.http_body)
# if single result is expected, create an array to remain structure
if self.single_result:
ret_dict = [ret_dict]
if self.debug:
print("*** DEBUG: json2dict : ret_dict : " + str(ret_dict))
return ret_dict
def domain_list(self, zone=None):
"""
Function to request the domain list
Return: boolean (based on http_code)
Keyword arguments:
zone -- The domain name that should be filtered for
"""
# check for zone to filter result
url_addition = ''
self.single_result = False
if zone:
url_addition = zone + "/"
self.single_result = True
# compile request url
req_url = self.url_base + url_addition
# request the list from the api
self.http_request(url=req_url, header=self.header, data=None, method='GET')
# return code indicates success
if self.http_code < 300:
return True
else:
return False
def domain_create(self, zone):
"""
Function to create a new domain
Return: boolean (based on http_code)
Keyword arguments:
zone -- The domain name that should be created
"""
self.single_result = True
# compose POST data
post_data = dict()
post_data['name'] = zone
data = json.dumps(post_data)
# Extend headers with Content-Type
headers = self.header
headers['Content-Type'] = "application/json"
# compile request url
req_url = self.url_base
# request the list from the api
self.http_request(url=req_url, header=headers, data=data, method='POST')
# return code indicates success
if self.http_code < 300:
return True
else:
return False
def domain_delete(self, zone):
"""
Function to delete a domain
Return: boolean (based on http_code)
Keyword arguments:
zone -- The domain name that should be deleted
"""
# set zone to specify domain
url_addition = ''
url_addition = zone + "/"
# compile request url
req_url = self.url_base + url_addition
# request the list from the api
self.http_request(url=req_url, header=self.header, method='DELETE')
# return code indicates success
if self.http_code < 300:
return True
else:
return False
def rrset_list(self, zone, type=None, subname=None):
"""
Function to request the rrset list
Return: boolean (based on http_code)
Keyword arguments:
zone -- The domain that should be used
type -- The type of rrsets that should be shown (default None)
subname -- The subname of rrset that should be shown (default None)
"""
# check for filter arguments
url_addition = ''
self.single_result = False
if type:
url_addition = "?type=" + type
if subname:
url_addition = "?subname=" + subname
if type and subname:
url_addition = subname + ".../" + type + "/"
self.single_result = True
# compile request url
req_url = self.url_base + zone + "/rrsets/" + url_addition
# request the list from the api
self.http_request(url=req_url, header=self.header, data=None, method='GET')
# return code indicates success
if self.http_code < 300:
return True
else:
return False
def rrset_create(self, zone, type, subname, records, ttl):
"""
Function to create a new rrset
Return: boolean (based on http_code)
Keyword arguments:
zone -- The domain that should be used
type -- The type of rrsets that should be created
subname -- The subname of rrset that should be created
records -- The records that should be set for this rrset
ttl -- The ttl that should be set for this rrset
"""
self.single_result = True
# compose POST data
post_data = dict()
post_data['subname'] = subname
post_data['type'] = type
post_data['ttl'] = ttl
post_data['records'] = records.split(",")
data = json.dumps(post_data)
if self.debug:
print("*** DEBUG: data=" + data)
# Extend headers with Content-Type
headers = self.header
headers['Content-Type'] = "application/json"
# compile request url
req_url = self.url_base + zone + "/rrsets/"
# request the list from the api
self.http_request(url=req_url, header=headers, data=data, method='POST')
# return code indicates success
if self.http_code < 300:
return True
else:
return False
def rrset_delete(self, zone, type, subname):
"""
        Function to delete an rrset
Return: boolean (based on http_code)
Keyword arguments:
zone -- The domain that should be used
type -- The type of rrsets that should be deleted
subname -- The subname of rrset that should be deleted
"""
self.single_result = True
# compile request url
req_url = self.url_base + zone + "/rrsets/" + subname + ".../" + type + "/"
# request the list from the api
self.http_request(url=req_url, header=self.header, data=None, method='DELETE')
# return code indicates success
if self.http_code < 300:
return True
else:
return False
def rrset_modify(self, zone, type, subname, records=None, ttl=None):
"""
        Function to modify an existing rrset
Return: boolean (based on http_code)
Keyword arguments:
zone -- The domain that should be used
type -- The type of rrsets that should be modified
subname -- The subname of rrset that should be modified
records -- The records that should be set for this rrset (default None)
ttl -- The ttl that should be set for this rrset (default None)
"""
self.single_result = True
# compose POST data
post_data = dict()
if ttl:
post_data['ttl'] = ttl
if records:
post_data['records'] = records.split(",")
data = json.dumps(post_data)
# Extend headers with Content-Type
headers = self.header
headers['Content-Type'] = "application/json"
# compile request url
req_url = self.url_base + zone + "/rrsets/" + subname + ".../" + type + "/"
# request the list from the api
self.http_request(url=req_url, header=headers, data=data, method='PATCH')
# return code indicates success
if self.http_code < 300:
return True
else:
return False
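

# A minimal usage sketch (assumptions: the deSEC v1 domains endpoint and a
# placeholder token; not part of the original CLI wrapper):
if __name__ == "__main__":
    api = deSEC_DNS_API("https://desec.io/api/v1/domains/", "YOUR-TOKEN-HERE", debug=True)
    if api.domain_list():
        for domain in api.get_response_dict():
            print(domain.get('name'))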
| 3.296875
| 3
|
tests/master/data/database/dal/test_ioc.py
|
m2bright/rapid
| 4
|
12783742
|
"""
Copyright (c) 2015 <NAME> and Bamboo HR LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from unittest.case import TestCase
from nose.tools.trivial import eq_
from rapid.lib.framework.ioc import IOC
from rapid.lib.framework.injectable import Injectable
class TrialClass(object):
def has_trial(self):
return False
class OtherClass(object):
def has_something(self):
return True
class TestClass(Injectable):
__injectables__ = {"trial": TrialClass, "other": OtherClass}
def __init__(self, trial, other, oneMore):
self.trial = trial
self.other = other
self.oneMore = oneMore
class TestIOC(TestCase):
def test_multi_dependency(self):
testclass = IOC.get_class_instance(TestClass, "Something")
eq_(False, testclass.trial.has_trial())
| 1.84375
| 2
|
reactive/charm_staticip.py
|
VariableDeclared/charm-staticip
| 0
|
12783743
|
<gh_stars>0
from charms.reactive import when, when_not, set_flag
import os
import sys
import yaml  # needed below to render the netplan configuration
sys.path.insert(0, os.path.join(os.environ['CHARM_DIR'], 'lib'))
from charmhelpers.core import (
hookenv,
host
)
@when_not('charm-staticip.installed')
def install_charm_staticip():
# Do your setup here.
#
# If your charm has other dependencies before it can install,
# add those as @when() clauses above., or as additional @when()
# decorated handlers below
#
# See the following for information about reactive charms:
#
# * https://jujucharms.com/docs/devel/developer-getting-started
# * https://github.com/juju-solutions/layer-basic#overview
#
set_flag('charm-staticip.installed')
@when('config-changed')
def configure_staticip():
network_config = hookenv.network_get(
hookenv.config('binding')
)
nics = {}
for nic in network_config.get('bind-addresses'):
        nics.update({
            nic.get('interfacename'): {
                'addresses': [
                    nic.get('addresses')[0]
                ],
                'gatewayv4': None,  # TODO: Find a reliable way of getting
                                    # the gateway IP.
                'dhcp': 'no',
            }
        })
    netplan_config = {
        'network': {
            'version': 2,
            'renderer': "networkd",
            'ethernets': nics,
        }
    }
    with open('/etc/netplan/00-set-staticip.yaml', 'w') as npfile:
        npfile.write(yaml.dump(netplan_config, default_flow_style=False))
# TODO: Run `netplan apply`
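    # Sketch of the rendered file for a single NIC (hypothetical interface name
    # and address; YAML key order may differ):
    #
    # network:
    #   version: 2
    #   renderer: networkd
    #   ethernets:
    #     eth0:
    #       addresses: [10.0.0.5/24]
    #       gatewayv4: null
    #       dhcp: 'no'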
| 1.90625
| 2
|
model/openacademy_course.py
|
davepadrino/openacademy-project
| 0
|
12783744
|
<reponame>davepadrino/openacademy-project
# -*- coding: utf-8 -*-
from openerp import models, fields, api
'''
Creation of the course model
'''
class Course(models.Model):
    '''
    Creates the course model
    A search box plus filtering and advanced-search options will appear; these are inherited from "models" via the parent class (models.Model)
    '''
    _name = 'openacademy.course'  # Odoo model name; the '.' used here becomes '_' in the database
    name = fields.Char(string='Title', required=True)  # reserved field that identifies the record's name; the string is what is shown on the form
    description = fields.Text(string='Description')  # required=False is assumed; the string is what is shown on the form
    responsible_id = fields.Many2one('res.users', ondelete='set null', string="Responsible", index=True)  # a course has one responsible user
session_ids = fields.One2many('openacademy.session', 'course_id')
    '''
    copy is a built-in Odoo method; calling super (in the return) invokes the original copy
    while telling it not to alter its native behaviour.
    default receives the names of the fields that are going to be duplicated.
    copied_count calls search_count, which takes a domain and returns an integer with the number of matches.
    dict() builds a dictionary from whatever field it receives.
    '''
@api.multi
def copy(self, default=None):
default = dict(default or {})
copied_count = self.search_count(
[('name', '=like', u"Copia de {}%".format(self.name))])
if not copied_count:
new_name = u"Copia de {}".format(self.name)
else:
new_name = u"Copia de {} ({})".format(self.name, copied_count)
default['name'] = new_name
return super(Course, self).copy(default)
    '''
    Allows defining _sql_constraints at the database level
    '''
_sql_constraints = [
('name_description_check',
'CHECK(name != description)',
"El titulo del curso no puede ser la descripcion"),
('name_unique',
'UNIQUE(name)',
"El titulo del curso debe ser unico"),
]
| 2.296875
| 2
|
snow-dots/utilities/time.py
|
cpizzica/Lab-Matlab-Control
| 6
|
12783745
|
<gh_stars>1-10
#! python3
import sys, time
print( time.time() )
| 1.9375
| 2
|
doltcli/__init__.py
|
sidphbot/doltcli
| 6
|
12783746
|
from .dolt import (
Branch,
Commit,
Dolt,
DoltException,
DoltHubContext,
KeyPair,
Remote,
Status,
Table,
_execute,
)
from .types import BranchT, CommitT, DoltT, KeyPairT, RemoteT, StatusT, TableT
from .utils import (
CREATE,
FORCE_CREATE,
REPLACE,
UPDATE,
columns_to_rows,
detach_head,
read_columns,
read_columns_sql,
read_rows,
read_rows_sql,
set_dolt_path,
write_columns,
write_file,
write_rows,
)
| 1.226563
| 1
|
languages/python/sqlalchemy-oso/tests/test_partial.py
|
connec/oso
| 2,167
|
12783747
|
<reponame>connec/oso
from polar import Variable
from sqlalchemy.orm import Session
from sqlalchemy_oso.partial import partial_to_filter
from .models import User
def test_partial_to_query_filter(oso, engine):
oso.load_str('ok(_: User{username:"gwen"});')
session = Session(bind=engine)
gwen = User(username="gwen")
session.add(gwen)
steve = User(username="steve")
session.add(steve)
result = oso.query_rule("ok", Variable("actor"), accept_expression=True)
partial = next(result)["bindings"]["actor"]
filter = partial_to_filter(partial, session, User, oso.get_class)
q = list(session.query(User).filter(filter))
assert q == [gwen]
| 2.578125
| 3
|
manimlib/constants.py
|
parmentelat/manim
| 1
|
12783748
|
import numpy as np
# Sizes relevant to default camera frame
ASPECT_RATIO = 16.0 / 9.0
FRAME_HEIGHT = 8.0
FRAME_WIDTH = FRAME_HEIGHT * ASPECT_RATIO
FRAME_Y_RADIUS = FRAME_HEIGHT / 2
FRAME_X_RADIUS = FRAME_WIDTH / 2
DEFAULT_PIXEL_HEIGHT = 1080
DEFAULT_PIXEL_WIDTH = 1920
DEFAULT_FRAME_RATE = 30
SMALL_BUFF = 0.1
MED_SMALL_BUFF = 0.25
MED_LARGE_BUFF = 0.5
LARGE_BUFF = 1
DEFAULT_MOBJECT_TO_EDGE_BUFFER = MED_LARGE_BUFF
DEFAULT_MOBJECT_TO_MOBJECT_BUFFER = MED_SMALL_BUFF
# All in seconds
DEFAULT_POINTWISE_FUNCTION_RUN_TIME = 3.0
DEFAULT_WAIT_TIME = 1.0
ORIGIN = np.array((0., 0., 0.))
UP = np.array((0., 1., 0.))
DOWN = np.array((0., -1., 0.))
RIGHT = np.array((1., 0., 0.))
LEFT = np.array((-1., 0., 0.))
IN = np.array((0., 0., -1.))
OUT = np.array((0., 0., 1.))
X_AXIS = np.array((1., 0., 0.))
Y_AXIS = np.array((0., 1., 0.))
Z_AXIS = np.array((0., 0., 1.))
# Useful abbreviations for diagonals
UL = UP + LEFT
UR = UP + RIGHT
DL = DOWN + LEFT
DR = DOWN + RIGHT
TOP = FRAME_Y_RADIUS * UP
BOTTOM = FRAME_Y_RADIUS * DOWN
LEFT_SIDE = FRAME_X_RADIUS * LEFT
RIGHT_SIDE = FRAME_X_RADIUS * RIGHT
PI = np.pi
TAU = 2 * PI
DEGREES = TAU / 360
FFMPEG_BIN = "ffmpeg"
JOINT_TYPE_MAP = {
"auto": 0,
"round": 1,
"bevel": 2,
"miter": 3,
}
# Related to Text
START_X = 30
START_Y = 20
NORMAL = "NORMAL"
ITALIC = "ITALIC"
OBLIQUE = "OBLIQUE"
BOLD = "BOLD"
DEFAULT_STROKE_WIDTH = 4
# Colors
COLOR_MAP = {
"BLUE_E": "#1C758A",
"BLUE_D": "#29ABCA",
"BLUE_C": "#58C4DD",
"BLUE_B": "#9CDCEB",
"BLUE_A": "#C7E9F1",
"TEAL_E": "#49A88F",
"TEAL_D": "#55C1A7",
"TEAL_C": "#5CD0B3",
"TEAL_B": "#76DDC0",
"TEAL_A": "#ACEAD7",
"GREEN_E": "#699C52",
"GREEN_D": "#77B05D",
"GREEN_C": "#83C167",
"GREEN_B": "#A6CF8C",
"GREEN_A": "#C9E2AE",
"YELLOW_E": "#E8C11C",
"YELLOW_D": "#F4D345",
"YELLOW_C": "#FFFF00",
"YELLOW_B": "#FFEA94",
"YELLOW_A": "#FFF1B6",
"GOLD_E": "#C78D46",
"GOLD_D": "#E1A158",
"GOLD_C": "#F0AC5F",
"GOLD_B": "#F9B775",
"GOLD_A": "#F7C797",
"RED_E": "#CF5044",
"RED_D": "#E65A4C",
"RED_C": "#FC6255",
"RED_B": "#FF8080",
"RED_A": "#F7A1A3",
"MAROON_E": "#94424F",
"MAROON_D": "#A24D61",
"MAROON_C": "#C55F73",
"MAROON_B": "#EC92AB",
"MAROON_A": "#ECABC1",
"PURPLE_E": "#644172",
"PURPLE_D": "#715582",
"PURPLE_C": "#9A72AC",
"PURPLE_B": "#B189C6",
"PURPLE_A": "#CAA3E8",
"GREY_E": "#222222",
"GREY_D": "#444444",
"GREY_C": "#888888",
"GREY_B": "#BBBBBB",
"GREY_A": "#DDDDDD",
"WHITE": "#FFFFFF",
"BLACK": "#000000",
"GREY_BROWN": "#736357",
"DARK_BROWN": "#8B4513",
"LIGHT_BROWN": "#CD853F",
"PINK": "#D147BD",
"LIGHT_PINK": "#DC75CD",
"GREEN_SCREEN": "#00FF00",
"ORANGE": "#FF862F",
}
PALETTE = list(COLOR_MAP.values())
locals().update(COLOR_MAP)
for name in [s for s in list(COLOR_MAP.keys()) if s.endswith("_C")]:
locals()[name.replace("_C", "")] = locals()[name]
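# e.g. after this loop, BLUE is an alias for BLUE_C, RED for RED_C, and so on.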
| 2.25
| 2
|
Lec3/flaskappIsitChristmas/application.py
|
tim-ballard/CS50W
| 0
|
12783749
|
from flask import Flask, render_template
from datetime import date
app = Flask(__name__)
@app.route("/")
def check():
today = date.today()
newyearcheck = today.month == 1 and today.day == 1
return render_template("index.html", newyearcheck=newyearcheck)
| 2.546875
| 3
|
api/src/abim_search_api.py
|
wellcomecollection/amplify
| 3
|
12783750
|
<reponame>wellcomecollection/amplify
import requests
import re
from re import finditer
def format_search_string(text):
'''Format search string for search_record method's SEARCH expression.
:param text: string
:return: string (formatted)
'''
print(text)
result = ''
if text != None:
if text == []:
result = ''
elif type(text) is list:
for piece in text:
piece = piece.lower().replace(' ', '+')
result += piece + str(' ')
else:
result = text.lower().replace(' ', '+')
print(result)
return result
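# For example (hypothetical inputs):
#   format_search_string("Wealth of India")          -> "wealth+of+india"
#   format_search_string(["Raw Materials", "Vol 1"]) -> "raw+materials vol+1 " (note the trailing space)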
def search_record(author=None, title=None, publisher=None, date=None):
'''Search record on Library Hub.
:param author: string
:param title: string
:param publisher: string
:param date: string
:return: JSON
'''
BASE_URL = 'http://indianmedicine.eldoc.ub.rug.nl/cgi/search/archive/advanced?'
SEARCH = 'screen=Search&' \
'dataset=archive&' \
'_action_search=Search&' \
'title_merge=ALL&' \
'title='+format_search_string(title)+'&' \
'titleorder_merge=ALL&' \
'titleorder=&' \
'authors_merge=ALL&' \
'authors='+format_search_string(author)+'&' \
'authorsorder_merge=ALL&' \
'authorsorder=&' \
'date='+format_search_string(date)+'&' \
'description_merge=ALL&' \
'description=&' \
'annote_merge=ALL&' \
'annote=&' \
'note_location_merge=ALL&' \
'note_location=&' \
'note_description_merge=ALL&' \
'note_description=&' \
'note_checked_merge=ALL&' \
'note_checked=&' \
'publisher_merge=ALL&' \
'publisher='+format_search_string(publisher)+'&' \
'identifier_referenceID_merge=ALL&' \
'identifier_referenceID=&' \
'satisfyall=ALL&' \
'order=-date%2Fauthorsorder%2Ftitleorder'
print(SEARCH)
response = requests.get(BASE_URL + SEARCH)
return response.text
def parse_response(response):
results = []
start_positions = []
end_positions = []
for match in finditer("""<tr class="ep_search_result">
<td style="padding-left: 0.5em">""", response):
# print(match.span(), match.group())
start_positions.append(match.span()[1])
for match in finditer("""Publication]""", response):
# print(match.span(), match.group())
end_positions.append(match.span()[0])
for i in range(len(start_positions)):
document = str(response[start_positions[i]:end_positions[i]-1])
document = document.replace("""</td>\n <td style="padding-left: 0.5em">\n \n\n\n """, '')
document = document.replace("""</em></a>\n\n\n""", '')
results.append({'document': document})
# results = [{'document': '1. <NAME> and \nB. <NAME>\n \n\n(2011)\n\n<a href="http://indianmedicine.eldoc.ub.rug.nl/62735/"><em>Medicinal plant wealth of Palakonda hill ranges, Kadapa district, Andhra Pradesh, India. '},
# {'document': '2. Chakre, <NAME>.\n \n\n(2010)\n\n<a href="http://indianmedicine.eldoc.ub.rug.nl/55569/"><em>The Wealth of India -- a CSIR\'s encyclopaedia of information resource on economic plants, animals and minerals. '}]
splitted_results = []
for i in results:
splitted = re.split("""\n\n<a href="|\n\n|"><em>""", i['document'])
splitted_results.append({'author': splitted[0], 'date': splitted[1], 'link': splitted[2], 'title': splitted[3]})
print(splitted_results)
return splitted_results
def pull_details(link):
'''Pull MARC details from ABIM links.
:return: list
'''
BASE_URL = link
response = requests.get(BASE_URL).text
results = []
start_positions = []
end_positions = []
for match in finditer("""<tr>
<th align="right">""", response):
# print(match.span(), match.group())
start_positions.append(match.span()[1])
for match in finditer("""</td>
</tr>""", response):
# print(match.span(), match.group())
end_positions.append(match.span()[0] + 1)
for i in range(len(start_positions)):
document = str(response[start_positions[i]:end_positions[i + 1] - 1])
document = document.replace("""</th>\n <td valign="top">""", '')
# document = document.replace("""</em></a>\n\n\n""", '')
results.append({'document': document})
# results = [{'document': 'Author(s)Wealth of India'}, {'document': 'Author(s) order (without accents)Wealth of India'}, {'document': 'Title order (without accents)The wealth of India -- A dictionary of Indian raw materials and industrial products -- Second Supplement Series (Raw Materials) volume 1: A--F volume 2: G--Ph'}, {'document': 'Type of publicationBook (monograph)'}, {'document': 'LanguageEnglish'}, {'document': 'Description of the publicationvol. 1: XVIII, 382, 40 (indexes) p.; vol.2: XVIII, 282, VII (Appendix), 41 (indexes) p.'}, {'document': 'Locationpresent'}, {'document': 'Checkedyes'}, {'document': 'PublisherNational Institute of Science Communication and Information Resources -- Council of Scientific and Industrial Research, New Delhi'}]
splitted_results = []
for i in results:
splitted = re.split(""":""", i['document'])
splitted_results.append({'field': splitted[0], 'detail': splitted[1]})
return splitted_results
| 2.9375
| 3
|