blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bbae3698bee755a86e113f6ff4e7d52fe4f8a1ca
|
7b12eb45c1ea76ad9c186b858b5dfebf2c5b862a
|
/.history/DEBER_20210905000023.py
|
9516b0bda58c56e4e39bbf9f8a08dc9dc32c935e
|
[
"MIT"
] |
permissive
|
Alopezm5/PROYECTO-PARTE-1
|
a1dce04009b24852c1c60e69bdf602ad3af0574b
|
bd7a8594edf08d41c6ca544cf6bac01ea4fcb684
|
refs/heads/main
| 2023-07-25T11:22:17.994770
| 2021-09-07T03:27:34
| 2021-09-07T03:27:34
| 403,670,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,447
|
py
|
import os
class Empresa():
    """A company record: name, RUC, address, phone, city and company type."""

    def __init__(self, nom="", ruc=0, dire="", tele=0, ciud="", tipEmpr=""):
        # Map each constructor argument onto its attribute.
        self.nombre = nom
        self.ruc = ruc
        self.direccion = dire
        self.telefono = tele
        self.ciudad = ciud
        self.tipoEmpresa = tipEmpr

    def datosEmpresa(self):
        """Interactively prompt for every company field."""
        self.nombre = input("Ingresar nombre de la empresa: ")
        self.ruc = int(input("Ingresar ruc de la empresa: "))
        self.direccion = input("Ingresar la direccion de la empresa: ")
        self.telefono = int(input("Ingresar el numero de telefono de la empresa: "))
        self.ciudad = input("Ingresar ciudad donde esta la empresa: ")
        self.tipoEmpresa = input("Ingresar tipo de empresa publica o privada: ")

    def mostrarEmpresa(self):
        """Print a human-readable summary of the company."""
        print("")
        print("Empresa")
        print("La empresa de nombre {}\n De RUC #{} \n Está ubicada en {}\n Se puede comunicar al #{}\n Está empresa esta en la ciudad de {}\n Es una entidad {}".format(self.nombre,self.ruc,self.direccion, self.telefono,self.ciudad, self.tipoEmpresa))
class Empleado(Empresa):
    """An employee; extends Empresa with personal contact details."""

    def __init__(self, nom="", cedu=0, dire="", tele=0, email="", estado="", profe=""):
        # NOTE(review): Empresa.__init__ is deliberately not invoked here,
        # matching the original design — only personal fields are tracked.
        self.nombre = nom
        self.cedula = cedu
        self.direccion = dire
        self.telefono = tele
        self.correo = email
        self.estadocivil = estado
        self.profesion = profe

    def empleado(self):
        """Prompt for the employee's basic personal data."""
        self.nombre = input("Ingresar nombre del empleado: ")
        self.cedula = int(input("Ingresar numero de cedula del empleado: "))
        self.direccion = input("Ingresar la direccion del empleado: ")
        self.telefono = int(input("Ingresar numero de contacto del empleado: "))
        self.correo = input("Ingresar correo personal del empleado: ")

    def empleadoObrero(self):
        """Prompt for the field specific to a worker (obrero): marital status."""
        self.estadocivil = input("Ingresar estado civil del empleado: ")

    def empleadoOficina(self):
        """Prompt for the field specific to an office employee: profession."""
        self.profesion = input("Ingresar profesion del empleado: ")

    def mostrarempleado(self):
        """Print a summary of the employee's personal data."""
        print("El empleado: {} con # de C.I. {} \n Con direccion {}, y numero de contacto{}\n Y correo {}".format(self.nombre,self.cedula,self.direccion,self.telefono,self.correo))
class Departamento(Empleado):
    """Associates an employee with the department they belong to."""

    def __init__(self, dep=""):
        # Only the department name is tracked; parent initializers are not
        # called, matching the original behavior.
        self.departamento = dep

    def departa(self):
        """Prompt for the employee's department."""
        self.departamento = input("Ingresar el departamento al que pertenece el empleado: ")

    def mostrarDeparta(self):
        """Print the employee's department."""
        print("El empleado pertenece al departamento de: {}".format(self.departamento))
class Pagos(Empleado):
    """Payroll record for an employee: gathers salary inputs interactively
    and computes gross pay, deductions and the net amount (liquidoRecibir)."""

    def __init__(self, desper=0,valhora=0,hotraba=0,extra=0,suel=0,hrecar=0,hextra=0,pres=0,mcou=0,valho=0,sobtiem=0,comofi=0,antobre=0,iemple=0,cuopres=0,tot=0,liquid=0,cuota=0,anti=0,comi=0,fNomina="",fIngreso="",iess=0):
        self.permisos = desper
        self.valorhora = valhora
        self.horastrabajadas = hotraba
        self.valextra = extra
        self.sueldo = suel
        self.horasRecargo = hrecar
        self.horasExtraordinarias = hextra
        self.prestamo = pres
        self.mesCuota = mcou
        self.valor_hora = valho
        self.sobretiempo = sobtiem
        self.comEmpOficina = comofi
        self.antiEmpObrero = antobre
        self.iessEmpleado = iemple
        self.cuotaPrestamo = cuopres
        self.totdes = tot
        self.liquidoRecibir = liquid
        # NOTE(review): mesCuota is assigned twice (mcou above, cuota here);
        # the second assignment wins. Kept for backward compatibility.
        self.mesCuota = cuota
        self.antiguedad = anti
        self.comision = comi
        self.fechaNomina = fNomina
        self.fechaIngreso = fIngreso
        self.iess = iess

    def pagoNormal(self):
        """Prompt for the base salary, loan, commission, seniority and IESS rate."""
        self.sueldo = float(input("Ingresar sueldo del trabajador: $ "))
        self.prestamo = float(input("Ingresar monto del prestamo que ha generado el empleado: $ "))
        self.mesCuota = int(input("Ingresar meses a diferir el prestamo: "))
        self.comision = float(input("Ingresar valor de la comsion: "))
        self.antiguedad = int(input("Ingresar antiguedad: "))
        self.iess = float(input("Ingresar valor del iees recordar que debe ser porcentuado Ejemplo si quiere decir 20% debe ingresar 0.20"))

    def pagoExtra(self):
        """Prompt for overtime hours and the payroll/hire dates."""
        self.horasRecargo = int(input("Ingresar horas de recargo: "))
        self.horasExtraordinarias = int(input("Ingresar horas extraordinarias: "))
        # BUG FIX: the dates were read with float(...), which raises
        # ValueError for any "año-mes-dia" string. Parse them as ISO dates
        # and store the proleptic-Gregorian ordinal so the numeric day
        # difference used in calculoSueldo() keeps working.
        from datetime import date
        self.fechaNomina = date.fromisoformat(
            input("Ingresar fecha de nomida (formato año-mes-dia): ")
        ).toordinal()
        self.fechaIngreso = date.fromisoformat(
            input("Ingresar fecha de ingreso (formato año-mes-dia): ")
        ).toordinal()

    def calculoSueldo(self):
        """Compute pay from the fields set by pagoNormal() and pagoExtra()."""
        self.valor_hora = self.sueldo / 240
        # Overtime: recargo hours pay 50% of the hourly rate, extraordinary
        # hours pay double.
        self.sobretiempo = self.valor_hora * (self.horasRecargo * 0.50 + self.horasExtraordinarias * 2)
        self.comEmpOficina = self.comision * self.sueldo
        # Seniority bonus prorated by days between hire and payroll dates.
        self.antiEmpObrero = self.antiguedad * (self.fechaNomina - self.fechaIngreso) / 365 * self.sueldo
        self.iessEmpleado = self.iess * (self.sueldo + self.sobretiempo)
        self.cuotaPrestamo = self.prestamo / self.mesCuota
        self.toting = self.sueldo + self.sobretiempo + self.comEmpOficina + self.antiEmpObrero
        # NOTE(review): deductions use the full loan (prestamo), not the
        # monthly cuotaPrestamo computed above — confirm which was intended.
        self.totdes = self.iessEmpleado + self.prestamo
        self.liquidoRecibir = self.toting - self.totdes

    def mostrarSueldo(self):
        """Print the net salary computed by calculoSueldo()."""
        # BUG FIX: the original printed the literal "${}" because .format()
        # was never called; show the computed net amount.
        print("El empleado tiene un sueldo de ${}".format(self.liquidoRecibir))
# --- Interactive entry point: gather all data, then show a summary report. ---
emp=Empresa()
emp.datosEmpresa()
os.system ("cls")  # clear the console (Windows-only command)
emple=Empleado()
emple.empleado()
os.system ("cls")
# Both role-specific prompts are run for the same employee.
emple.empleadoObrero()
emple.empleadoOficina()
os.system ("cls")
depa=Departamento()
depa.departa()
# Payroll: base inputs, overtime inputs, then the salary computation.
pag=Pagos()
pag.pagoNormal()
pag.pagoExtra()
pag.calculoSueldo()
os.system ("cls")
# Final report: company, employee and computed salary.
emp.mostrarEmpresa()
print("")
emple.mostrarempleado()
print("")
pag.mostrarSueldo()
|
[
"85761855+Alopezm5@users.noreply.github.com"
] |
85761855+Alopezm5@users.noreply.github.com
|
c2154d3a5fe4c8670860e1c2b5ea7301a892ea20
|
780b6cca690a213ac908b1cd5faef5366a18dc4e
|
/314_print_names_to_columns/save1_nopass.py
|
8cb6c53bb39aa700c4f9bc48b51e4735762b74ba
|
[] |
no_license
|
katkaypettitt/pybites-all
|
899180a588e460b343c00529c6a742527e4ea1bc
|
391c07ecac0d92d5dc7c537bcf92eb6c1fdda896
|
refs/heads/main
| 2023-08-22T16:33:11.171732
| 2021-10-24T17:29:44
| 2021-10-24T17:29:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 330
|
py
|
from typing import List # not needed when we upgrade to 3.9
def print_names_to_columns(names: List[str], cols: int = 2) -> None:
    """Print *names* in *cols* columns, each cell rendered as '| <name>'
    left-padded to a 9-character field, with a trailing blank line."""
    cells = [f'| {name:{9}}' for name in names]
    output = ''
    # Emit one row per `cols` cells, joined by a single space.
    for start in range(0, len(cells), cols):
        output += ' '.join(cells[start:start + cols]) + '\n'
    print(output)
|
[
"70788275+katrinaalaimo@users.noreply.github.com"
] |
70788275+katrinaalaimo@users.noreply.github.com
|
010885dad083a7b1ec9ebb80c5c3d64b92989605
|
37930870719caede967fdf6905c032e22d086e8b
|
/scripts/imaging/chaining/slam/light_parametric__mass_light_dark__source_parametric.py
|
80e4df39df68667dc5cd365fcf51cfac21c6f9f0
|
[] |
no_license
|
Cywtim/autolens_workspace
|
cbede944c0f85ee95cd7362fee957ef77e701280
|
da40cafee8dc26e5d8b1041888fb280598e74a5e
|
refs/heads/master
| 2023-04-05T14:22:06.091992
| 2021-04-15T20:29:28
| 2021-04-15T20:29:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,711
|
py
|
"""
SLaM (Source, Light and Mass): Light Parametric + Mass Total + Source Parametric
================================================================================
SLaM pipelines break the analysis down into multiple pipelines which focus on modeling a specific aspect of the strong
lens, first the Source, then the (lens) Light and finally the Mass. Each of these pipelines has its own inputs
which customize the model and analysis in that pipeline.
The models fitted in earlier pipelines determine the model used in later pipelines. For example, if the SOURCE PIPELINE
uses a parametric `EllSersic` profile for the bulge, this will be used in the subsequent MASS LIGHT DARK PIPELINE.
Using a SOURCE PARAMETRIC PIPELINE, LIGHT PIPELINE and a MASS LIGHT DARK PIPELINE this SLaM script fits `Imaging` of
a strong lens system, where in the final model:
- The lens galaxy's light is a bulge `EllSersic`.
- The lens galaxy's stellar mass distribution is a bulge tied to the light model above.
- The lens galaxy's dark matter mass distribution is modeled as a `EllNFWMCRLudlow`.
- The source galaxy's light is a parametric `EllSersic`.
This runner uses the SLaM pipelines:
`source_parametric/source_parametric__with_lens_light`
`light_parametric/with_lens_light`
`mass_total/mass_light_dark`
Check them out for a detailed description of the analysis!
"""
# %matplotlib inline
# from pyprojroot import here
# workspace_path = str(here())
# %cd $workspace_path
# print(f"Working Directory has been set to `{workspace_path}`")
import os
import sys
from os import path
import autofit as af
import autolens as al
import autolens.plot as aplt
sys.path.insert(0, os.getcwd())
import slam
"""
__Dataset__
Load the `Imaging` data, define the `Mask2D` and plot them.
"""
# Datasets are grouped by name under dataset/imaging/with_lens_light/.
dataset_name = "light_sersic__mass_mlr_nfw__source_sersic"
dataset_path = path.join("dataset", "imaging", "with_lens_light", dataset_name)
# Load image, noise-map and PSF from .fits files; pixel_scales=0.1
# (presumably arcsec/pixel — confirm against the dataset generator).
imaging = al.Imaging.from_fits(
    image_path=path.join(dataset_path, "image.fits"),
    noise_map_path=path.join(dataset_path, "noise_map.fits"),
    psf_path=path.join(dataset_path, "psf.fits"),
    pixel_scales=0.1,
)
# Only data inside this circular mask (radius 3.0) is fitted.
mask = al.Mask2D.circular(
    shape_native=imaging.shape_native, pixel_scales=imaging.pixel_scales, radius=3.0
)
imaging = imaging.apply_mask(mask=mask)
# Visual sanity check of the masked dataset before any fitting.
imaging_plotter = aplt.ImagingPlotter(imaging=imaging)
imaging_plotter.subplot_imaging()
"""
__Paths__
The path the results of all chained searches are output:
"""
# Relative prefix under which all chained search results are written.
path_prefix = path.join("imaging", "slam", dataset_name)
"""
__Redshifts__
The redshifts of the lens and source galaxies, which are used to perform unit converions of the model and data (e.g.
from arc-seconds to kiloparsecs, masses to solar masses, etc.).
"""
redshift_lens = 0.5
redshift_source = 1.0
"""
__HYPER SETUP__
The `SetupHyper` determines which hyper-mode features are used during the model-fit.
"""
# All hyper-mode features are explicitly disabled for this run.
setup_hyper = al.SetupHyper(
    hyper_galaxies_lens=False,
    hyper_galaxies_source=False,
    hyper_image_sky=None,
    hyper_background_noise=None,
)
"""
__SOURCE PARAMETRIC PIPELINE (with lens light)__
The SOURCE PARAMETRIC PIPELINE (with lens light) uses three searches to initialize a robust model for the
source galaxy's light, which in this example:
- Uses a parametric `EllSersic` bulge.
- Uses an `EllIsothermal` model for the lens's total mass distribution with an `ExternalShear`.
__Settings__:
- Mass Centre: Fix the mass profile centre to (0.0, 0.0) (this assumption will be relaxed in the MASS LIGHT DARK
PIPELINE).
"""
analysis = al.AnalysisImaging(dataset=imaging)
bulge = af.Model(al.lp.EllSersic)
# Pin the lens bulge centre to (0.0, 0.0), per the __Settings__ note above.
bulge.centre = (0.0, 0.0)
# SOURCE PARAMETRIC PIPELINE: isothermal mass + shear, Sersic source bulge;
# the mass centre is also fixed to (0.0, 0.0) at this stage.
source_parametric_results = slam.source_parametric.with_lens_light(
    path_prefix=path_prefix,
    analysis=analysis,
    setup_hyper=setup_hyper,
    lens_bulge=bulge,
    lens_disk=None,
    mass=af.Model(al.mp.EllIsothermal),
    shear=af.Model(al.mp.ExternalShear),
    source_bulge=af.Model(al.lp.EllSersic),
    mass_centre=(0.0, 0.0),
    redshift_lens=redshift_lens,
    redshift_source=redshift_source,
)
"""
__LIGHT PARAMETRIC PIPELINE__
The LIGHT PARAMETRIC PIPELINE uses one search to fit a complex lens light model to a high level of accuracy, using the
lens mass model and source light model fixed to the maximum log likelihood result of the SOURCE PARAMETRIC PIPELINE.
In this example it:
- Uses a parametric `EllSersic` bulge [Do not use the results of the SOURCE PARAMETRIC PIPELINE to initialize priors].
- Uses an `EllIsothermal` model for the lens's total mass distribution [fixed from SOURCE PARAMETRIC PIPELINE].
- Uses the `EllSersic` model representing a bulge for the source's light [fixed from SOURCE PARAMETRIC PIPELINE].
- Carries the lens redshift, source redshift and `ExternalShear` of the SOURCE PIPELINE through to the MASS
PIPELINE [fixed values].
"""
# A fresh bulge model: per the docstring above, the LIGHT PIPELINE does not
# initialize the bulge priors from the source-pipeline results.
bulge = af.Model(al.lp.EllSersic)
light_results = slam.light_parametric.with_lens_light(
    path_prefix=path_prefix,
    analysis=analysis,
    setup_hyper=setup_hyper,
    source_results=source_parametric_results,
    lens_bulge=bulge,
    lens_disk=None,
)
"""
__MASS LIGHT DARK PIPELINE (with lens light)__
The MASS LIGHT DARK PIPELINE (with lens light) uses one search to fits a complex lens mass model to a high level of
accuracy, using the source model of the SOURCE PIPELINE and the lens light model of the LIGHT PARAMETRIC PIPELINE to
initialize the model priors . In this example it:
- Uses a parametric `EllSersic` bulge for the lens galaxy's light and its stellar mass [12 parameters: fixed from
LIGHT PARAMETRIC PIPELINE].
- The lens galaxy's dark matter mass distribution is a `EllNFWMCRLudlow` whose centre is aligned with bulge of
 the light and stellar mass model above [5 parameters].
- Uses the `EllSersic` model representing a bulge for the source's light [priors initialized from SOURCE
PARAMETRIC PIPELINE].
- Carries the lens redshift, source redshift and `ExternalShear` of the SOURCE PARAMETRIC PIPELINE through to the MASS
LIGHT DARK PIPELINE.
"""
analysis = al.AnalysisImaging(dataset=imaging)
# `lmp` profile: the Sersic bulge models both light and stellar mass here.
lens_bulge = af.Model(al.lmp.EllSersic)
dark = af.Model(al.mp.EllNFWMCRLudlow)
# Align the dark-matter halo centre with the stellar bulge centre,
# as described in the pipeline docstring above.
dark.centre = lens_bulge.centre
mass_results = slam.mass_light_dark.with_lens_light(
    path_prefix=path_prefix,
    analysis=analysis,
    setup_hyper=setup_hyper,
    source_results=source_parametric_results,
    light_results=light_results,
    lens_bulge=lens_bulge,
    lens_disk=None,
    lens_envelope=None,
    dark=dark,
)
"""
Finish.
"""
|
[
"james.w.nightingale@durham.ac.uk"
] |
james.w.nightingale@durham.ac.uk
|
d1564abb5583ba7d937b0d846491cf7aa40a1cb2
|
00ef8e1eb57b73427508b20aadf0266da6b1f900
|
/rlf/exp_mgr/viz_utils.py
|
f323dee2afc60a42bb37336d3b28e50fe18fb7b4
|
[] |
no_license
|
amy12xx/rl-toolkit
|
f4643935cc8afd960356bfeae74c233d2596dea9
|
8254df8346752ea0226ae2064cc1eabc839567b0
|
refs/heads/master
| 2023-08-14T00:56:52.270642
| 2021-09-28T15:59:32
| 2021-09-28T15:59:32
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,503
|
py
|
"""
Utilities for manipulating images, rendering images, and rendering videos.
"""
import os
import os.path as osp
from argparse import Namespace
from typing import List, Optional, Union
import cv2
import matplotlib.pyplot as plt
import numpy as np
import rlf.rl.utils as rutils
try:
import wandb
except:
pass
def append_text_to_image(
    image: np.ndarray, lines: List[str], from_bottom: bool = False
) -> np.ndarray:
    """
    Render each string in `lines` onto a black overlay and add it to `image`.

    Args:
        image: The NxMx3 frame to add the text to.
        lines: The list of strings (new line separated) to add to the image.
        from_bottom: If True, stack the lines upward from the bottom edge
            instead of downward from the top.

    Returns:
        The image with the white text overlay added.
        NOTE(review): the overlay is combined with `+` on the raw arrays, so
        bright pixels under the text may wrap around for uint8 inputs.
    """
    h, w, c = image.shape  # also validates that the input is a 3-D array
    font = cv2.FONT_HERSHEY_SIMPLEX
    font_size = 0.5
    font_thickness = 1
    overlay = np.zeros(image.shape, dtype=np.uint8)
    # Start at the bottom edge and walk up, or at the top edge and walk down.
    y = image.shape[0] if from_bottom else 0
    for line in lines:
        text_h = cv2.getTextSize(line, font, font_size, font_thickness)[0][1]
        if from_bottom:
            y -= text_h + 10
        else:
            y += text_h + 10
        cv2.putText(
            overlay,
            line,
            (10, y),
            font,
            font_size,
            (255, 255, 255),
            font_thickness,
            lineType=cv2.LINE_AA,
        )
    return image + overlay
def save_agent_obs(frames, imdim, vid_dir, name):
    """
    Write every observation frame as a .jpg under `<vid_dir>/<name>_frames/`.

    `frames` is indexed as [sequence, frame] and each element must be a
    tensor with a .cpu() method (e.g. torch); only grayscale (imdim == 1)
    inputs are supported.
    """
    out_dir = osp.join(vid_dir, name + "_frames")
    if not osp.exists(out_dir):
        os.makedirs(out_dir)
    if imdim != 1:
        raise ValueError("Only gray scale is supported right now")
    # One file per (sequence index, frame index) pair, named "<i>_<j>.jpg".
    for seq_i in range(frames.shape[0]):
        for frame_j in range(frames.shape[1]):
            img = frames[seq_i, frame_j].cpu().numpy()
            cv2.imwrite(osp.join(out_dir, f"{seq_i}_{frame_j}.jpg"), img)
    print(f"Wrote observation sequence to {out_dir}")
def save_mp4(frames, vid_dir, name, fps=60.0, no_frame_drop=False, should_print=True):
    """
    Encode `frames` (channel-last RGB) into `<vid_dir>/<name>.mp4`.

    Any existing file with the same name is replaced. If the input is a
    sequence of 4-D frame stacks, the stacks are flattened into one long
    frame sequence first. `no_frame_drop` is accepted for API compatibility
    but unused, matching the original implementation.
    """
    frames = np.array(frames)
    # Flatten a batch of 4-D stacks into a single (T, H, W, C) sequence.
    if len(frames[0].shape) == 4:
        merged = frames[0]
        for stack in frames[1:]:
            merged = np.concatenate([merged, stack])
        frames = merged
    if not osp.exists(vid_dir):
        os.makedirs(vid_dir)
    vid_file = osp.join(vid_dir, name + ".mp4")
    if osp.exists(vid_file):
        os.remove(vid_file)
    # cv2 expects (width, height); the array shape is (height, width, C).
    height, width = frames[0].shape[:-1]
    fourcc = cv2.VideoWriter_fourcc("m", "p", "4", "v")
    writer = cv2.VideoWriter(vid_file, fourcc, fps, (width, height))
    for frame in frames:
        # Drop any alpha channel and flip RGB -> BGR for OpenCV.
        writer.write(frame[..., 0:3][..., ::-1])
    writer.release()
    if should_print:
        print(f"Rendered to {vid_file}")
def plot_traj_data(
    pred: np.ndarray,
    real: np.ndarray,
    save_name: str,
    log_name: str,
    save_path_info: Union[Namespace, str],
    step: int,
    y_axis_name: str = "State %i",
    no_wb: Optional[bool] = None,
    title: str = "",
    ylim=None,
):
    """
    Plots each state dimension of a trajectory comparing a predicted and real trajectory.

    :param pred: Shape [H, D] for a trajectory of length H and state dimension D.
        D plots will be created.
    :param real: Shape [H, D].
    :param save_name: Appended to log_name. This should likely be unique so
        files on the disk are not overriden. Include file extension.
    :param log_name: Has %i in the name to dynamically insert the state dimension.
        Should NOT be unique so the log key is updated.
    :param save_path_info: The save path will either be extracted from the args or the
        path passed as a string.
    :param step: Step value reported to W&B with each logged image.
    :param y_axis_name: string with %i to dynamically insert state dimension.
    :param no_wb: If True, skip W&B logging. If None, it is read from
        `save_path_info`, which must then be a Namespace carrying `no_wb`.
    :param title: Plot title; a list supplies one title per state dimension.
    :param ylim: Optional y-axis limits forwarded to plt.ylim.
    :returns: Mean over state dimensions of the per-dimension MSE.
    :raises ValueError: If `no_wb` is None and cannot be recovered from
        `save_path_info`.
    """
    save_name = log_name + "_" + save_name
    if isinstance(save_path_info, str):
        save_path = osp.join(save_path_info, save_name)
    else:
        save_path = osp.join(rutils.get_save_dir(save_path_info), save_name)
    if no_wb is None:
        # BUG FIX: this guard used `and`, so a non-Namespace argument (e.g. a
        # plain string path) skipped the check and crashed on `vars()` /
        # attribute access below instead of raising the intended ValueError.
        if not isinstance(save_path_info, Namespace) or "no_wb" not in vars(
            save_path_info
        ):
            raise ValueError(
                f"Could not find property `no_wb` in the passed `save_path_info`"
            )
        no_wb = save_path_info.no_wb
    per_state_mse = np.mean((pred - real) ** 2, axis=0)
    per_state_sqrt_mse = np.sqrt(per_state_mse)
    H, state_dim = real.shape
    # One figure per state dimension, saved (and optionally W&B-logged).
    for state_i in range(state_dim):
        use_save_path = save_path % state_i
        plt.plot(np.arange(H), real[:, state_i], label="Real")
        plt.plot(np.arange(H), pred[:, state_i], label="Pred")
        # NOTE(review): `b=` was renamed `visible=` in matplotlib 3.5+;
        # update when the pinned matplotlib version allows.
        plt.grid(b=True, which="major", color="lightgray", linestyle="--")
        plt.xlabel("t")
        plt.ylabel(y_axis_name % state_i)
        if ylim is not None:
            plt.ylim(ylim)
        if isinstance(title, list):
            use_title = title[state_i]
        else:
            use_title = title
        if len(use_title) != 0:
            use_title += "\n"
        # Per-dimension error metrics are appended to the title.
        use_title += "MSE %.4f, SQRT MSE %.4f" % (
            per_state_mse[state_i],
            per_state_sqrt_mse[state_i],
        )
        plt.title(use_title)
        plt.legend()
        rutils.plt_save(use_save_path)
        if not no_wb:
            use_full_log_name = log_name % state_i
            wandb.log(
                {use_full_log_name: [wandb.Image(use_save_path)]},
                step=step,
            )
    return np.mean(per_state_mse)
|
[
"me@andrewszot.com"
] |
me@andrewszot.com
|
2aeb217b02dbe82cdc5445f4bec4aafb01b07802
|
68049b03dbbd9a3d778571794472e07c05fb00ad
|
/python/courses/jose_portilla/flask/sandbox/10_databases/10_1_flask_and_databases_practice/setupdatabase.py
|
e2f38694c1a0eb91547cf484e4e8aa594a19934b
|
[] |
no_license
|
tjkhara/notes
|
c9e96ecea6efed860c521eb7df562c5715091aea
|
5602a25ba23104e4154700108f1b8a3a0144f712
|
refs/heads/master
| 2023-01-20T07:42:47.129359
| 2020-11-24T06:43:24
| 2020-11-24T06:43:24
| 285,811,022
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
from basic import db, Puppy
# Create all tables declared by the SQLAlchemy model classes imported from
# `basic` (each model class is mapped to one table).
db.create_all()
sam = Puppy('Sammy', 3)
frank = Puppy('Frankie', 4)
miles = Puppy('Miles', 10)
# These print None: the objects are not in the database yet, so no primary
# key ids have been assigned.
print(sam.id)
print(frank.id)
print(miles.id)
# Stage all three Puppy objects in the session.
db.session.add_all([sam, frank, miles])
# Commit: rows are inserted and ids are populated.
db.session.commit()
print(sam.id)
print(frank.id)
print(miles.id)
|
[
"tkhara@gmail.com"
] |
tkhara@gmail.com
|
e4fd0b88f086e8155bee37b5546c0096f7760d3e
|
e78154abbb8bacf5afccda9da371684cbeabad36
|
/envs/ALPHA-POPEGO/lib/python2.5/site-packages/ipython-0.8.2-py2.5.egg/IPython/Release.py
|
c22250cf389d6cc8e86540e756de11ec217a66b1
|
[
"BSD-3-Clause"
] |
permissive
|
enterstudio/popego
|
1a196fabc374c0f45764e5c74bd7752236424040
|
2d09e793d9d2f297139edb325b8a70ddda9b2705
|
refs/heads/master
| 2021-04-09T16:39:40.781634
| 2016-10-14T16:53:47
| 2016-10-14T16:53:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,806
|
py
|
# -*- coding: utf-8 -*-
"""Release data for the IPython project.
$Id: Release.py 2855 2007-11-06 06:53:49Z vivainio $"""
#*****************************************************************************
# Copyright (C) 2001-2006 Fernando Perez <fperez@colorado.edu>
#
# Copyright (c) 2001 Janko Hauser <jhauser@zscout.de> and Nathaniel Gray
# <n8gray@caltech.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
# Name of the package for release purposes. This is the name which labels
# the tarballs and RPMs made by distutils, so it's best to lowercase it.
name = 'ipython'
# For versions with substrings (like 0.6.16.svn), use an extra . to separate
# the new substring. We have to avoid using either dashes or underscores,
# because bdist_rpm does not accept dashes (an RPM) convention, and
# bdist_deb does not accept underscores (a Debian convention).
# Subversion revision string; the trailing 'M' is stripped by rstrip('M')
# when deriving the dev version string (see the commented line below).
revision = '2876M'
#version = '0.8.2.svn.r' + revision.rstrip('M')
version = '0.8.2'
description = "An enhanced interactive Python shell."
long_description = \
"""
IPython provides a replacement for the interactive Python interpreter with
extra functionality.
Main features:
* Comprehensive object introspection.
* Input history, persistent across sessions.
* Caching of output results during a session with automatically generated
references.
* Readline based name completion.
* Extensible system of 'magic' commands for controlling the environment and
performing many tasks related either to IPython or the operating system.
* Configuration system with easy switching between different setups (simpler
than changing $PYTHONSTARTUP environment variables every time).
* Session logging and reloading.
* Extensible syntax processing for special purpose situations.
* Access to the system shell with user-extensible alias system.
* Easily embeddable in other Python programs.
* Integrated access to the pdb debugger and the Python profiler.
The latest development version is always available at the IPython subversion
repository_.
.. _repository: http://ipython.scipy.org/svn/ipython/ipython/trunk#egg=ipython-dev
"""
# NOTE: `license` shadows the builtin of the same name; kept because these
# module attributes are consumed by the packaging scripts.
license = 'BSD'
authors = {'Fernando' : ('Fernando Perez','fperez@colorado.edu'),
'Janko' : ('Janko Hauser','jhauser@zscout.de'),
'Nathan' : ('Nathaniel Gray','n8gray@caltech.edu'),
'Ville' : ('Ville Vainio','vivainio@gmail.com')
}
url = 'http://ipython.scipy.org'
download_url = 'http://ipython.scipy.org/dist'
platforms = ['Linux','Mac OSX','Windows XP/2000/NT','Windows 95/98/ME']
keywords = ['Interactive','Interpreter','Shell']
|
[
"santisiri@gmail.com"
] |
santisiri@gmail.com
|
31298541903089b84d357150a735501103053981
|
0a57f05221d425119cb2994c5686a95e01b33d46
|
/ex21.py
|
67a0f965521d5f8cce8027401d93c01786fc9214
|
[] |
no_license
|
luroto/lpthw
|
371ad2de422e7656b9f18461808d28847d17971f
|
e89329477d0c5c5b34d7998832b395c05385876b
|
refs/heads/master
| 2022-06-02T17:56:01.873932
| 2020-05-02T17:52:11
| 2020-05-02T17:52:11
| 260,742,781
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
def add(a, b):
    """Report and return the sum of *a* and *b*."""
    print(f"ADDING {a} + {b}")
    result = a + b
    return result
def substract(a, b):
    """Report and return the difference a - b.

    (The name keeps the original spelling since callers use it.)
    """
    # BUG FIX: the message read "SUBSRACTING" — a typo even against the
    # author's own function name.
    print(f"SUBSTRACTING {a} - {b}")
    return a - b
def multiply(a, b):
    """Report and return the product of *a* and *b*."""
    print(f"MULTIPLYING {a} * {b}")
    product = a * b
    return product
def divide(a, b):
    """Report and return the true-division quotient a / b."""
    print(f"DIVIDING {a} / {b}")
    quotient = a / b
    return quotient
# Demonstrate the four helpers with concrete values; each call also prints
# the operation it performs.
print("Let's do some math with just functions")
age = add(30, 5)
height = substract(78, 4)
weight = multiply(90, 2)
iq = divide(100, 2)
print(f"Age: {age}, Height: {height}, Weight {weight}, IQ {iq}")
# A puzzle for the extra credit, type it in anyway
print("Here's a puzzle")
# Evaluated inside-out: divide, then multiply, substract, and finally add.
what = add(age, substract(height, multiply(weight, divide(iq, 2))))
print("That becomes: ", what, "Can you do it by hand?")
|
[
"774@holbertonschool.com"
] |
774@holbertonschool.com
|
6a6ebe3550b44d0e3ce445ed0151ed8f95c18ec0
|
7889f7f0532db6a7f81e6f8630e399c90438b2b9
|
/2.1.2/_downloads/boxplot_demo1.py
|
aac441baa4f86269d657f3d8b96bfebf095017f7
|
[] |
no_license
|
matplotlib/matplotlib.github.com
|
ef5d23a5bf77cb5af675f1a8273d641e410b2560
|
2a60d39490941a524e5385670d488c86083a032c
|
refs/heads/main
| 2023-08-16T18:46:58.934777
| 2023-08-10T05:07:57
| 2023-08-10T05:08:30
| 1,385,150
| 25
| 59
| null | 2023-08-30T15:59:50
| 2011-02-19T03:27:35
| null |
UTF-8
|
Python
| false
| false
| 7,720
|
py
|
"""
========
Boxplots
========
Visualizing boxplots with matplotlib.
The following examples show off how to visualize boxplots with
Matplotlib. There are many options to control their appearance and
the statistics that they use to summarize the data.
"""
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.patches import Polygon
# Fixing random state for reproducibility
np.random.seed(19680801)
# fake up some data: a broad spread, a dense center, and high/low outliers
spread = np.random.rand(50) * 100
center = np.ones(25) * 50
flier_high = np.random.rand(10) * 100 + 100
flier_low = np.random.rand(10) * -100
data = np.concatenate((spread, center, flier_high, flier_low), 0)
fig, axs = plt.subplots(2, 3)
# boxplot's positional args below are (x, notch, sym, vert, whis).
# basic plot
axs[0, 0].boxplot(data)
axs[0, 0].set_title('basic plot')
# notched plot
axs[0, 1].boxplot(data, 1)
axs[0, 1].set_title('notched plot')
# change outlier point symbols
axs[0, 2].boxplot(data, 0, 'gD')
axs[0, 2].set_title('change outlier\npoint symbols')
# don't show outlier points
axs[1, 0].boxplot(data, 0, '')
axs[1, 0].set_title("don't show\noutlier points")
# horizontal boxes
axs[1, 1].boxplot(data, 0, 'rs', 0)
axs[1, 1].set_title('horizontal boxes')
# change whisker length
axs[1, 2].boxplot(data, 0, 'rs', 0, 0.75)
axs[1, 2].set_title('change whisker length')
fig.subplots_adjust(left=0.08, right=0.98, bottom=0.05, top=0.9,
                    hspace=0.4, wspace=0.3)
# fake up some more data
spread = np.random.rand(50) * 100
center = np.ones(25) * 40
flier_high = np.random.rand(10) * 100 + 100
flier_low = np.random.rand(10) * -100
d2 = np.concatenate((spread, center, flier_high, flier_low), 0)
# Reshape both datasets into column vectors before combining them.
data.shape = (-1, 1)
d2.shape = (-1, 1)
# Making a 2-D array only works if all the columns are the
# same length. If they are not, then use a list instead.
# This is actually more efficient because boxplot converts
# a 2-D array into a list of vectors internally anyway.
data = [data, d2, d2[::2, 0]]
# Multiple box plots on one Axes
fig, ax = plt.subplots()
ax.boxplot(data)
plt.show()
###############################################################################
# Below we'll generate data from five different probability distributions,
# each with different characteristics. We want to play with how an IID
# bootstrap resample of the data preserves the distributional
# properties of the original sample, and a boxplot is one visual tool
# to make this assessment
# Five sample distributions with different shapes; each is resampled once
# with an IID bootstrap so original/bootstrap pairs can be compared.
numDists = 5
randomDists = ['Normal(1,1)', ' Lognormal(1,1)', 'Exp(1)', 'Gumbel(6,4)',
               'Triangular(2,9,11)']
N = 500
norm = np.random.normal(1, 1, N)
logn = np.random.lognormal(1, 1, N)
expo = np.random.exponential(1, N)
gumb = np.random.gumbel(6, 4, N)
tria = np.random.triangular(2, 9, 11, N)
# Generate some random indices that we'll use to resample the original data
# arrays. For code brevity, just use the same random indices for each array
# BUG FIX: np.random.random_integers(0, N - 1, N) was deprecated in NumPy
# 1.11 and later removed; randint draws from the half-open range [0, N),
# which is the same set of indices.
bootstrapIndices = np.random.randint(0, N, N)
normBoot = norm[bootstrapIndices]
expoBoot = expo[bootstrapIndices]
gumbBoot = gumb[bootstrapIndices]
lognBoot = logn[bootstrapIndices]
triaBoot = tria[bootstrapIndices]
# Interleave each original sample with its bootstrap resample.
data = [norm, normBoot, logn, lognBoot, expo, expoBoot, gumb, gumbBoot,
        tria, triaBoot]
fig, ax1 = plt.subplots(figsize=(10, 6))
# BUG FIX: FigureCanvas.set_window_title was deprecated in matplotlib 3.4
# and later removed; the window title belongs to the figure manager.
fig.canvas.manager.set_window_title('A Boxplot Example')
fig.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.25)
# Draw all ten box plots; colors/markers are restyled below via setp.
bp = ax1.boxplot(data, notch=0, sym='+', vert=1, whis=1.5)
plt.setp(bp['boxes'], color='black')
plt.setp(bp['whiskers'], color='black')
plt.setp(bp['fliers'], color='red', marker='+')
# Add a horizontal grid to the plot, but make it very light in color
# so we can use it for reading data values but not be distracting
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
               alpha=0.5)
# Hide these grid behind plot objects
ax1.set_axisbelow(True)
ax1.set_title('Comparison of IID Bootstrap Resampling Across Five Distributions')
ax1.set_xlabel('Distribution')
ax1.set_ylabel('Value')
# Now fill the boxes with desired colors
boxColors = ['darkkhaki', 'royalblue']
numBoxes = numDists*2
medians = list(range(numBoxes))
for i in range(numBoxes):
    box = bp['boxes'][i]
    # Each box path has 5 vertices (the 4 corners plus the closing point).
    boxX = []
    boxY = []
    for j in range(5):
        boxX.append(box.get_xdata()[j])
        boxY.append(box.get_ydata()[j])
    boxCoords = list(zip(boxX, boxY))
    # Alternate between Dark Khaki and Royal Blue
    k = i % 2
    boxPolygon = Polygon(boxCoords, facecolor=boxColors[k])
    ax1.add_patch(boxPolygon)
    # Now draw the median lines back over what we just filled in
    med = bp['medians'][i]
    medianX = []
    medianY = []
    for j in range(2):
        medianX.append(med.get_xdata()[j])
        medianY.append(med.get_ydata()[j])
    ax1.plot(medianX, medianY, 'k')
    # Remember each median value; reused below for the upper tick labels.
    medians[i] = medianY[0]
    # Finally, overplot the sample averages, with horizontal alignment
    # in the center of each box
    ax1.plot([np.average(med.get_xdata())], [np.average(data[i])],
             color='w', marker='*', markeredgecolor='k')
# Set the axes ranges and axes labels
ax1.set_xlim(0.5, numBoxes + 0.5)
top = 40
bottom = -5
ax1.set_ylim(bottom, top)
# Each distribution name appears twice: original sample and its bootstrap.
ax1.set_xticklabels(np.repeat(randomDists, 2),
                    rotation=45, fontsize=8)
# Due to the Y-axis scale being different across samples, it can be
# hard to compare differences in medians across the samples. Add upper
# X-axis tick labels with the sample medians to aid in comparison
# (just use two decimal places of precision)
pos = np.arange(numBoxes) + 1
upperLabels = [str(np.round(s, 2)) for s in medians]
weights = ['bold', 'semibold']
for tick, label in zip(range(numBoxes), ax1.get_xticklabels()):
    k = tick % 2
    ax1.text(pos[tick], top - (top*0.05), upperLabels[tick],
             horizontalalignment='center', size='x-small', weight=weights[k],
             color=boxColors[k])
# Finally, add a basic legend
fig.text(0.80, 0.08, str(N) + ' Random Numbers',
         backgroundcolor=boxColors[0], color='black', weight='roman',
         size='x-small')
fig.text(0.80, 0.045, 'IID Bootstrap Resample',
         backgroundcolor=boxColors[1],
         color='white', weight='roman', size='x-small')
fig.text(0.80, 0.015, '*', color='white', backgroundcolor='silver',
         weight='roman', size='medium')
fig.text(0.815, 0.013, ' Average Value', color='black', weight='roman',
         size='x-small')
plt.show()
def fakeBootStrapper(n):
'''
This is just a placeholder for the user's method of
bootstrapping the median and its confidence intervals.
Returns an arbitrary median and confidence intervals
packed into a tuple
'''
if n == 1:
med = 0.1
CI = (-0.25, 0.25)
else:
med = 0.2
CI = (-0.35, 0.50)
return med, CI
inc = 0.1
e1 = np.random.normal(0, 1, size=(500,))
e2 = np.random.normal(0, 1, size=(500,))
e3 = np.random.normal(0, 1 + inc, size=(500,))
e4 = np.random.normal(0, 1 + 2*inc, size=(500,))
treatments = [e1, e2, e3, e4]
med1, CI1 = fakeBootStrapper(1)
med2, CI2 = fakeBootStrapper(2)
medians = [None, None, med1, med2]
conf_intervals = [None, None, CI1, CI2]
fig, ax = plt.subplots()
pos = np.array(range(len(treatments))) + 1
bp = ax.boxplot(treatments, sym='k+', positions=pos,
notch=1, bootstrap=5000,
usermedians=medians,
conf_intervals=conf_intervals)
ax.set_xlabel('treatment')
ax.set_ylabel('response')
plt.setp(bp['whiskers'], color='k', linestyle='-')
plt.setp(bp['fliers'], markersize=3.0)
plt.show()
|
[
"tcaswell@gmail.com"
] |
tcaswell@gmail.com
|
c8c91b8f93916d59a46e5052ed5bf42d766b5c99
|
e914da03391c81b69ae47c3dfaabb119259eb66f
|
/aon_decoder.py
|
4f5b351c967cae4c175b18aac6ed5d606fc3f548
|
[] |
no_license
|
koder-ua/python_practice
|
25f67e7c2333c0f96a2a711947e87951769570db
|
a68b8fc9c12e841b7355c745db6d104205ea568f
|
refs/heads/master
| 2021-01-22T04:22:58.642582
| 2015-12-15T14:16:40
| 2015-12-15T14:16:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,930
|
py
|
#!/usr/bin/env python
# -*- coding:utf8 -*-
"""
Homework for Automatic Number Identification (ANI)
https://github.com/koder-ua/python-classes/blob/master/slides/pdf/FF_tasks.pdf
Slide #7
"""
def decode(string):
"""
ANI decoder:
- combine repeated characters (2333# -> 3)
- remove single characters (1234 -> None)
- repeat last character before "##" (33## -> 33")
:param string: string
:return string: processed string
"""
# split all repeated symbols as a standalone strings
# string = ["".join(grp) for _, grp in itertools.groupby(string)]
splitted_string = []
n = 0
k = 0
while n < len(string):
while k < len(string) - 1:
if string[k] == string[k + 1]:
k += 1
else:
break
k += 1
splitted_string.append(string[n:k])
n = k
# get first character from splitted strings + remove single-length strings
string = "".join([i[0] for i in splitted_string if len(i) != 1])
result = ""
for i, v in enumerate(string):
if v == "#":
if i == 0 and len(string) > 1: # checking leading '#' in string
continue
elif i == 0:
return None
else:
result += string[i - 1]
else:
result += string[i]
return result
def test_decode():
assert decode("") == ""
assert decode("1") == ""
assert decode("11111") == "1"
assert decode("11#") == "1"
assert decode("11##") == "11"
assert decode("11122234###55") == "1225"
assert decode("##") is None
assert decode("12345##") is None
assert decode("221133444##") == "21344"
assert decode("###33###22##") == "3322"
assert decode("###33###22##1#") == "3322"
print("Passed successfully")
def main():
"main"
test_decode()
return 0
if __name__ == "__main__":
exit(main())
|
[
"vitaliy@kulanov.org.ua"
] |
vitaliy@kulanov.org.ua
|
8cc39834a3986a41c0b6c4717eda289d67aa0f2a
|
7d3cb9e6ac0f2a0f217fb8ad77076fd4f719a437
|
/xen_signature/apps/pdf_to_image/migrations/0003_auto_20181020_1658.py
|
75cf4f4498115f51b134898cac32d0c1bc38dea3
|
[] |
no_license
|
FlashBanistan/django-xen-signature
|
b88b0698b00390e019ebb419d74043f1e36777ba
|
b390e9aa069c89021e63e41a554489ccf9d685a5
|
refs/heads/master
| 2020-04-02T06:11:24.486660
| 2018-10-26T17:17:20
| 2018-10-26T17:17:20
| 154,135,107
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 741
|
py
|
# Generated by Django 2.1.2 on 2018-10-20 16:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pdf_to_image', '0002_auto_20181020_1657'),
]
operations = [
migrations.RenameField(
model_name='documentimage',
old_name='image_height',
new_name='height',
),
migrations.RenameField(
model_name='documentimage',
old_name='image_width',
new_name='width',
),
migrations.AlterField(
model_name='documentimage',
name='image',
field=models.ImageField(height_field='height', upload_to='', width_field='width'),
),
]
|
[
"FlashBanistan66@gmail.com"
] |
FlashBanistan66@gmail.com
|
29aa7eefb7323c5953972bcecbf05797b238b684
|
e42cce21fbb3c4fe3f271c2029d9659270a968ab
|
/vmrunapi/vmrunapi.py
|
cde0c05a165dbfc2cd3c7b87f6803f601bfd2453
|
[] |
no_license
|
cloudbase/maas-hacks
|
d086a91338e45121dafb33734ba4977e31851dbc
|
0e2cc5537ff64376505c1e9e77dcdf3657fc4d78
|
refs/heads/master
| 2016-09-06T13:02:15.808249
| 2014-04-30T00:24:58
| 2014-04-30T00:24:58
| 17,869,386
| 5
| 0
| null | 2014-05-06T01:23:22
| 2014-03-18T14:43:58
|
Python
|
UTF-8
|
Python
| false
| false
| 3,400
|
py
|
#!/usr/bin/python
import flask
import os
import re
import subprocess
import sys
if sys.platform == 'win32':
from win32com.shell import shell
from win32com.shell import shellcon
app = flask.Flask(__name__)
STARTED = "started"
STOPPED = "stopped"
def _get_matching_vmx_path(path, mac_address):
mac_address_re = re.compile(r'^ethernet(\d+)\.address(\s*)=(\s*)\"%s\"$' %
mac_address.upper())
for root, dirs, file_names in os.walk(path):
for file_name in file_names:
if os.path.splitext(file_name)[1].lower() == '.vmx':
vmx_path = os.path.join(root, file_name)
with open(vmx_path, 'rb') as f:
for l in f:
if mac_address_re.match(l):
return vmx_path
def _get_vmx_base_path():
if sys.platform == 'darwin':
return os.path.expanduser("~/Documents/Virtual Machines")
elif sys.platform == 'win32':
documents_dir = shell.SHGetFolderPath(0, shellcon.CSIDL_PERSONAL,
None, 0)
return os.path.join(documents_dir, "Virtual Machines")
else:
return os.path.expanduser("~/vmware")
def _get_vmrun():
if sys.platform == 'darwin':
return ("/Applications/VMware Fusion.app/Contents/Library/vmrun",
"fusion")
else:
# Make sure to have vmrun in the PATH
return ("vmrun", "ws")
def _execute_process(args):
p = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=False)
(out, err) = p.communicate()
return (out, err, p.returncode)
def _exec_vmrun_cmd(cmd, vmx_path=None):
(vmrun_path, vmrun_type) = _get_vmrun()
args = [vmrun_path, "-T", vmrun_type, cmd]
if vmx_path:
args.append(vmx_path)
(out, err, exit_code) = _execute_process(args)
if exit_code:
raise Exception("vmrun failed: %s" % out)
return out
@app.route('/vmrun/vm/find_by_mac_address/<string:mac_address>',
methods = ['GET'])
def get_vmx_path_bymac_address(mac_address):
base_path = _get_vmx_base_path()
vmx_path = _get_matching_vmx_path(base_path, mac_address)
if not vmx_path:
flask. abort(404)
else:
return vmx_path
def _get_json_vmx_path():
if not flask.request.json:
flask.abort(400)
vmx_path = flask.request.json.get('vmx_path')
if not vmx_path:
flask.abort(400)
if not os.path.exists(vmx_path):
flask.abort(404)
return vmx_path
@app.route('/vmrun/vm/start', methods = ['POST'])
def start_vm():
vmx_path = _get_json_vmx_path()
_exec_vmrun_cmd("start", vmx_path)
return STARTED
@app.route('/vmrun/vm/stop', methods = ['POST'])
def stop_vm():
vmx_path = _get_json_vmx_path()
_exec_vmrun_cmd("stop", vmx_path)
return STARTED
@app.route('/vmrun/vm/status', methods = ['POST'])
def get_vm_status():
status = STOPPED
vmx_path = _get_json_vmx_path()
running_vmx_paths = _exec_vmrun_cmd("list").split("\n")[1:-1]
for running_vmx_path in running_vmx_paths:
if vmx_path == running_vmx_path:
status = STARTED
break
return status
if __name__ == '__main__':
app.run(host="0.0.0.0", port=6000, debug = True)
|
[
"apilotti@cloudbasesolutions.com"
] |
apilotti@cloudbasesolutions.com
|
57c5f0267b758e4eb4c42389e10c758178243ed3
|
c703b8ac3b5545857f6c95efa2d61eaf7a664021
|
/iPERCore/models/networks/discriminators/patch_dis.py
|
83491547d5a6977f9e719ae8d16041dd07558ae4
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license",
"Apache-2.0",
"BSD-2-Clause"
] |
permissive
|
iPERDance/iPERCore
|
d29681d229b3098b3517b1abf4f7ea65f579de73
|
fcf9a18ffd66bf3fdd3eea4153a3bc4785131848
|
refs/heads/main
| 2023-07-30T15:04:15.835396
| 2023-04-12T14:21:23
| 2023-04-12T14:21:23
| 313,664,064
| 2,520
| 339
|
Apache-2.0
| 2023-05-12T03:26:52
| 2020-11-17T15:36:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,757
|
py
|
# Copyright (c) 2020-2021 impersonator.org authors (Wen Liu and Zhixin Piao). All rights reserved.
import torch
import torch.nn as nn
import functools
class PatchDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=32, n_layers=3, max_nf_mult=8,
norm_type="batch", use_sigmoid=False):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(PatchDiscriminator, self).__init__()
norm_layer = self._get_norm_layer(norm_type)
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func != nn.BatchNorm2d
else:
use_bias = norm_layer != nn.BatchNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, max_nf_mult)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, max_nf_mult)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
if use_sigmoid:
sequence += [nn.Sigmoid()]
self.model = nn.Sequential(*sequence)
def _get_norm_layer(self, norm_type="batch"):
if norm_type == "batch":
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == "instance":
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
elif norm_type == "batchnorm2d":
norm_layer = nn.BatchNorm2d
else:
raise NotImplementedError(f"normalization layer [{norm_type}] is not found")
return norm_layer
def forward(self, input):
"""Standard forward."""
return self.model(input)
|
[
"liuwen@shanghaitech.edu.cn"
] |
liuwen@shanghaitech.edu.cn
|
e0a7315e974496146f931f1dccb8aff89ce1264d
|
1ca94f20401cc0bd33a7a935dea2f3c66776dbe4
|
/users/models.py
|
b8680d9a1d986368544da5d9676214693646fa7a
|
[] |
no_license
|
liangsongyou/news-18
|
468d06a854e3bf6b5389e6efbb2b1a812d45fef6
|
45619e32d7f950d75949912ee8c570903f6c39f3
|
refs/heads/master
| 2020-04-11T15:59:26.136085
| 2018-12-15T13:54:16
| 2018-12-15T13:54:16
| 161,909,795
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 171
|
py
|
from django.contrib.auth.models import AbstractUser
from django.db import models
class CustomUser(AbstractUser):
age = models.PositiveIntegerField(default=0)
|
[
"yuebei58@gmail.com"
] |
yuebei58@gmail.com
|
2cb549fab7ccf5db93a112f7980fa14fbc3ffbd0
|
8e7e51ff8b9c1103d10aa86c3d1cb446cfb25e4c
|
/djeniesecurity/djeniesecurity/urls.py
|
c409e1093e267c2e36d190bdc95028974c4ec905
|
[] |
no_license
|
huogerac/modulo4
|
b2c6e07f5e2928182a03edac503d0a4468736007
|
b30e056fb5a4703255982a349ed184beaea010fd
|
refs/heads/master
| 2021-01-17T21:25:03.926382
| 2013-09-23T10:21:51
| 2013-09-23T10:21:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 572
|
py
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'djeniesecurity.views.home', name='home'),
# url(r'^djeniesecurity/', include('djeniesecurity.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
)
urlpatterns += patterns('',
url(r'', include('cms.urls')),
url(r'', include('sms.urls')),
)
|
[
"huogerac@gmail.com"
] |
huogerac@gmail.com
|
b12c0fb45f697b54880348bc5234ea5e8967228d
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startCirq1197.py
|
1ebb841cfc54a0fc26e0f2bd3522d7dfdaa63405
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,855
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=5
# total number=51
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=3
c.append(cirq.H.on(input_qubit[1])) # number=4
c.append(cirq.H.on(input_qubit[2])) # number=5
c.append(cirq.H.on(input_qubit[3])) # number=6
c.append(cirq.H.on(input_qubit[4])) # number=21
for i in range(2):
c.append(cirq.H.on(input_qubit[0])) # number=1
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[0])) # number=17
c.append(cirq.H.on(input_qubit[1])) # number=18
c.append(cirq.H.on(input_qubit[2])) # number=19
c.append(cirq.H.on(input_qubit[3])) # number=20
c.append(cirq.H.on(input_qubit[0])) # number=31
c.append(cirq.CZ.on(input_qubit[1],input_qubit[0])) # number=32
c.append(cirq.H.on(input_qubit[0])) # number=33
c.append(cirq.H.on(input_qubit[1])) # number=44
c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=45
c.append(cirq.H.on(input_qubit[1])) # number=46
c.append(cirq.X.on(input_qubit[1])) # number=41
c.append(cirq.H.on(input_qubit[1])) # number=48
c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=49
c.append(cirq.H.on(input_qubit[1])) # number=50
c.append(cirq.X.on(input_qubit[0])) # number=26
c.append(cirq.CNOT.on(input_qubit[1],input_qubit[0])) # number=27
c.append(cirq.H.on(input_qubit[1])) # number=37
c.append(cirq.CZ.on(input_qubit[0],input_qubit[1])) # number=38
c.append(cirq.H.on(input_qubit[1])) # number=39
c.append(cirq.X.on(input_qubit[1])) # number=35
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=36
c.append(cirq.X.on(input_qubit[2])) # number=11
c.append(cirq.X.on(input_qubit[3])) # number=12
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[2])) # number=43
c.append(cirq.CNOT.on(input_qubit[3],input_qubit[2])) # number=47
c.append(cirq.X.on(input_qubit[0])) # number=13
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=22
c.append(cirq.X.on(input_qubit[1])) # number=23
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[1])) # number=24
c.append(cirq.X.on(input_qubit[2])) # number=15
c.append(cirq.X.on(input_qubit[1])) # number=29
c.append(cirq.Y.on(input_qubit[4])) # number=28
c.append(cirq.X.on(input_qubit[3])) # number=16
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
return ''.join(str(int(b)) for b in bits)
if __name__ == '__main__':
qubit_count = 5
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq1197.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
e1241643f1fdabd9675e8ec25ea0a5b2350349a4
|
62d6a37e1fb1b224b53e14a1cf151ef0571aa20f
|
/tests/fixtures/tests.py
|
abc94a63d35f2bfe008a3e1bdcf4d4b144ec1bb5
|
[] |
no_license
|
katrid/orun
|
4fa0f291a1ef43f16bc1857a170fc0b2e5e06739
|
bfc6dae06182124ba75b1f3761d81ba8ca387dea
|
refs/heads/master
| 2023-08-30T03:58:34.570527
| 2023-08-09T04:05:30
| 2023-08-09T04:05:30
| 66,562,767
| 14
| 4
| null | 2023-01-06T22:29:37
| 2016-08-25T14:01:44
|
Python
|
UTF-8
|
Python
| false
| false
| 2,024
|
py
|
from orun.test import TestCase
from orun.apps import apps
from orun.db import connection
class FixturesTest(TestCase):
fixtures = {
'fixtures': [
'fixtures.author.csv', 'fixtures.author.tsv', 'data.xml', 'fixtures.book.tsv', 'fixtures.book.csv',
'metadata.%(db_vendor)s.sql',
],
}
def test_load_data(self):
Author = apps['fixtures.author']
Book = apps['fixtures.book']
objs = list(Author.objects.all())
self.assertEqual(len(objs), 9)
book = Book.objects.get(pk=1)
self.assertEqual(book.author.name, 'Xml Author 1')
book = Book.objects.get(pk=2)
self.assertEqual(book.author.name, 'Author 2')
def test_xml_objects(self):
Object = apps['ir.object']
obj1 = Object.objects.get_object('fixtures/xml/author/1')
self.assertEqual(obj1.name, 'fixtures/xml/author/1')
author1 = obj1.content_object
self.assertEqual(author1.name, 'Xml Author 1')
self.assertEqual(obj1.name, 'fixtures/xml/author/1')
obj2 = Object.objects.get_object('fixtures/xml/author/2')
author2 = obj2.content_object
self.assertEqual(obj2.name, 'fixtures/xml/author/2')
self.assertEqual(author2.name, 'Xml Author 2')
# test deleted
with self.assertRaises(Object.DoesNotExist):
Object.objects.get_object('fixtures/xml/author/4/delete')
Author = apps['fixtures.author']
with self.assertRaises(Author.DoesNotExist):
Author.objects.get(name='Xml Author 4')
def test_sql_fixtures(self):
with connection.cursor() as cursor:
# Testing created view
cursor.execute('''select * from books order by id''')
books = cursor.fetchall()
self.assertEqual(len(books), 2)
self.assertEqual(books[0][0], 1)
self.assertEqual(books[1][0], 2)
def test_web_fixtures(self):
View = apps['ui.view']
views = View.objects.all()
|
[
"alexandre@katrid.com"
] |
alexandre@katrid.com
|
1e6895e6f359a03fff2e6129c7a5e162e1c1d48a
|
4ad53199feb82d911bd2edbe0b5713da8c1909c1
|
/pytablewriter/style/__init__.py
|
6be6ff1844a0928139b11cb5ac086bac8216c4f9
|
[
"MIT"
] |
permissive
|
thombashi/pytablewriter
|
9bf8b73da0eb18dba835e951021fd581958a4d12
|
49f9da777625a5b920c2c87c5e086d33d19a80d4
|
refs/heads/master
| 2023-08-19T05:13:15.333317
| 2023-07-01T08:03:47
| 2023-07-01T08:03:47
| 59,484,958
| 609
| 43
|
MIT
| 2021-09-20T15:26:45
| 2016-05-23T13:25:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,006
|
py
|
from dataproperty import Align, Format
from ._cell import Cell
from ._font import FontSize, FontStyle, FontWeight
from ._style import DecorationLine, Style, ThousandSeparator, VerticalAlign
from ._styler import (
GFMarkdownStyler,
HtmlStyler,
LatexStyler,
MarkdownStyler,
NullStyler,
ReStructuredTextStyler,
TextStyler,
get_align_char,
)
from ._styler_interface import StylerInterface
from ._theme import ColSeparatorStyleFilterFunc, StyleFilterFunc, Theme, fetch_theme, list_themes
__all__ = (
"Align",
"Format",
"Cell",
"FontSize",
"FontStyle",
"FontWeight",
"Style",
"ThousandSeparator",
"VerticalAlign",
"DecorationLine",
"GFMarkdownStyler",
"HtmlStyler",
"LatexStyler",
"MarkdownStyler",
"NullStyler",
"ReStructuredTextStyler",
"StylerInterface",
"TextStyler",
"ColSeparatorStyleFilterFunc",
"StyleFilterFunc",
"Theme",
"get_align_char",
"fetch_theme",
"list_themes",
)
|
[
"tsuyoshi.hombashi@gmail.com"
] |
tsuyoshi.hombashi@gmail.com
|
c4ab791f131770d16025600c9969fa275bcb485e
|
6527b66fd08d9e7f833973adf421faccd8b765f5
|
/yuancloud/recicler/localizaciones/l10n_be_invoice_bba/__init__.py
|
8c3517b22a87f1e464f6866fc7d7621f263d5a7d
|
[] |
no_license
|
cash2one/yuancloud
|
9a41933514e57167afb70cb5daba7f352673fb4d
|
5a4fd72991c846d5cb7c5082f6bdfef5b2bca572
|
refs/heads/master
| 2021-06-19T22:11:08.260079
| 2017-06-29T06:26:15
| 2017-06-29T06:26:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
# -*- encoding: utf-8 -*-
# Part of YuanCloud. See LICENSE file for full copyright and licensing details.
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
import partner
import invoice
|
[
"liuganghao@lztogether.com"
] |
liuganghao@lztogether.com
|
ae825fe3516b3c4458a8137c101f289786af735c
|
3ced55b04ec82df5257f0e3b500fba89ddf73a8a
|
/src/stk/molecular/topology_graphs/cage/two_plus_four/two_plus_four.py
|
80aa18537329f8d918ee7fea003280f088245115
|
[
"MIT"
] |
permissive
|
rdguerrerom/stk
|
317282d22f5c4c99a1a8452023c490fd2f711357
|
1ac2ecbb5c9940fe49ce04cbf5603fd7538c475a
|
refs/heads/master
| 2023-08-23T21:04:46.854062
| 2021-10-16T14:01:38
| 2021-10-16T14:01:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,112
|
py
|
"""
Two Plus Four
=============
"""
from ..cage import Cage
from ..vertices import LinearVertex, NonLinearVertex
from ...topology_graph import Edge
class TwoPlusFour(Cage):
"""
Represents a capsule cage topology graph.
Unoptimized construction
.. moldoc::
import moldoc.molecule as molecule
import stk
bb1 = stk.BuildingBlock(
smiles='BrCCBr',
functional_groups=[stk.BromoFactory()],
)
bb2 = stk.BuildingBlock(
smiles='Brc1c(Br)cc(Br)c(Br)c1',
functional_groups=[stk.BromoFactory()],
)
cage = stk.ConstructedMolecule(
topology_graph=stk.cage.TwoPlusFour((bb1, bb2)),
)
moldoc_display_molecule = molecule.Molecule(
atoms=(
molecule.Atom(
atomic_number=atom.get_atomic_number(),
position=position,
) for atom, position in zip(
cage.get_atoms(),
cage.get_position_matrix(),
)
),
bonds=(
molecule.Bond(
atom1_id=bond.get_atom1().get_id(),
atom2_id=bond.get_atom2().get_id(),
order=bond.get_order(),
) for bond in cage.get_bonds()
),
)
:class:`.Collapser` optimized construction
.. moldoc::
import moldoc.molecule as molecule
import stk
bb1 = stk.BuildingBlock(
smiles='BrCCBr',
functional_groups=[stk.BromoFactory()],
)
bb2 = stk.BuildingBlock(
smiles='Brc1c(Br)cc(Br)c(Br)c1',
functional_groups=[stk.BromoFactory()],
)
cage = stk.ConstructedMolecule(
topology_graph=stk.cage.TwoPlusFour(
building_blocks=(bb1, bb2),
optimizer=stk.Collapser(),
),
)
moldoc_display_molecule = molecule.Molecule(
atoms=(
molecule.Atom(
atomic_number=atom.get_atomic_number(),
position=position,
) for atom, position in zip(
cage.get_atoms(),
cage.get_position_matrix(),
)
),
bonds=(
molecule.Bond(
atom1_id=bond.get_atom1().get_id(),
atom2_id=bond.get_atom2().get_id(),
order=bond.get_order(),
) for bond in cage.get_bonds()
),
)
Nonlinear building blocks with four functional groups are
required for this topology.
Linear building blocks with two functional groups are required for
this topology.
When using a :class:`dict` for the `building_blocks` parameter,
as in :ref:`cage-topology-graph-examples`:
*Multi-Building Block Cage Construction*, a
:class:`.BuildingBlock`, with the following number of functional
groups, needs to be assigned to each of the following vertex ids:
| 4-functional groups: (0, 1)
| 2-functional groups: (2, 3, 4, 5)
See :class:`.Cage` for more details and examples.
"""
_vertex_prototypes = (
NonLinearVertex(0, [0, 0, -1]),
NonLinearVertex(1, [0, 0, 1]),
LinearVertex(2, [2, 0, 0], False),
LinearVertex(3, [-2, 0, 0], False),
LinearVertex(4, [0, 2, 0], False),
LinearVertex(5, [0, -2, 0], False),
)
_edge_prototypes = (
Edge(0, _vertex_prototypes[2], _vertex_prototypes[0]),
Edge(1, _vertex_prototypes[2], _vertex_prototypes[1]),
Edge(2, _vertex_prototypes[3], _vertex_prototypes[0]),
Edge(3, _vertex_prototypes[3], _vertex_prototypes[1]),
Edge(4, _vertex_prototypes[4], _vertex_prototypes[0]),
Edge(5, _vertex_prototypes[4], _vertex_prototypes[1]),
Edge(6, _vertex_prototypes[5], _vertex_prototypes[0]),
Edge(7, _vertex_prototypes[5], _vertex_prototypes[1])
)
_num_windows = 4
_num_window_types = 1
|
[
"noreply@github.com"
] |
rdguerrerom.noreply@github.com
|
6305425047bc6275d2a171616fbdffe8a360ec2c
|
674f5dde693f1a60e4480e5b66fba8f24a9cb95d
|
/armulator/armv6/opcodes/concrete/rsb_register_shifted_register_a1.py
|
584074016b2edaaf59d9ac2ff84cb51509bec935
|
[
"MIT"
] |
permissive
|
matan1008/armulator
|
75211c18ebc9cd9d33a02890e76fc649483c3aad
|
44f4275ab1cafff3cf7a1b760bff7f139dfffb07
|
refs/heads/master
| 2023-08-17T14:40:52.793120
| 2023-08-08T04:57:02
| 2023-08-08T04:57:02
| 91,716,042
| 29
| 7
|
MIT
| 2023-08-08T04:55:59
| 2017-05-18T16:37:55
|
Python
|
UTF-8
|
Python
| false
| false
| 837
|
py
|
from armulator.armv6.bits_ops import substring, bit_at
from armulator.armv6.opcodes.abstract_opcodes.rsb_register_shifted_register import RsbRegisterShiftedRegister
from armulator.armv6.shift import decode_reg_shift
class RsbRegisterShiftedRegisterA1(RsbRegisterShiftedRegister):
@staticmethod
def from_bitarray(instr, processor):
rm = substring(instr, 3, 0)
type_o = substring(instr, 6, 5)
rs = substring(instr, 11, 8)
rd = substring(instr, 15, 12)
rn = substring(instr, 19, 16)
s = bit_at(instr, 20)
if rd == 0b1111 or rn == 0b1111 or rm == 0b1111 or rs == 0b1111:
print('unpredictable')
else:
shift_t = decode_reg_shift(type_o)
return RsbRegisterShiftedRegisterA1(instr, setflags=s, m=rm, s=rs, d=rd, n=rn, shift_t=shift_t)
|
[
"matan1008@gmail.com"
] |
matan1008@gmail.com
|
8d26a6f969809cb725345cdc97e909cdc61f535b
|
97a39cfdbd0ae4310eef729785630438278d3279
|
/manage.py
|
4dfa3b998a58a9b60a40062cf56854fe68d23419
|
[
"Apache-2.0"
] |
permissive
|
cvlucian/confidant
|
e9ddf15885ec6a4442422a00d7c9d2a84f8dfa20
|
8e273fb813d57ae831343f7d047b32a8f62458cb
|
refs/heads/master
| 2021-01-13T09:37:39.757319
| 2020-09-23T14:35:53
| 2020-09-23T14:35:53
| 72,053,900
| 1
| 0
|
NOASSERTION
| 2020-09-23T14:36:19
| 2016-10-26T23:44:55
|
Python
|
UTF-8
|
Python
| false
| false
| 809
|
py
|
from flask.ext.script import Manager
import confidant.workarounds # noqa
from confidant import app
from scripts.utils import ManageGrants
from scripts.utils import RevokeGrants
from scripts.bootstrap import GenerateSecretsBootstrap
from scripts.bootstrap import DecryptSecretsBootstrap
manager = Manager(app.app)
# Ensure KMS grants are setup for services
manager.add_command("manage_kms_auth_grants", ManageGrants)
# Revoke all KMS grants
manager.add_command("revoke_all_kms_auth_grants", RevokeGrants)
# Generate encrypted blob from a file
manager.add_command("generate_secrets_bootstrap", GenerateSecretsBootstrap)
# Show the YAML formatted secrets_bootstrap in a decrypted form
manager.add_command("decrypt_secrets_bootstrap", DecryptSecretsBootstrap)
if __name__ == "__main__":
manager.run()
|
[
"rlane@lyft.com"
] |
rlane@lyft.com
|
78df3320c27ab2b3e2c072df6c4e2ef16a3b7759
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_116/1469.py
|
8fbe59658076b2a46a7c77ed1bf039f34b16f0ae
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,234
|
py
|
import numpy as np
def checkWin(p):
#2 is X, 3 is O, check using the MOD method
if p == 0:
return "no"
if p % 2 == 0 and p % 3 != 0:
return 'X'
if p % 2 != 0 and p % 3 == 0:
return 'O'
else:
return 'draw'
def solve(filename):
fin = open(filename + '.in', 'r')
fout = open(filename + '.out', 'w')
T = int(fin.readline())
for case in xrange(T):
answer = ""
board = np.zeros((4, 4), np.int)
for i in xrange(4):
line = fin.readline().strip()
for j in xrange(4):
if line[j] == 'X':
board[i, j] = 2
elif line[j] == 'O':
board[i, j] = 3
elif line[j] == 'T':
board[i, j] = 1
#check rows and columns
prods = []
for i in xrange(4):
row_prod = np.prod(board[i, :])
col_prod = np.prod(board[:, i])
prods.append(checkWin(row_prod))
prods.append(checkWin(col_prod))
#print checkWin(row_prod), checkWin(col_prod)
#diagonals
prod_diag1 = 1
prod_diag2 = 1
for i in xrange(4):
prod_diag1 *= board[i, i]
prod_diag2 *= board[i, 3 - i]
prods.append(checkWin(prod_diag1))
prods.append(checkWin(prod_diag2))
#check answers
if 'no' in prods:
if 'X' not in prods and 'O' not in prods:
answer = 'Game has not completed'
elif 'X' in prods and 'O' not in prods:
answer = 'X won'
elif 'X' not in prods and 'O' in prods:
answer = 'O won'
else:
if 'X' not in prods and 'O' not in prods:
answer = 'Draw'
elif 'X' in prods and 'O' not in prods:
answer = 'X won'
elif 'X' not in prods and 'O' in prods:
answer = 'O won'
print answer
fout.write(('Case #%d: ' % (case + 1)) + str(answer) + '\n')
fin.readline()
fin.close()
fout.close()
if __name__ == "__main__":
# solve("A-tiny")
# solve("A-small-attempt0")
solve("A-large")
#solve("input")
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
0dc35183393d83eb31bf25b1f1f39d1850886c4d
|
17ef1c7483843540ce4d063708afa65430b9301f
|
/tests/test_allocate.py
|
b4281fbb11694e1dbc38fd7af714e2195439f9b5
|
[
"MIT"
] |
permissive
|
CivicKnowledge/synpums
|
e01f8815c5fe118ec748c248b84c862a1db15a3f
|
dd3793388862aa7b43eee2fc2aa96fcf21014267
|
refs/heads/main
| 2023-01-03T09:04:37.021235
| 2020-10-31T00:17:15
| 2020-10-31T00:17:15
| 304,128,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 835
|
py
|
import unittest
import warnings
import pandas as pd
import rowgenerators as rg
from synpums import *
from synpums.util import *
warnings.filterwarnings("ignore")
state = 'RI'
year = 2018
release = 5
cache_dir = '/tmp/synpums'
class TestAllocate(unittest.TestCase):
def test_basic(self):
tasks = AllocationTask.get_tasks(cache_dir, 'RI', ignore_completed=False)
task = tasks[24]
task.init()
print(task.m90_rms_error)
task.initialize_weights_sample()
print(f"te={task.total_error}, rms={task.m90_rms_error}")
args = dict(N=2000, min_iter=1000, step_size_max=15, step_size_min=1, reversal_rate=.4, max_ssm=150)
rows = task.vector_walk(**args)
print(f"te={task.total_error}, rms={task.m90_rms_error}")
if __name__ == '__main__':
unittest.main()
|
[
"eric@civicknowledge.com"
] |
eric@civicknowledge.com
|
5a5e0ce76558c3b94ad2149478844745d1f5087a
|
67f19ebb1fb3189e4c2f99484c1dc13af5099edb
|
/wii_packages/enso/gage_don_h/gage_don_h.py
|
08da11557b1626666c779f60cf484d446bd3aa80
|
[] |
no_license
|
delguoqing/PyLMPlayer
|
609c4fe35e56e4ce3ce30eeb2e9244aad5ea1609
|
db8a1edf70ac1c11deffddc458788b3a2c2078df
|
refs/heads/master
| 2021-01-22T05:06:00.491732
| 2013-09-13T04:54:23
| 2013-09-13T04:54:23
| 8,878,510
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
def func0(this, _global):
this.stop()
def func1(this, _global):
this.gotoAndPlay("fever")
def func2(this, _global):
if 2 <= this.fever_gage._play_head <= 23:
this.fever_gage.gotoAndPlay("toNormal")
this.stop()
def func3(this, _global):
this.fever_gage.gotoAndPlay("toFever")
this.stop()
DATA = (
func0,
func1,
func2,
func3,
)
|
[
"delguoqing@hotmail.com"
] |
delguoqing@hotmail.com
|
8d13198a10bafeba6b94dad3cf02953c983de332
|
67325192c1e528a39d457f11e61b480d68826708
|
/mods/mcpython/Item/gold_block.py
|
248d5e0998a17f7d438e81b093ded15dc48a62bd
|
[
"MIT"
] |
permissive
|
vashistaarav1611/mcpython-a-minecraft-clone-in-python
|
5851b377b54fd2b28c106112c7b18f397b71ab50
|
c16cd66f319efdeec4130e1a43f5a857caf1ea13
|
refs/heads/master
| 2023-02-01T22:48:51.787106
| 2020-12-21T15:02:25
| 2020-12-21T15:02:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 222
|
py
|
from .Item import *
class GoldBlock(Item):
    """Gold block item: supplies its registry name and texture path."""

    def getName(self):
        # Namespaced identifier used for registration/lookup.
        return "minecraft:gold_block"

    def getTexturFile(self):
        # Path is relative to the game's working directory.
        return "./assets/textures/items/gold_block.png"

# Register the item with the global handler so the game can instantiate it.
handler.register(GoldBlock)
|
[
"baulukas1301@googlemail.com"
] |
baulukas1301@googlemail.com
|
c2a50a2894a8886745a3b0cf6176b87cdd9ff324
|
bd14c979335112b7718b0feda18ebf0e3b40fe5c
|
/contest_093/b_small_and_large_integers_2nd.py
|
5090fc480a7ed5adb7ee90d373f591aadebb6a25
|
[] |
no_license
|
ababa831/atcoder_beginners
|
22c57b15333d110126d1b1afadc0ff5e8784fc4f
|
1a30882ce7f20f312045d5dc7bfaa5688cc8a88e
|
refs/heads/master
| 2023-03-07T15:47:19.750682
| 2020-03-04T19:53:45
| 2020-03-04T19:53:45
| 143,360,607
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 245
|
py
|
# Accepted
# AtCoder ABC093 B: print every integer in [a, b] that is among the k
# smallest (a..a+k-1) or the k largest (b-k+1..b) of the range.
a, b, k = map(int, input().split())
# k candidates from the low end of the range.
lower_list = [i for i in range(a, a + k)]
# k candidates from the high end (built descending, order fixed by sorted()).
upper_list = [i for i in range(b, b - k, -1)]
# Deduplicate (the two windows overlap when 2k > b - a + 1) and sort.
out_list = sorted(set(lower_list + upper_list))
for out in out_list:
    # Guard against candidates that fell outside [a, b] when k is large.
    if a <= out <= b:
        print(out)
|
[
"flvonlineconverter@gmail.com"
] |
flvonlineconverter@gmail.com
|
dbc0f0130cf61ccefa2cb7304519c144f1dc48bf
|
a3c34ad9425cf9c16a09423278b81c20edd8d77a
|
/sms_frame/models/sms_compose.py
|
3cbd3cbd6f0bcc7f98da112e58038923a071a292
|
[] |
no_license
|
meswapnilwagh/Odoo9
|
d1dca7de18ac555abe2da96fb78f0d3bd3835650
|
91f1e545ab597ca89283b8dc5dbf3d7f5bd5df5b
|
refs/heads/9.0
| 2020-04-08T00:24:25.179940
| 2016-02-22T08:49:56
| 2016-02-22T08:49:56
| 52,294,854
| 0
| 1
| null | 2016-02-22T18:20:41
| 2016-02-22T18:20:40
| null |
UTF-8
|
Python
| false
| false
| 3,144
|
py
|
# -*- coding: utf-8 -*
from datetime import datetime
from openerp import api, fields, models
class SmsCompose(models.Model):
    """Composer for sending one SMS from a business record.

    Prefills its content from an ``sms.template`` and delegates the actual
    send to the gateway behind the chosen sender number.  Failed sends
    reopen this form with the gateway's error; successful sends are logged
    as ``sms.message`` records.
    """
    _name = "sms.compose"

    # Gateway error from a previous failed attempt (shown on the reopened form).
    error_message = fields.Char(readonly=True)
    # id of the record (of model `model`) this SMS is about.
    record_id = fields.Integer()
    model = fields.Char()
    sms_template_id = fields.Many2one('sms.template', string="Template")
    from_mobile_id = fields.Many2one('sms.number', required=True, string="From Mobile")
    to_number = fields.Char(required=True, string='To Mobile Number', readonly=True)
    sms_content = fields.Text(string='SMS Content')

    @api.onchange('sms_template_id')
    def _onchange_sms_template_id(self):
        """Prefills from mobile, sms_account and sms_content but allow them to manually change the content after"""
        if self.sms_template_id.id != False:
            # Render the template body against the target record.
            sms_rendered_content = self.env['sms.template'].render_template(self.sms_template_id.template_body, self.sms_template_id.model_id.model, self.record_id)
            self.from_mobile_id = self.sms_template_id.from_mobile_verified_id.id
            self.sms_content = sms_rendered_content

    @api.multi
    def send_entity(self):
        """Attempt to send the sms, if any error comes back show it to the user and only log the smses that successfully sent"""
        self.ensure_one()
        # NOTE(review): gateway_model is computed but never used below.
        gateway_model = self.from_mobile_id.account_id.account_gateway_id.gateway_model_name
        my_sms = self.from_mobile_id.account_id.send_message(self.from_mobile_id.mobile_number, self.to_number, self.sms_content.encode('utf-8'), self.model, self.record_id)
        #use the human readable error message if present
        error_message = ""
        if my_sms.human_read_error != "":
            error_message = my_sms.human_read_error
        else:
            error_message = my_sms.response_string
        #display the screen with an error code if the sms/mms was not successfully sent
        if my_sms.delivary_state == "failed":
            # Reopen this composer as a modal with the error prefilled.
            return {
                'type':'ir.actions.act_window',
                'res_model':'sms.compose',
                'view_type':'form',
                'view_mode':'form',
                'target':'new',
                'context':{'default_to_number':self.to_number,'default_record_id':self.record_id,'default_model':self.model, 'default_error_message':error_message}
            }
        else:
            my_model = self.env['ir.model'].search([('model','=',self.model)])
            #for single smses we only record succesful sms, failed ones reopen the form with the error message
            sms_message = self.env['sms.message'].create({'record_id': self.record_id,'model_id':my_model[0].id,'account_id':self.from_mobile_id.account_id.id,'from_mobile':self.from_mobile_id.mobile_number,'to_mobile':self.to_number,'sms_content':self.sms_content,'status_string':my_sms.response_string, 'direction':'O','message_date':datetime.utcnow(), 'status_code':my_sms.delivary_state, 'sms_gateway_message_id':my_sms.message_id})
            try:
                # Mirror the SMS into the record's chatter when available.
                self.env[self.model].search([('id','=', self.record_id)]).message_post(body=self.sms_content, subject="SMS Sent")
            except:
                #Message post only works if CRM module is installed
                pass
|
[
"steven@sythiltech.com"
] |
steven@sythiltech.com
|
10c70540a9623f4e0994a218263f3b689583ef58
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_049/ch25_2019_03_11_12_40_04_650432.py
|
1ccce705dfcb58b27c4c448e847adbc6418c6bc3
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
distancia=int(input('Qual a distância do trajeto? '))
def preco(distancia):
    """Return the trip fare: R$0.50/km up to 200 km; beyond that the first
    200 km cost a flat 100 and each extra km costs R$0.45."""
    if distancia <= 200:
        return distancia * 0.5
    return (distancia - 200) * 0.45 + 100
# Fix: the original line was missing the closing parenthesis of print(),
# which is a SyntaxError. Prints the fare with two decimal places.
print("{:.2f}".format(preco(distancia)))
|
[
"you@example.com"
] |
you@example.com
|
737ec07de6c5ea89bf1610e81acecb3e9200babb
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5708284669460480_0/Python/zdan/B.py
|
e89eff79728bb389faaa4be1f8d9b26f813576ea
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,651
|
py
|
import sys
import itertools
import numpy as np
def occurrences(string, target):
    """Count occurrences of *target* in *string*.

    str.count only finds non-overlapping matches, which is wrong when the
    target can overlap itself; overlap is only possible here when the
    target's first and last characters coincide, so that case is scanned
    manually with str.find, advancing one character at a time.
    """
    if len(target) <= 1 or target[0] != target[-1]:
        # No self-overlap possible: the fast builtin is exact.
        return string.count(target)
    total = 0
    pos = string.find(target)
    while pos != -1:
        total += 1
        pos = string.find(target, pos + 1)
    return total
def solve(K, L, S, keyboard, target):
    """Expected extra bananas for the typewriter monkey: the best-case count
    of *target* over all S-keystroke strings, minus the mean count over
    every keystroke combination (keyboard duplicates weight the mean).

    Degenerate cases return 0.0: too few keystrokes (S < L), a target
    character missing from the keyboard, or a single-letter keyboard.
    """
    if S < L:
        return 0.
    if not set(target).issubset(set(keyboard)):
        return 0.
    if len(set(keyboard)) == 1:
        return 0.
    combos = 0
    best = 0
    total_hits = 0
    # Enumerate every possible typed string (K = len(keyboard), unused here
    # because `keyboard` itself carries the letters).
    for keys in itertools.product(keyboard, repeat=S):
        hits = occurrences(''.join(keys), target)
        combos += 1
        total_hits += hits
        if hits > best:
            best = hits
    return best - float(total_hits) / combos
if __name__ == '__main__':
    # Python 2 CLI driver (note the `print >> fout` statement below):
    # reads the Code Jam input file named on the command line and writes
    # "<name>.out" next to it, one "Case #i: <answer>" line per test case.
    filename_in = sys.argv[1]
    filename_out = filename_in.partition('.')[0] + '.out'
    with open(filename_out, "w") as fout:
        with open(filename_in, "r") as fin:
            T = int(fin.readline())  # number of test cases
            for case in range(1, T+1):
                K, L, S = [int(x) for x in fin.readline().split()]
                keyboard = fin.readline().strip()
                target = fin.readline().strip()
                print >> fout, "Case #%i:" % case, solve(K, L, S, keyboard, target)
|
[
"eewestman@gmail.com"
] |
eewestman@gmail.com
|
098f68ce0de1a4e85ab1ea096ed45ccf2fff3eeb
|
4bed9030031fc99f6ea3d5267bd9e773f54320f8
|
/sparse/repos/Calysto/matlab_kernel/setup.py
|
313419fcbb79751dd03972ceb291c85638644417
|
[
"BSD-3-Clause"
] |
permissive
|
yuvipanda/mybinder.org-analytics
|
c5f4b939541d29727bc8d3c023b4d140de756f69
|
7b654e3e21dea790505c626d688aa15640ea5808
|
refs/heads/master
| 2021-06-13T05:49:12.447172
| 2018-12-22T21:48:12
| 2018-12-22T21:48:12
| 162,839,358
| 1
| 1
|
BSD-3-Clause
| 2021-06-10T21:05:50
| 2018-12-22T20:01:52
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,680
|
py
|
import glob
from setuptools import setup, find_packages
# Extract __version__ from the package source without importing it (the
# package's runtime dependencies may be absent at build time).
with open('matlab_kernel/__init__.py', 'rb') as fid:
    for line in fid:
        line = line.decode('utf-8')
        if line.startswith('__version__'):
            # Last whitespace-separated token, quotes stripped.
            version = line.strip().split()[-1][1:-1]
            break

DATA = 'matlab_kernel'
DISTNAME = 'matlab_kernel'
# Ship the MATLAB helper scripts and any nested package resources.
PACKAGE_DATA = {
    DISTNAME: ['*.m'] + glob.glob('%s/**/*.*' % DISTNAME)
}
# Install the Jupyter kernel spec (kernel.json + logo images) system-wide.
DATA_FILES = [
    ('share/jupyter/kernels/matlab', [
        '%s/kernel.json' % DISTNAME
    ] + glob.glob('%s/images/*.png' % DISTNAME)
    )
]

if __name__ == "__main__":
    setup(name="matlab_kernel",
          author="Steven Silvester, Antony Lee",
          version=version,
          url="https://github.com/Calysto/matlab_kernel",
          license="BSD",
          long_description=open("README.rst").read(),
          classifiers=["Framework :: IPython",
                       "License :: OSI Approved :: BSD License",
                       "Programming Language :: Python :: 3.4",
                       "Programming Language :: Python :: 3.5",
                       "Topic :: System :: Shells"],
          packages=find_packages(include=["matlab_kernel", "matlab_kernel.*"]),
          package_data=PACKAGE_DATA,
          include_package_data=True,
          data_files=DATA_FILES,
          requires=["metakernel (>0.20.8)", "jupyter_client (>=4.4.0)",
                    "ipython (>=4.0.0)"],
          install_requires=["metakernel>=0.20.8", "jupyter_client >=4.4.0",
                            "ipython>=4.0.0",
                            "backports.tempfile;python_version<'3.0'",
                            'wurlitzer>=1.0.2;platform_system!="Windows"']
          )
|
[
"yuvipanda@gmail.com"
] |
yuvipanda@gmail.com
|
7c7b6d5899ee3e4f388506f32f261fbed6508bac
|
3649308c5d709100c4dc90e661fc9f564f184877
|
/ocs/login/models.py
|
bc379435ce64eb699e183aa176c7f68a662e65a4
|
[] |
no_license
|
anirudhasj441/django
|
54171f6141d6938201146a6d3e9475477a3f0078
|
5bb202d13d4b17daca9aedf3b213908c3245757b
|
refs/heads/master
| 2021-07-09T06:18:11.597848
| 2021-03-07T17:58:32
| 2021-03-07T17:58:32
| 230,616,005
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,042
|
py
|
from django.db import models
from datetime import date
# Create your models here.
class Student(models.Model):
    """Student account used by the login app; keyed by the PNR number."""
    # s_id = models.AutoField(primary_key=True,default="1")
    s_pnr = models.IntegerField(primary_key=True)  # PNR doubles as the PK
    s_name = models.CharField(max_length=50)
    s_dob = models.DateField(null=True,blank=True)
    s_gender = models.CharField(max_length=50,default="")
    # NOTE(review): stored as CharField — presumably a password hash, given
    # the 300-char length; verify against the login views.
    s_passwd = models.CharField(max_length=300)
    s_roll = models.IntegerField()
    s_class = models.CharField(max_length=50)
    s_contact = models.IntegerField()
    s_email = models.EmailField()

    def __str__(self):
        # Human-readable label in the admin and shells.
        return self.s_name
class Teacher(models.Model):
    """Teacher account; surrogate PK plus a TNR staff number."""
    t_id = models.AutoField(primary_key=True)
    tnr = models.IntegerField()  # staff number — presumably unique; TODO confirm
    t_name = models.CharField(max_length=50)
    t_dob = models.DateField(null=True,blank=True)
    t_email = models.EmailField(default="")
    t_cont = models.IntegerField(null=True)
    # NOTE(review): same CharField-password pattern as Student above.
    t_passwd = models.CharField(max_length=300)

    def __str__(self):
        # Human-readable label in the admin and shells.
        return self.t_name
|
[
"anirudhasj441@gmail.com"
] |
anirudhasj441@gmail.com
|
210bc7bd0293918d3ca37014a57b68ebe2823f96
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03408/s214379251.py
|
fba6c07029d057c1512feb87f8d481f483ef4cb4
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 283
|
py
|
# AtCoder ABC091 B: choose the word maximizing (count in the "good" list)
# minus (count in the "bad" list); print that maximum, floored at 0.
N = int(input())
ListP = []  # words earning +1 each
for i in range (N):
    ListP.append(input())
M = int(input())
ListN = []  # words costing -1 each
for i in range (M):
    ListN.append(input())
res = 0  # best score so far; starts at 0 so "choose nothing" is allowed
mid = 0
for i in range(N):
    # Score of candidate word ListP[i].
    # NOTE(review): list.count inside the loop makes this O(N*(N+M));
    # fine for contest limits, a Counter would be linear.
    mid += ListP.count(ListP[i])
    mid += -ListN.count(ListP[i])
    res = max(res,mid)
    mid = 0
print(res)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
07f87234adb59300c6bb17578632811553a04257
|
8cf633e92a0671c8201268620a0372f250c8aeb2
|
/205.同构字符串.py
|
f76217c78e58ac420845c37b25d7da82a86ce71d
|
[
"Unlicense"
] |
permissive
|
SprintGhost/LeetCode
|
76da5c785009d474542e5f2cdac275675b8e60b8
|
cdf1a86c83f2daedf674a871c4161da7e8fad17c
|
refs/heads/develop
| 2021-06-06T04:04:28.883692
| 2021-01-01T14:09:26
| 2021-01-01T14:09:26
| 230,635,046
| 0
| 0
|
Unlicense
| 2020-12-11T14:55:36
| 2019-12-28T16:34:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,636
|
py
|
#
# @lc app=leetcode.cn id=205 lang=python3
#
# [205] 同构字符串
#
# Accepted
# 30/30 cases passed (48 ms)
# Your runtime beats 55.2 % of python3 submissions
# Your memory usage beats 16.3 % of python3 submissions (14.1 MB)
# @lc code=start
class Solution:
    """LeetCode 205: isomorphic strings, via incremental canonical labels."""

    def isIsomorphic(self, s: str, t: str) -> bool:
        """Return True when s and t are isomorphic.

        Each character is labelled by the order of its first appearance;
        the strings are isomorphic iff the label sequences match.  Bails
        out at the first position where the labels disagree.
        """
        # Exactly one string empty can never be isomorphic.
        if (not s and t) or (not t and s):
            return False
        first_seen_s = {}
        first_seen_t = {}
        for i in range(len(s)):
            # Label = number of distinct chars seen before this one.
            label_s = first_seen_s.setdefault(s[i], len(first_seen_s))
            label_t = first_seen_t.setdefault(t[i], len(first_seen_t))
            if label_s != label_t:
                return False
        return True
# Accepted
# 30/30 cases passed (36 ms)
# Your runtime beats 93.12 % of python3 submissions
# Your memory usage beats 40.24 % of python3 submissions (13.7 MB)
class Solution:
    """LeetCode 205 (second take): compare canonical fingerprints."""

    def eigenValues(self, x):
        """Fingerprint of *x*: each character replaced by the 1-based rank
        of its first appearance, concatenated into one string.

        NOTE(review): with 10+ distinct characters, ranks become multi-digit
        and fingerprints of different label sequences could in principle
        collide — acceptable for this problem's inputs.
        """
        rank = {}
        pieces = []
        for ch in x:
            if ch not in rank:
                rank[ch] = str(len(rank) + 1)
            pieces.append(rank[ch])
        return ''.join(pieces)

    def isIsomorphic(self, s: str, t: str) -> bool:
        """Two strings are isomorphic iff their fingerprints coincide."""
        return self.eigenValues(s) == self.eigenValues(t)
# A = Solution()
# print (A.isIsomorphic("aba", "baa"))
# @lc code=end
|
[
"864047435@qq.com"
] |
864047435@qq.com
|
554b02c0fd1b8bac352fe742a597f5be3d13b43d
|
8222dcbb226682a9112720927361877a92185407
|
/fluent_contents/plugins/sharedcontent/managers.py
|
7bd0a8f6915af76928eb41ced0dc3898c6d93cf6
|
[
"Apache-2.0"
] |
permissive
|
acolorbright/django-fluent-contents
|
ada4a5fedb590e5f679463221fce2f965730bac1
|
4e5c6e99134ceee804bb42391ec37e5e17ff5a7e
|
refs/heads/master
| 2023-04-12T05:31:19.179528
| 2018-05-14T11:10:16
| 2018-05-14T11:10:16
| 108,149,326
| 0
| 0
|
Apache-2.0
| 2023-04-04T00:22:27
| 2017-10-24T15:48:46
|
Python
|
UTF-8
|
Python
| false
| false
| 1,888
|
py
|
from django.conf import settings
from django.db.models import Q, Manager
from parler.managers import TranslatableQuerySet
from fluent_contents import appsettings
from fluent_contents.plugins.sharedcontent import appsettings as sharedcontent_appsettings
class SharedContentQuerySet(TranslatableQuerySet):
    """
    The QuerySet for SharedContent models.
    """
    def __init__(self, *args, **kwargs):
        super(SharedContentQuerySet, self).__init__(*args, **kwargs)
        # Site the queryset has been scoped to; None until parent_site() runs.
        self._parent_site = None

    def _clone(self, klass=None, setup=False, **kw):
        # Django clones querysets on every chained call; carry the site
        # scope across clones or it would silently be lost.
        c = super(SharedContentQuerySet, self)._clone(klass, setup, **kw)
        c._parent_site = self._parent_site
        return c

    def parent_site(self, site):
        """
        Filter to the given site, only give content relevant for that site.
        """
        # Avoid auto filter if site is already set.
        self._parent_site = site

        if sharedcontent_appsettings.FLUENT_SHARED_CONTENT_ENABLE_CROSS_SITE:
            # Allow content to be shared between all sites:
            return self.filter(Q(parent_site=site) | Q(is_cross_site=True))
        else:
            return self.filter(parent_site=site)

    def _single_site(self):
        """
        Make sure the queryset is filtered on a parent site, if that didn't happen already.
        """
        if appsettings.FLUENT_CONTENTS_FILTER_SITE_ID and self._parent_site is None:
            # Fall back to the current site from settings.
            return self.parent_site(settings.SITE_ID)
        else:
            return self

    def get_for_slug(self, slug):
        """
        .. versionadded:: 1.0 Return the content for the given slug.
        """
        return self._single_site().get(slug=slug)


class SharedContentManager(Manager.from_queryset(SharedContentQuerySet)):
    """
    Extra methods attached to ``SharedContent.objects``, see :class:`SharedContentQuerySet`.
    """
    pass
|
[
"vdboor@edoburu.nl"
] |
vdboor@edoburu.nl
|
7e97dec12b5a269ee009a038ff2b1bb48711aff7
|
5577a04c006e73b8a40f68055b2173ffe34ce83e
|
/htsint/database/fetchTimeExperiment.py
|
52b01c5ccf358b0f3acfe468ea3b6ae2dc535dfc
|
[
"BSD-3-Clause",
"LicenseRef-scancode-public-domain",
"MIT"
] |
permissive
|
changanla/htsint
|
1617c56bd5f02ab01e0de80d3d06d2d75983a376
|
a343aff9b833979b4f5d4ba6d16fc2b65d8ccfc1
|
refs/heads/master
| 2020-03-16T13:10:15.082839
| 2017-05-24T21:27:27
| 2017-05-24T21:27:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,266
|
py
|
#!/usr/bin/python
import sys,time
from sqlalchemy.sql import select
from htsint.database import db_connect,fetch_annotations,fetch_taxa_annotations
from htsint.database import Taxon,taxa_mapper,Gene,gene_mapper
# Ad-hoc benchmark: times annotation fetching and a couple of query styles
# against the htsint database.  Exits early via sys.exit(), so the code
# after the first exit is dead unless those lines are commented out.
session,engine = db_connect()
conn = engine.connect()
#timeStart = time.time()
#annotations = fetch_annotations(['31251'],engine,idType='ncbi',useIea=False,aspect='biological_process')
#print("end: %s"%time.strftime('%H:%M:%S',time.gmtime(time.time()-timeStart)))
#print annotations
##7091(small), 7227(large)
# Time a full-taxon annotation fetch (7227 = Drosophila melanogaster).
timeStart = time.time()
annotations,goTerms = fetch_taxa_annotations(['7227'],engine,idType='ncbi',useIea=False,aspect='biological_process')
print("end: %s"%time.strftime('%H:%M:%S',time.gmtime(time.time()-timeStart)))
#print annotations
sys.exit()

###########
# Dead code below this point (sys.exit above) — kept for manual timing runs.
widget = Gene#Taxon
print("scanning %s"%widget.__tablename__)
timeStart = time.time()
myDict = {}
# SQLAlchemy Core select: fastest way to pull two columns.
s = select([widget.id,widget.ncbi_id])
_result = conn.execute(s)
result = [row for row in _result]
print("core: %s"%time.strftime('%H:%M:%S',time.gmtime(time.time()-timeStart)))
sys.exit()

# ORM alternative: stream rows in batches of 5 and build a lookup dict.
timeStart = time.time()
for t in session.query(widget).yield_per(5):
    myDict[t.ncbi_id] = t.id
print("yield per: %s"%time.strftime('%H:%M:%S',time.gmtime(time.time()-timeStart)))
|
[
"adamricha@gmail.com"
] |
adamricha@gmail.com
|
58b7f2c696ee6df680f34658e112ba3ceb045e99
|
4503c155a0252eea7f4c80ec499999a8b52bc8b6
|
/nntool/model/sequential.py
|
8a9984b8e4fd3d8c6aec1d4526eff9e3e02fa3b0
|
[
"MIT"
] |
permissive
|
NLP-Deeplearning-Club/nntool
|
e76f7be29dd184be18a6fde509b89918a8692639
|
1bbf0a20c7526d423f351ba9a854902a669d3713
|
refs/heads/master
| 2020-12-03T01:42:44.316321
| 2017-07-12T16:08:30
| 2017-07-12T16:08:30
| 95,854,457
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,719
|
py
|
from nntool.abc.modelabc import ModelABC
import numpy as np
class Sequential(ModelABC):
    """Sequential model (in the Keras sense): the model is a stack of layers.

    Each layer must expose a ``forward(x)`` method; prediction feeds the
    input through the layers in the order they were added.
    """

    def __init__(self):
        super().__init__()
        # BUGFIX: these used to be *class* attributes.  A class-level list is
        # shared by every instance, so layers added to one model leaked into
        # all other models.  Per-instance state fixes that; the public
        # interface is unchanged.
        self._layers = []
        self._trained = False

    def add(self, layer: 'layer'):
        """Append a layer to the top of the stack."""
        self._layers.append(layer)

    @property
    def trained(self):
        """Whether the model has been trained."""
        return self._trained

    def train(self, trainner):
        """Train the model by delegating to *trainner*, a callable that
        receives this model; marks the model as trained afterwards."""
        trainner(self)
        self._trained = True

    def fit(self, dev_x, dev_y):
        """Evaluate accuracy on the dev set (one-hot labels) and print a
        summary line."""
        total = 0
        correct = 0
        for i in range(len(dev_y)):
            total += 1
            # Both prediction and label are compared by argmax position.
            if self.predict(dev_x[i]).argmax() == dev_y[i].argmax():
                correct += 1
        correct_rate = correct / total
        # BUGFIX: "corrcet" typo in the printed summary corrected.
        print('total:{total},correct:{correct},Correct rate:{correct_rate}'.format(
            total=total, correct=correct, correct_rate=correct_rate))

    def _forward(self, x, i=0):
        """Recursively run the forward pass starting from layer *i*;
        returns *x* unchanged when the stack is exhausted (or empty)."""
        if i == len(self._layers):
            return x
        return self._forward(self._layers[i].forward(x), i + 1)

    def predict_probability(self, x_test):
        """Return the raw network output (class scores) for *x_test*."""
        return self._forward(x_test)

    def predict(self, x_test):
        """Return a boolean one-hot vector marking the highest-scoring class."""
        probabilitys = self.predict_probability(x_test)
        maxindex = probabilitys.argmax()
        return np.array([True if i == maxindex else False
                         for i in range(len(probabilitys))])
|
[
"hsz1273327@gmail.com"
] |
hsz1273327@gmail.com
|
96c271f4ba502360e86ae8b36745e783d53d418e
|
d3f30c67faf0b593565fc5fa526d6b96a8a9f65f
|
/tests/test_dates.py
|
9c3a7b40745a472ca8520756a080d082d887c101
|
[
"BSD-3-Clause"
] |
permissive
|
has2k1/mizani
|
4b3732b13380c6f2660f313877d95f63095781f3
|
90b0a54dd3a76528fae7997083d2ab8d31f82a58
|
refs/heads/main
| 2023-09-02T00:47:17.321472
| 2023-09-01T09:44:57
| 2023-09-01T13:45:36
| 62,319,878
| 41
| 15
|
BSD-3-Clause
| 2022-04-04T04:26:51
| 2016-06-30T15:02:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,210
|
py
|
from datetime import datetime
from zoneinfo import ZoneInfo
import pytest
from mizani._core.date_utils import (
align_limits,
ceil_mid_year,
ceil_second,
ceil_week,
floor_mid_year,
floor_second,
floor_week,
)
from mizani._core.dates import (
datetime_to_num,
get_tzinfo,
num_to_datetime,
)
def test_tzinfo():
    """get_tzinfo accepts a zone name or a ZoneInfo, rejects other types."""
    tz = ZoneInfo("Africa/Kampala")
    assert get_tzinfo("Africa/Kampala") == tz
    # Passing a ZoneInfo through must be the identity, not a copy.
    assert get_tzinfo(tz) is tz

    with pytest.raises(TypeError):
        assert get_tzinfo(10)  # type: ignore


def test_floor_mid_year():
    # Floors to the nearest earlier half-year boundary (Jan 1 or Jul 1).
    d1 = datetime(2022, 3, 1)
    d2 = datetime(2022, 11, 9)
    assert floor_mid_year(d1) == datetime(2022, 1, 1)
    assert floor_mid_year(d2) == datetime(2022, 7, 1)


def test_ceil_mid_year():
    # Ceils to the next half-year boundary; an exact boundary is unchanged.
    d1 = datetime(2022, 1, 1)
    d2 = datetime(2022, 1, 2)
    d3 = datetime(2022, 8, 2)
    assert ceil_mid_year(d1) == datetime(2022, 1, 1)
    assert ceil_mid_year(d2) == datetime(2022, 7, 1)
    assert ceil_mid_year(d3) == datetime(2023, 1, 1)


def test_floor_week():
    # NOTE(review): "week" here floors to the 1/8/15/22 month grid,
    # not ISO weekdays — confirm against date_utils.
    d1 = datetime(2000, 1, 11)
    d2 = datetime(2000, 8, 21)
    assert floor_week(d1) == datetime(2000, 1, 8)
    assert floor_week(d2) == datetime(2000, 8, 15)


def test_ceil_week():
    d1 = datetime(2000, 1, 15)
    d2 = datetime(2000, 8, 20)
    assert ceil_week(d1) == datetime(2000, 1, 15)
    assert ceil_week(d2) == datetime(2000, 8, 22)


def test_floor_second():
    # Sub-second part is truncated, not rounded.
    d1 = datetime(2000, 1, 1, 10, 10, 24, 1000)
    assert floor_second(d1) == datetime(2000, 1, 1, 10, 10, 24)


def test_ceil_second():
    # Any nonzero microseconds bump to the next whole second.
    d1 = datetime(2000, 1, 1, 10, 10, 24, 1000)
    assert ceil_second(d1) == datetime(2000, 1, 1, 10, 10, 25)


def test_num_to_datetime():
    # Ordinal day numbers convert to UTC-aware datetimes.
    limits = num_to_datetime((25552, 27743))
    assert limits[0] == datetime(2039, 12, 17, tzinfo=ZoneInfo("UTC"))
    assert limits[1] == datetime(2045, 12, 16, tzinfo=ZoneInfo("UTC"))

    # Floating-point fuzz just below a whole day must not leak microseconds.
    d = num_to_datetime((27742 + 1.9999999999,))[0]
    assert d.microsecond == 0


def test_datetime_to_num():
    # Empty input yields an empty result rather than raising.
    x = []
    res = datetime_to_num([])
    assert len(res) == 0


# Just for test coverage
# TODO: Find a better test
def test_align_limits():
    limits = (2009, 2010)
    align_limits(limits, 1 + 1e-14)
|
[
"has2k1@gmail.com"
] |
has2k1@gmail.com
|
4af1a97e3d67f049f346cc7b4760ac232eb1d942
|
c62040636877dc3584bcf4d22988fc71739c8a78
|
/lbworkflow/tests/test_process.py
|
828d4ebd11d173132620237557b9f9d4b02ff56d
|
[
"MIT"
] |
permissive
|
felixcheruiyot/django-lb-workflow
|
82de680f37aa68707640022cb3b99435f54ea09e
|
0fb4be2d39848374d60ec27c6ee1b72913e2f674
|
refs/heads/master
| 2022-04-12T19:11:41.673818
| 2020-04-09T12:03:53
| 2020-04-09T12:03:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,312
|
py
|
from django.contrib.auth import get_user_model
from django.urls import reverse
from lbworkflow.views.helper import user_wf_info_as_dict
from .leave.models import Leave
from .test_base import BaseTests
User = get_user_model()
class HelperTests(BaseTests):
    """Tests for user_wf_info_as_dict: per-user view of a workflow instance."""

    def test_user_wf_info_as_dict(self):
        leave = self.leave
        leave.submit_process()

        # 'tom' is the current approver: has a task, cannot give up.
        info = user_wf_info_as_dict(leave, self.users['tom'])
        self.assertIsNotNone(info['task'])
        self.assertIsNotNone(info['object'])
        self.assertFalse(info['can_give_up'])
        self.assertEqual(info['wf_code'], 'leave')

        # The submitter has no pending task but may withdraw the request.
        info = user_wf_info_as_dict(leave, self.users['owner'])
        self.assertIsNone(info['task'])
        self.assertTrue(info['can_give_up'])

        # An unrelated user gets no task.
        info = user_wf_info_as_dict(leave, self.users['vicalloy'])
        self.assertIsNone(info['task'])
class ViewTests(BaseTests):
    """End-to-end view tests for the leave workflow (list/detail/submit/
    edit/delete), driven through the Django test client."""

    def setUp(self):
        super().setUp()
        self.client.login(username='owner', password='password')

    def test_start_wf(self):
        """The 'start a workflow' chooser page renders."""
        resp = self.client.get(reverse('wf_start_wf'))
        self.assertEqual(resp.status_code, 200)

    def test_wf_list(self):
        """The per-workflow instance list renders."""
        resp = self.client.get(reverse('wf_list', args=('leave', )))
        self.assertEqual(resp.status_code, 200)

    def test_wf_report_list(self):
        resp = self.client.get(reverse('wf_report_list'))
        self.assertEqual(resp.status_code, 200)

    def test_wf_list_export(self):
        """export=1 triggers the spreadsheet export branch of the list view."""
        resp = self.client.get(reverse('wf_list', args=('leave', )), {'export': 1})
        self.assertEqual(resp.status_code, 200)

    def test_detail(self):
        resp = self.client.get(reverse('wf_detail', args=('1', )))
        self.assertEqual(resp.status_code, 200)

    def test_submit(self):
        """Saving keeps the instance in Draft; act_submit moves it to A2."""
        self.client.login(username='owner', password='password')
        url = reverse('wf_new', args=('leave', ))
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)

        data = {
            'start_on': '2017-04-19 09:01',
            'end_on': '2017-04-20 09:01',
            'leave_days': '1',
            'reason': 'test save',
        }
        # Plain POST (no act_submit) only saves a draft.
        resp = self.client.post(url, data)
        leave = Leave.objects.get(reason='test save')
        self.assertRedirects(resp, '/wf/%s/' % leave.pinstance.pk)
        self.assertEqual('Draft', leave.pinstance.cur_node.name)

        data['act_submit'] = 'Submit'
        data['reason'] = 'test submit'
        resp = self.client.post(url, data)
        leave = Leave.objects.get(reason='test submit')
        self.assertRedirects(resp, '/wf/%s/' % leave.pinstance.pk)
        self.assertEqual('A2', leave.pinstance.cur_node.name)

    def test_edit(self):
        """Editing a draft and submitting advances it just like a new submit."""
        self.client.login(username='owner', password='password')
        data = {
            'start_on': '2017-04-19 09:01',
            'end_on': '2017-04-20 09:01',
            'leave_days': '1',
            'reason': 'test save',
        }
        url = reverse('wf_new', args=('leave', ))
        resp = self.client.post(url, data)
        leave = Leave.objects.get(reason='test save')
        self.assertRedirects(resp, '/wf/%s/' % leave.pinstance.pk)
        self.assertEqual('Draft', leave.pinstance.cur_node.name)

        url = reverse('wf_edit', args=(leave.pinstance.pk, ))
        resp = self.client.get(url)
        self.assertEqual(resp.status_code, 200)

        data['act_submit'] = 'Submit'
        data['reason'] = 'test submit'
        resp = self.client.post(url, data)
        leave = Leave.objects.get(reason='test submit')
        self.assertRedirects(resp, '/wf/%s/' % leave.pinstance.pk)
        self.assertEqual('A2', leave.pinstance.cur_node.name)

    def test_delete(self):
        """Admin can delete via POST and via GET; both redirect to the list."""
        self.client.login(username='admin', password='password')
        # POST
        url = reverse('wf_delete')
        leave = self.create_leave('to delete')
        data = {'pk': leave.pinstance.pk}
        resp = self.client.post(url, data)
        self.assertRedirects(resp, '/wf/list/')
        self.assertIsNone(self.get_leave('to delete'))
        # GET
        leave = self.create_leave('to delete')
        data = {'pk': leave.pinstance.pk}
        resp = self.client.get(url, data)
        self.assertRedirects(resp, '/wf/list/')
        self.assertIsNone(self.get_leave('to delete'))
|
[
"zbirder@gmail.com"
] |
zbirder@gmail.com
|
48b56952ac3dc1fd3a8bd513d93bad85874010cd
|
3927b135bd77100532e3dc82c405a2d377fc8517
|
/vndk/tools/definition-tool/tests/test_vndk.py
|
8938e68aa18145dd971748268f9c1f6e06f6e889
|
[
"Apache-2.0"
] |
permissive
|
eggfly/platform_development
|
b9367c9ecd775c766dd552bf0b417c29bc4cc1cc
|
52c291d53c8f58cfe67cd3251db19b0d94b4a9c8
|
refs/heads/master
| 2020-05-20T22:54:41.470361
| 2017-03-10T02:06:38
| 2017-03-10T02:06:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,623
|
py
|
#!/usr/bin/env python3
from __future__ import print_function
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import unittest
from compat import StringIO
from vndk_definition_tool import ELF, ELFLinker, PT_SYSTEM, PT_VENDOR
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
TESTDATA_DIR = os.path.join(SCRIPT_DIR ,'testdata', 'test_vndk')
class ELFLinkerVNDKTest(unittest.TestCase):
    """Tests ELFLinker.compute_vndk against a pre-Treble dump fixture."""

    def _get_paths_from_nodes(self, nodes):
        # Sorted paths make set-like comparisons order-independent.
        return sorted([node.path for node in nodes])

    def test_compute_vndk(self):
        """libcutils (32- and 64-bit) lands in vndk_core; no fwk/vnd extensions."""
        class MockBannedLibs(object):
            # Nothing is banned for this fixture.
            def is_banned(self, name):
                return False

        input_dir = os.path.join(TESTDATA_DIR, 'pre_treble')
        graph = ELFLinker.create_from_dump(
                system_dirs=[os.path.join(input_dir, 'system')],
                vendor_dirs=[os.path.join(input_dir, 'vendor')])

        # All customization inputs empty: exercise the default classification.
        vndk = graph.compute_vndk(sp_hals=set(), vndk_stable=set(),
                                  vndk_customized_for_system=set(),
                                  vndk_customized_for_vendor=set(),
                                  generic_refs=None,
                                  banned_libs=MockBannedLibs())

        self.assertEqual(['/system/lib/libcutils.so',
                          '/system/lib64/libcutils.so'],
                         self._get_paths_from_nodes(vndk.vndk_core))
        self.assertEqual([], self._get_paths_from_nodes(vndk.vndk_fwk_ext))
        self.assertEqual([], self._get_paths_from_nodes(vndk.vndk_vnd_ext))
if __name__ == '__main__':
unittest.main()
|
[
"loganchien@google.com"
] |
loganchien@google.com
|
5a85f68337da49fec9d664ec55a0ccab7bb51369
|
fdcb2cdee4d5b398eed4eefc830213234e3e83a5
|
/00_DataCamp/07_Functions/error_handling/more_error_handling.py
|
5d65abc9f7d11cc8d26ffaba9af4fec231b1c483
|
[] |
no_license
|
daftstar/learn_python
|
be1bbfd8d7ea6b9be8407a30ca47baa7075c0d4b
|
4e8727154a24c7a1d05361a559a997c8d076480d
|
refs/heads/master
| 2021-01-20T08:53:29.817701
| 2018-01-15T22:21:02
| 2018-01-15T22:21:02
| 90,194,214
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,459
|
py
|
# #####################################################
# ERROR HANDLING W/ TRY EXCEPT
# #####################################################
def shout_echo(word1, echo=1):
    """Return *echo* copies of *word1* concatenated, with "!!!" appended.

    Exercise in try/except: any failure (e.g. a non-integer *echo*)
    prints a message and yields the empty string instead of raising.
    """
    result = ""
    try:
        result = word1 * echo + "!!!"
    except:
        print ("word1 must be a string and echo must be an integer")
    return result
print (shout_echo("particle", "ddj"))
# word1 must be a string and echo must be an integer
# #####################################################
# ERROR HANDLING BY RAISING AN ERROR
# #####################################################
def shout_echo(word1, echo=1):
    """Return *echo* copies of *word1* concatenated, with "!!!" appended.

    Exercise in raising: a negative *echo* raises ValueError instead of
    silently producing an empty echo.
    """
    if echo < 0:
        raise ValueError('echo must be greater than 0')
    return word1 * echo + '!!!'
# Call shout_echo
shout_echo("particle", echo=2) # change echo to negative value
|
[
"nikdaftary@gmail.com"
] |
nikdaftary@gmail.com
|
e1682205360b4928220bbc12cb3953be8221e9f8
|
14252ea933a08056363230c6df89223b996a0da2
|
/app/enquiry/admin.py
|
71f3c238e9b152b6658810ef408539597e9ec865
|
[
"MIT"
] |
permissive
|
S3Infosoft/mvr-insights
|
eeb02aa2e6767e6a23818d4e09f7be7ce29f80cb
|
ac73feff03c1592d5efd8e0b82f72dd4dbd3e921
|
refs/heads/master
| 2020-05-29T14:08:11.070784
| 2020-04-23T19:46:57
| 2020-04-23T19:46:57
| 189,184,619
| 0
| 1
|
MIT
| 2020-04-23T19:46:58
| 2019-05-29T08:35:56
|
CSS
|
UTF-8
|
Python
| false
| false
| 918
|
py
|
from . import models
from django.contrib import admin
@admin.register(models.OTA)
class OTAAdmin(admin.ModelAdmin):
    """Admin list/search config for OTA (online travel agency) records."""
    list_display = "name", "registration", "contact_person", "contact_number",\
                   "contact_email",
    search_fields = "name", "contact_person",


@admin.register(models.Partner)
class PartnerAdmin(admin.ModelAdmin):
    """Admin list/search config for Partner records."""
    list_display = "name", "partner_type", "created", "contact_person", \
                   "contact_number", "contact_email",
    search_fields = "name", "contact_person",


@admin.register(models.Review)
class ReviewAdmin(admin.ModelAdmin):
    """Admin config for Review records; long text columns are truncated
    via the *_slim static helpers below."""
    list_display = "headline_slim", "source_slim", "rating", "created",
    list_filter = "rating",
    search_fields = "headline",
    list_editable = "rating",

    @staticmethod
    def headline_slim(inst):
        # Truncate for readable list columns.
        return inst.headline[:70]

    @staticmethod
    def source_slim(inst):
        # Truncate for readable list columns.
        return inst.source[:70]
|
[
"abhie.lp@gmail.com"
] |
abhie.lp@gmail.com
|
2037b65f41e66d5efd97fb4037f35830d3fbc814
|
b1c578ce83d94848a1c2ec0bcb91ae791ef419cd
|
/src/ggrc/migrations/versions/20180319122658_679480cbd712_add_risk_propagation_roles.py
|
ed07384e4deaf9cf32f022902852e518f9698b63
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
zdqf/ggrc-core
|
0d1575557af3c49980fe6dbad586d045ad73d5ad
|
29dea12d189bc6be21006369efc0aae617bbab6f
|
refs/heads/master
| 2020-03-27T19:29:00.536374
| 2018-08-28T15:29:56
| 2018-08-28T15:29:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 873
|
py
|
# Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""
Add risk propagation roles
Create Date: 2018-03-19 12:26:58.016090
"""
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
from ggrc.migrations.utils import acr_propagation
from ggrc.migrations.utils import acr_propagation_constants as const
# revision identifiers, used by Alembic.
revision = '679480cbd712'
down_revision = '3e667570f21f'
def upgrade():
"""Upgrade database schema and/or data, creating a new revision."""
acr_propagation.propagate_roles(const.GGRC_RISKS_PROPAGATION)
def downgrade():
  """Remove Risk propagated roles"""
  # Walk the same propagation tree that was applied on upgrade and strip
  # each propagated role subtree from its root object type.
  propagation_tree = const.GGRC_RISKS_PROPAGATION
  for root_type in propagation_tree:
    acr_propagation.remove_propagated_roles(
        root_type, propagation_tree[root_type].keys())
|
[
"zidarsk8@gmail.com"
] |
zidarsk8@gmail.com
|
48c0fa3c02b94ef7d4860dcf8193efc152f59b9e
|
f28ef7c72a56a2a732bee3e42506c96bb69edee8
|
/old_scripts/stocks_data.py
|
f9a03cddc88b57d6fb4ab645a0f58a8230321f1b
|
[] |
no_license
|
webclinic017/backtrader_stocks_api
|
cb92311a1069199e61acc547ec69941ba861d4e6
|
e489724e7a30bb915657244bf12e55ad2f484832
|
refs/heads/main
| 2023-03-26T05:40:53.584824
| 2021-03-10T07:53:35
| 2021-03-10T07:53:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,442
|
py
|
from fastquant import get_stock_data, backtest
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from datetime import date, timedelta
#array: [open, high, low, close, volume]
class ticker_data():
    """Fetches OHLCV data for one ticker (via fastquant) and derives
    Bill Williams style indicators: median price, 5/34-period SMAs, the
    Awesome Oscillator, and the Alligator lines (jaw/teeth/lips).

    NOTE(review): the author marked calculate_alligator as broken
    ("#### broke but on the right track"); treat derived indicator
    values as unverified.
    """
    def __init__(self, ticker, date_range='null'):
        # Ticker symbol, upper-cased for the data provider.
        self.name = ticker.upper()
        # format date_range : ["2018-01-01", "2019-01-01"]
        # NOTE(review): date_range is stored but never used — the fetch below
        # always uses the hard-coded window "2017-01-01"..yesterday.
        self.date_range = date_range
        #self.period = period
        self.data_np, self.data_pd = self.get_ticker_data()
        self.highs, self.lows, self.open, self.close, self.volume = self.get_constants_from_data()
        self.dates = self.get_dates()
        self.med_price = self.get_med_price()
        self.sma5 = self.get_slow_moving_average(p=5)
        #self.sma5 = self.get_awesome_ossilator(p=5)
        self.sma34 = self.get_slow_moving_average(p=34)
        self.AO = self.get_awesome_oss()
        # Alligator lines: (smoothing period N, forward shift).
        self.jaw = self.calculate_alligator(13,8)#522
        self.teeth = self.calculate_alligator(8,5)#519 perfect
        self.lips = self.calculate_alligator(5,3)#517
    def calculate_alligator(self,N, start):
        """Smoothed moving average (SMMA) of median price over N periods,
        shifted forward by `start` bars (zero-padded at both ends).

        NOTE(review): author comment below says this is broken; the SMMA
        recurrence uses arr[-1]*N as the previous sum, which does not match
        the standard SMMA formula — confirm before trusting output.
        """
        #### broke but on the right track
        # if start 8, shift array 8 left and last 8=0 and start iter
        #med price has 1525, shift 8 group 13
        #start at 13+8=23 to grab all
        arr = []
        length = len(self.med_price)
        med = self.med_price
        begin = N
        #smma = sum(self.med_price[length - N:]) / N
        #arr.append(smma)
        for i in range(begin, length):
            if i == begin:
                # Seed value: plain SMA of the first N median prices.
                smma = sum(med[i-N:i]) / N
                arr.append(smma)
            if i != begin:
                prev_sum = arr[-1] * N
                sma = sum(med[i - N:i]) / N
                smma = ( prev_sum - arr[-1] + sma) / N
                arr.append(smma)
        # they all have diff sma periods, 13,8, 5 being smallest and limit, prepend N zeroes
        print('pre',len(arr))
        # Pad with zeros: (N - start) at the front, `start` at the back,
        # to realize the forward displacement of the Alligator line.
        diff = N - start
        for b in range(diff):
            arr.insert(0,0)
        for f in range(start):
            arr.append(0)
        return arr
    def get_awesome_oss(self):
        """Awesome Oscillator: element-wise SMA(5) - SMA(34) of median price."""
        print(len(self.med_price))
        #len med prices = 1525
        ao = []
        length = len(self.sma34)
        for i in reversed(range(length)):
            sma_diff = self.sma5[i] - self.sma34[i]
            ao.append(sma_diff)
        # Built newest-first above, so reverse back to chronological order.
        return ao[::-1]
    def get_slow_moving_average(self, p):
        """Simple moving average of median price over window p.

        Computed newest-to-oldest, zero-padded to the full series length,
        then reversed back to chronological order.
        """
        sma_arrs = []
        #reverse to capture newest date back 1525-0; 1525-30
        length = len(self.med_price)
        for i in reversed(range(p, length)):
            period_arr = self.med_price[i-p:i]
            sma = sum(period_arr)/p
            sma_arrs.append(sma)
        missing = length
        while len(sma_arrs) < missing:
            sma_arrs.append(0)
        return sma_arrs[::-1]
    # NOTE(review): the triple-quoted block below is dead code kept as a
    # class-level string literal (an earlier SMA implementation).
    '''for i in reversed(range(self.period)):#reverse range of 90
        sma_arr = []
        #start 90, so need 89,88,
        for b in range(i, self.period - p ):
            sma_arr.append(self.med_price[b])
            if len(sma_arr) == p:
                sma = sum(sma_arr) / p
                arr.append(sma)
                sma_arr = []
                print('sma',sma)
    return arr'''
    def get_med_price(self):
        """Median price per bar: (high + low) / 2."""
        med_prices = []
        for i in range(len(self.lows)):
            med = (self.highs[i] + self.lows[i]) /2
            print('med_price', med)
            med_prices.append(med)
        return med_prices
    def get_ticker_data(self):
        """Fetch daily OHLCV from fastquant for 2017-01-01 .. yesterday.

        Returns (numpy values, pandas DataFrame).
        NOTE(review): if get_stock_data raises, np_data/pd_data are never
        bound and the `return` below raises UnboundLocalError; same if
        self.name is falsy.
        """
        if(self.name):
            today = date.today()
            yesterday = today - timedelta(days = 1)
            try:
                pd_data = get_stock_data(self.name, "2017-01-01", yesterday)
                np_data = pd_data.values
            except Exception as e:
                print('get stock data error, query misformed line 20')
                print(e)
        return np_data, pd_data
    def get_constants_from_data(self):
        """Split the numpy OHLCV matrix into per-column lists.

        Column order assumed: [open, high, low, close, volume]
        (matches the comment at the top of the file).
        Returns (high, low, opens, close, volume).
        """
        opens = []
        close = []
        high = []
        low = []
        volume = []
        data = self.data_np
        for i in range(len(data)):
            opens.append(data[i][0])
            high.append(data[i][1])
            low.append(data[i][2])
            close.append(data[i][3])
            volume.append(data[i][4])
        return high, low, opens, close, volume
    def get_dates(self):
        """Return the DataFrame index labels (dates) as a list."""
        data = self.data_pd
        dates = []
        for i in range(len(data.index)):
            dates.append(data.iloc[i].name)
        return dates
if __name__ == '__main__':
    # Quick visual smoke test on TSLA: plot the SMAs, the Awesome
    # Oscillator, and the three Alligator lines.
    ticker = ticker_data('tsla')
    # Dead code kept as a string literal: earlier 90-bar plotting experiments.
    '''plt.bar(range(90), ticker.AO)
    plt.plot(range(90), ticker.sma5)
    plt.plot(range(90), ticker.sma34)
    plt.plot(range(90), ticker.med_price[len(ticker.med_price)-90:] )
    plt.show()
    plt.plot(range(90), ticker.close[len(ticker.close)-90:] )
    plt.plot(range(90), ticker.open[len(ticker.open)-90:] )
    plt.plot(range(90), ticker.highs[len(ticker.highs)-90:] )
    plt.plot(range(90), ticker.lows[len(ticker.lows)-90:] )
    plt.show()
    plt.plot(range(90), ticker.volume[len(ticker.volume)-90:] )
    plt.show()'''
    print('len', len(ticker.med_price))
    # Figure 1: both SMAs overlaid with the Awesome Oscillator bars.
    plt.plot(ticker.sma34)
    plt.plot(ticker.sma5)
    plt.bar(range(len(ticker.AO)),ticker.AO)
    plt.show()
    # Figure 2: the Alligator jaw/teeth/lips lines.
    plt.plot(ticker.jaw)
    plt.plot(ticker.teeth)
    plt.plot(ticker.lips)
    plt.show()
|
[
"noreply@github.com"
] |
webclinic017.noreply@github.com
|
06ea7004e6548c99ae12598d02b6772fe46d7dec
|
417ab6024a95e97b4d2236c67e28d00e6d1defc0
|
/python/fetch/s58589/video.py
|
ed22519cf6a9c389fddd2e76eb4a290ff89c4b7b
|
[] |
no_license
|
zeus911/myconf
|
11139069948f7c46f760ca0a8f1bd84df5ec4275
|
6dc7a6761ab820d6e97a33a55a8963f7835dbf34
|
refs/heads/master
| 2020-04-18T02:16:09.560219
| 2019-01-22T18:15:08
| 2019-01-22T18:15:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,392
|
py
|
#!/usr/bin python
# -*- coding: utf-8 -*-
from baseparse import *
from urlparse import urlparse
from common import common
from urllib import unquote
import time
from fetch.profile import *
class VideoParse(BaseParse):
    """Python 2 site scraper: walks the channel list, parses paginated
    listing pages into video records, and persists channels/videos through
    the db_ops layer.

    NOTE(review): relies on module-level globals from fetch.profile
    (baseurl, maxVideoPage, header, regVideo, regVideo2) — confirm they
    are all defined there.
    """
    def __init__(self):
        pass
    def run(self):
        """Entry point: store all channels, then parse up to maxVideoPage
        listing pages per channel (1 second pause between pages)."""
        dbVPN = db.DbVPN()
        ops = db_ops.DbOps(dbVPN)
        chs = self.videoChannel()
        for item in chs:
            ops.inertVideoChannel(item)
        print 's58589 video -- channel ok;,len=',len(chs)
        dbVPN.commit()
        dbVPN.close()
        for item in chs:
            for i in range(1, maxVideoPage):
                url = item['url']
                # Page 1 uses the base URL; later pages insert "-pg-<n>".
                if i!=1:
                    url= "%s%s%s"%(item['url'].replace(".html","-pg-"),i,".html")
                print url
                self.videoParse(item['channel'], url)
                print '解析完成 ', item['channel'], ' ---', i, '页'
                time.sleep(1)
    def videoChannel(self):
        """Build one channel dict per navigation link returned by
        self.header() (defined on BaseParse)."""
        ahrefs = self.header()
        channelList = []
        for ahref in ahrefs:
            obj={}
            obj['name']=ahref.text
            obj['url']=ahref.get('href')
            obj['baseurl']=baseurl
            obj['updateTime']=datetime.datetime.now()
            obj['pic']=''
            obj['rate']=1.2
            # Channel key: host (scheme stripped) + link text.
            obj['channel']=baseurl.replace("http://", "").replace("https://", "")+ahref.text
            obj['showType']=3
            obj['channelType']='webview'
            channelList.append(obj)
        return channelList
    def videoParse(self, channel, url):
        """Parse one listing page: extract each item's media URL, thumbnail
        and title, then insert all records in a single commit."""
        dataList = []
        soup = self.fetchUrl(url)
        metas = soup.findAll("li", {"class": "yun yun-large border-gray"})
        for meta in metas:
            obj = {}
            ahref = meta.first("a")
            mp4Url = self.parseDomVideo(ahref.get("href"))
            if mp4Url == None:
                print '没有mp4 文件:', ahref.get("href")
                continue
            obj['url'] = mp4Url
            obj['pic'] = meta.first('img').get("data-original")
            obj['name'] = ahref.get("title").replace(",快播,大香蕉","").replace("_chunk_1,快播云资源","").replace("成人影院","")
            videourl = urlparse(obj['url'])
            obj['path'] = videourl.path
            obj['updateTime'] = datetime.datetime.now()
            obj['channel'] = channel
            # URLs that are neither m3u8 nor mp4 are played via webview.
            if mp4Url.count("m3u8")==0 and mp4Url.count("mp4")==0:
                obj['videoType'] = "webview"
            else:
                obj['videoType'] = "normal"
            obj['baseurl'] = baseurl
            print obj['name'],obj['videoType'],obj['url'],obj['pic']
            dataList.append(obj)
        dbVPN = db.DbVPN()
        ops = db_ops.DbOps(dbVPN)
        for obj in dataList:
            ops.inertVideo(obj,obj['videoType'],baseurl)
        print 's58589 video --解析完毕 ; channel =', channel, '; len=', len(dataList), url
        dbVPN.commit()
        dbVPN.close()
    def parseDomVideo(self, url):
        """Resolve a detail-page URL to a playable media URL, or None.

        Follows one optional playlist redirect, then scans the player
        <script> for a media URL via regVideo/regVideo2.
        """
        try:
            soup = self.fetchUrl(url, header)
            div = soup.first("div",{'class':'playlist jsplist clearfix'})
            if div!=None:
                ahref = div.first('a')
                if ahref!=None:
                    # Follow the first playlist entry to the real player page.
                    soup = self.fetchUrl(ahref.get('href'), header)
            play_video = soup.first('div',{'class':'video-info fn-left'})
            if play_video!=None:
                script = play_video.first('script')
                if script!=None:
                    # Strip quoting/escaping before applying the URL regexes.
                    text = unquote(script.text.replace("\"","").replace("\/","/"))
                    texts = text.split(",")
                    for item in texts:
                        match = regVideo.search(item)
                        if match!=None:
                            videoUrl =match.group(1)
                            return "%s%s%s"%("http",videoUrl,'m3u8')
                        match = regVideo2.search(item)
                        if match!=None:
                            videoUrl =match.group(1)
                            return videoUrl
            print '没找到mp4'
            return None
        except Exception as e:
            print common.format_exception(e)
            return None
def videoParse(queue):
    # Factory entry point used by the fetch scheduler: hand a fresh
    # VideoParse instance over via the queue so it can be run elsewhere.
    queue.put(VideoParse())
|
[
"liguoqing19861028@163.com"
] |
liguoqing19861028@163.com
|
d93af998f22f0599ae05964e40bf4946e07934db
|
dd4d1a61ec680a86d4b569490bf2a898ea0d7557
|
/appengine/findit/model/test/wf_swarming_task_test.py
|
f6921dfc809d6a8bcf5b6cc3326292e6c1424897
|
[
"BSD-3-Clause"
] |
permissive
|
mcgreevy/chromium-infra
|
f1a68914b47bcbe3cd8a424f43741dd74fedddf4
|
09064105713603f7bf75c772e8354800a1bfa256
|
refs/heads/master
| 2022-10-29T23:21:46.894543
| 2017-05-16T06:22:50
| 2017-05-16T06:22:50
| 91,423,078
| 1
| 1
|
BSD-3-Clause
| 2022-10-01T18:48:03
| 2017-05-16T06:23:34
|
Python
|
UTF-8
|
Python
| false
| false
| 1,690
|
py
|
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from model.wf_swarming_task import WfSwarmingTask
class WfSwarmingTaskTest(unittest.TestCase):
  """Unit tests for the WfSwarmingTask model's derived properties."""

  def testClassifiedTests(self):
    # One fixture per classification bucket: all-pass (flaky per model
    # semantics below), mixed pass/fail (flaky), all-fail (reliable),
    # all-skipped (reliable), and unknown status.
    task = WfSwarmingTask.Create('m', 'b', 121, 'browser_tests')
    task.tests_statuses = {
        'TestSuite1.test1': {
            'total_run': 2,
            'SUCCESS': 2
        },
        'TestSuite1.test2': {
            'total_run': 4,
            'SUCCESS': 2,
            'FAILURE': 2
        },
        'TestSuite1.test3': {
            'total_run': 6,
            'FAILURE': 6
        },
        'TestSuite1.test4': {
            'total_run': 6,
            'SKIPPED': 6
        },
        'TestSuite1.test5': {
            'total_run': 6,
            'UNKNOWN': 6
        }
    }
    expected_classified_tests = {
        'flaky_tests': ['TestSuite1.test2', 'TestSuite1.test1'],
        'reliable_tests': ['TestSuite1.test3', 'TestSuite1.test4'],
        'unknown_tests': ['TestSuite1.test5']
    }
    self.assertEqual(expected_classified_tests, task.classified_tests)
    # The convenience properties must mirror the corresponding buckets.
    self.assertEqual(expected_classified_tests['reliable_tests'],
                     task.reliable_tests)
    self.assertEqual(expected_classified_tests['flaky_tests'],
                     task.flaky_tests)

  def testStepName(self):
    # step_name should round-trip through Create() unchanged.
    master_name = 'm'
    builder_name = 'b'
    build_number = 123
    expected_step_name = 's'
    task = WfSwarmingTask.Create(
        master_name, builder_name, build_number, expected_step_name)
    self.assertEqual(expected_step_name, task.step_name)
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
46704702b85011345fc39dacbe1433db96bfee18
|
34932f68b9878081748d96f267bd7a8359c24ffc
|
/code/derivatives.py
|
4acdd9ae7c4a81771d706b2786c1eb10623caf02
|
[] |
no_license
|
rossfadely/wfc3psf
|
388160cd692d77e4db24668a924f12004099d572
|
b0ac9fd1ed993f250cd1923d6a4ca16dd7f42a70
|
refs/heads/master
| 2020-06-04T08:54:15.044796
| 2014-12-15T20:40:38
| 2014-12-15T20:40:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,859
|
py
|
import multiprocessing
import numpy as np
from patch_fitting import eval_nll, make_background, evaluate
from generation import render_psfs
def get_derivatives(data, dq, shifts, psf_model, old_nlls, fit_parms, masks,
                    parms):
    """
    Calculate the derivatives of the objective (in patch_fitting)
    with respect to the psf model.

    Per-datum NLL derivatives are computed in parallel (one task per
    patch), averaged over the patches that contributed a nonzero
    gradient, and combined with the regularization derivative.

    Returns (derivatives, old_reg): the gradient w.r.t. psf_model and
    the regularization value at the current psf_model.

    NOTE(review): `dq` is accepted but never used here. `steps` is
    computed once and shared by all workers via argslist.
    """
    # derivative of regularization term
    old_reg, reg_term = reg(psf_model, parms)
    # calculate derivative of nll term
    pool = multiprocessing.Pool(parms.Nthreads)
    mapfn = pool.map
    steps = psf_model.copy() * parms.h
    argslist = [None] * parms.Ndata
    for i in range(parms.Ndata):
        argslist[i] = (data[i], shifts[None, i], psf_model, old_nlls[i],
                       fit_parms[i], masks[i], steps, parms)
    results = list(mapfn(one_datum_nll_diff, [args for args in argslist]))
    Neff = 0
    derivatives = np.zeros_like(psf_model)
    for i in range(parms.Ndata):
        derivatives += results[i]
        # NOTE(review): only the first ROW of the per-datum derivative is
        # inspected to decide whether the patch "contributed" — confirm
        # this is intentional (vs. np.any(results[i] != 0.0)).
        if np.any(results[i][0] != 0.0):
            Neff += 1
    if Neff == 0:
        derivatives = np.zeros_like(psf_model)
    else:
        derivatives /= Neff
    derivatives += reg_term
    # tidy up
    pool.close()
    pool.terminate()
    pool.join()
    return derivatives, old_reg
def reg(psf_model, parms):
    """
    Regularization and derivative.

    Penalizes squared differences between each psf pixel and its
    4-connected neighbors, scaled by parms.eps.

    Parameters
    ----------
    psf_model : ndarray
        Current psf model.
    parms : object
        Must expose `eps` (float regularization strength, or None to
        disable regularization).

    Returns
    -------
    (r, d) : tuple of ndarray
        Regularization value and its derivative w.r.t. psf_model, each
        with the same shape as psf_model.
    """
    eps = parms.eps
    if (eps is None):
        # BUGFIX: previously returned a single zero array, but every caller
        # unpacks two values (`old_reg, reg_term = reg(...)`), so the
        # no-regularization path broke. Return a (value, derivative) pair.
        return np.zeros_like(psf_model), np.zeros_like(psf_model)
    psf_shape = psf_model.shape
    d = np.zeros_like(psf_model)
    r = np.zeros_like(psf_model)
    # Accumulate squared neighbor differences (r) and their gradient (d),
    # guarding each direction at the array boundary.
    for i in range(psf_shape[0]):
        for j in range(psf_shape[1]):
            if i > 0:
                r[i, j] += (psf_model[i, j] - psf_model[i - 1, j]) ** 2.
                d[i, j] += 2. * (psf_model[i, j] - psf_model[i - 1, j])
            if j > 0:
                r[i, j] += (psf_model[i, j] - psf_model[i, j - 1]) ** 2.
                d[i, j] += 2. * (psf_model[i, j] - psf_model[i, j - 1])
            if i < psf_shape[0] - 1:
                r[i, j] += (psf_model[i, j] - psf_model[i + 1, j]) ** 2.
                d[i, j] += 2. * (psf_model[i, j] - psf_model[i + 1, j])
            if j < psf_shape[1] - 1:
                r[i, j] += (psf_model[i, j] - psf_model[i, j + 1]) ** 2.
                d[i, j] += 2. * (psf_model[i, j] - psf_model[i, j + 1])
    r *= eps
    d *= eps
    return r, d
def regularization_derivative(psf_model, parms):
    """
    Compute derivative of regularization wrt the psf.

    Forward-differences local_regularization: perturbs each psf pixel by
    h * psf_model[i, j] (one pool task per pixel) and returns
    ((new_reg - old_reg) / hs, old_reg).

    NOTE(review): hs has zeros wherever psf_model is zero, which would
    make the division produce inf/nan for those pixels — confirm the psf
    is strictly positive when this is called.
    """
    # old regularization
    old_reg = local_regularization((psf_model, parms, None))
    # Map to the processes
    pool = multiprocessing.Pool(parms.Nthreads)
    mapfn = pool.map
    # compute perturbed reg
    hs = parms.h * psf_model.copy()
    argslist = [None] * parms.psf_model_shape[0] * parms.psf_model_shape[1]
    for i in range(parms.psf_model_shape[0]):
        for j in range(parms.psf_model_shape[1]):
            # Flattened row-major index for the (i, j) pixel's task.
            idx = i * parms.psf_model_shape[1] + j
            tmp_psf = psf_model.copy()
            tmp_psf[i, j] += hs[i, j]
            argslist[idx] = (tmp_psf, parms, (i, j))
    new_reg = np.array((mapfn(local_regularization,
                              [args for args in argslist])))
    new_reg = new_reg.reshape(parms.psf_model_shape)
    # tidy up
    pool.close()
    pool.terminate()
    pool.join()
    return (new_reg - old_reg) / hs, old_reg
def one_datum_nll_diff((datum, shift, psf_model, old_nll, fitparms, mask,
                        steps, parms)):
    """
    Calculate the derivative for a single datum using forward differencing.

    (Python 2 tuple-parameter signature: the single argument is unpacked
    into the names above, which lets multiprocessing.Pool.map pass one
    tuple per task.)

    Returns the per-pixel derivative of the patch NLL w.r.t. psf_model,
    or an all-zero array when the patch has too few unmasked pixels.
    """
    # if not enough good pixels, discard patch
    min_pixels = np.ceil(parms.min_frac * datum.size)
    if datum[mask].size < min_pixels:
        return np.zeros_like(psf_model)
    # background model
    if parms.background == 'linear':
        N = np.sqrt(psf_model.size).astype(np.int)
        x, y = np.meshgrid(range(N), range(N))
        # NOTE(review): `psf` is not defined yet at this point — this
        # 'linear' branch would raise NameError if taken. Presumably it
        # should build the design matrix from ones_like(datum) or be
        # computed after render_psfs; confirm before enabling 'linear'.
        A = np.vstack((np.ones_like(psf), np.ones_like(psf),
                       x.ravel(), y.ravel())).T
        bkg = make_background(datum, A, fitparms, parms.background)
    elif parms.background == None:
        bkg = 0.0
    else:
        bkg = fitparms[-1]
    # calculate the difference in nll, tweaking each psf parm.
    # NOTE(review): this rebinds `steps`, shadowing the value passed in
    # from get_derivatives (they are computed the same way, so behavior
    # matches, but the parameter is effectively unused).
    steps = parms.h * psf_model
    deriv = np.zeros_like(psf_model)
    for i in range(parms.psf_model_shape[0]):
        for j in range(parms.psf_model_shape[1]):
            # Perturb one psf pixel, re-render the patch psf, and take a
            # forward difference of the masked NLL.
            temp_psf = psf_model.copy()
            temp_psf[i, j] += steps[i, j]
            psf = render_psfs(temp_psf, shift, parms.patch_shape,
                              parms.psf_grid)[0]
            model = fitparms[0] * psf + bkg
            diff = eval_nll(datum[mask], model[mask], parms) - old_nll[mask]
            deriv[i, j] = np.sum(diff) / steps[i, j]
    return deriv
def local_regularization((psf_model, parms, idx)):
    """
    Calculate the local regularization for each pixel.

    (Python 2 tuple-parameter signature, so the function can be driven
    by multiprocessing.Pool.map with one tuple per task.)

    If idx is None, returns an array of per-pixel smoothness penalties
    (eps-weighted squared differences with the two axis-0 and two axis-1
    neighbors, edges clamped). If idx is an (i, j) pair, returns the
    scalar penalty for that single pixel.
    """
    eps = parms.eps
    gamma = parms.gamma
    if (eps is None):
        if idx is None:
            return np.zeros_like(psf_model)
        else:
            return 0.0
    # Offsets selecting the previous/next neighbor along an axis.
    pm = np.array([-1, 1])
    psf_shape = psf_model.shape
    reg = np.zeros_like(psf_model)
    if idx is None:
        # axis 0
        idx = np.arange(psf_shape[0])
        ind = idx[:, None] + pm[None, :]
        ind[ind == -1] = 0  # boundary foo
        ind[ind == psf_shape[0]] = psf_shape[0] - 1  # boundary foo
        for i in range(psf_shape[1]):
            diff = psf_model[ind, i] - psf_model[idx, i][:, None]
            reg[:, i] += eps * np.sum(diff ** 2., axis=1)
        # axis 1
        idx = np.arange(psf_shape[1])
        ind = idx[:, None] + pm[None, :]
        ind[ind == -1] = 0  # boundary foo
        ind[ind == psf_shape[1]] = psf_shape[1] - 1  # boundary foo
        for i in range(psf_shape[0]):
            diff = psf_model[i, ind] - psf_model[i, idx][:, None]
            reg[i, :] += eps * np.sum(diff ** 2., axis=1)
        # l2 norm
        #reg += gamma * psf_model ** 2.
        # floor
        #reg += 1.e-1 / (1. + np.exp((psf_model - 4e-5) * 2.e5))
    else:
        idx = np.array(idx)
        value = psf_model[idx[0], idx[1]]
        # axis 0
        ind = idx[:, None] + pm[None, :]
        ind[ind == -1] = 0  # lower edge case
        ind[ind == psf_shape[0]] = psf_shape[0] - 1  # upper edge case
        diff = psf_model[ind[0], idx[1]] - value
        reg = eps * np.sum(diff ** 2.)
        # axis 1
        ind = idx[:, None] + pm[None, :]
        ind[ind == -1] = 0  # lower edge case
        ind[ind == psf_shape[1]] = psf_shape[1] - 1  # upper edge case
        diff = psf_model[idx[0], ind[1]] - value
        reg += eps * np.sum(diff ** 2.)
        # l2 norm
        #reg += gamma * value ** 2.
        # floor
        #reg += 1.e-1 / (1. + np.exp((value - 4e-5) * 2.e5) )
    return reg
|
[
"rossfadely@gmail.com"
] |
rossfadely@gmail.com
|
3766f9a5133652056ebd9b6b6bc0c4f68515983c
|
f2cb9b54e51e693e1a1f1c1b327b5b40038a8fbe
|
/src/bin/shipyard_airflow/tests/unit/plugins/test_deckhand_client_factory.py
|
044f4cc7ae96a556bd1cc726789890d7c1abce2c
|
[
"Apache-2.0"
] |
permissive
|
airshipit/shipyard
|
869b0c6d331e5b2d1c15145aee73397184290900
|
81066ae98fe2afd3a9c8c5c8556e9438ac47d5a2
|
refs/heads/master
| 2023-08-31T11:46:13.662886
| 2023-07-01T06:42:55
| 2023-08-30T16:04:47
| 133,844,902
| 6
| 2
|
Apache-2.0
| 2023-09-12T19:09:02
| 2018-05-17T17:07:36
|
Python
|
UTF-8
|
Python
| false
| false
| 1,083
|
py
|
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from deckhand.client import client as deckhand_client
from shipyard_airflow.plugins.deckhand_client_factory import (
DeckhandClientFactory
)
def test_get_client():
    """DeckhandClientFactory built from test.conf should yield a Deckhand
    client instance."""
    config_path = os.path.join(os.path.dirname(__file__), 'test.conf')
    factory = DeckhandClientFactory(config_path)
    assert isinstance(factory.get_client(), deckhand_client.Client)
|
[
"bryan.strassner@gmail.com"
] |
bryan.strassner@gmail.com
|
e94bb0b4072bf172c48f8d8cb3bfe91985a8dd3e
|
b2de5660d81afdf6b1fba058faee6ece6a51e462
|
/amplify/agent/managers/bridge.py
|
76902e4239a982a79bdc60e47f872d32cb28807d
|
[
"BSD-2-Clause"
] |
permissive
|
Ferrisbane/nginx-amplify-agent
|
725d8a7da7fb66e0b41cddd8139d25a084570592
|
ef769934341374d4b6ede5fcf5ebff34f6cba8de
|
refs/heads/master
| 2021-01-22T00:03:49.686169
| 2016-07-20T17:50:30
| 2016-07-20T17:50:30
| 63,801,713
| 0
| 0
| null | 2016-07-20T17:41:25
| 2016-07-20T17:41:25
| null |
UTF-8
|
Python
| false
| false
| 7,064
|
py
|
# -*- coding: utf-8 -*-
import gc
import time
from collections import deque
from amplify.agent.common.context import context
from amplify.agent.common.util.backoff import exponential_delay
from amplify.agent.managers.abstract import AbstractManager
__author__ = "Mike Belov"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__credits__ = ["Mike Belov", "Andrei Belov", "Ivan Poluyanov", "Oleg Mamontov", "Andrew Alexeev", "Grant Hulegaard"]
__license__ = ""
__maintainer__ = "Mike Belov"
__email__ = "dedm@nginx.com"
class Bridge(AbstractManager):
    """
    Manager that flushes object bins and stores them in deques. These deques are then sent to backend.

    Failed sends are retried with exponential backoff (http_delay); the
    payload deques are bounded (maxlen=360) so unsent data is eventually
    dropped rather than growing without limit.
    """
    name = 'bridge_manager'

    def __init__(self, **kwargs):
        # Default the flush interval from cloud config unless overridden.
        if 'interval' not in kwargs:
            kwargs['interval'] = context.app_config['cloud']['push_interval']
        super(Bridge, self).__init__(**kwargs)
        self.payload = {}
        # True until the first successful send: only meta is flushed on the
        # first run (see flush_all) to ensure backend object creation.
        self.first_run = True
        self.last_http_attempt = 0
        self.http_fail_count = 0
        self.http_delay = 0
        # Instantiate payload with appropriate keys and buckets.
        self._reset_payload()

    @staticmethod
    def look_around():
        """
        Checks everything around and make appropriate tree structure
        :return: dict of structure
        """
        # TODO check docker or OS around
        tree = {'system': ['nginx']}
        return tree

    def _run(self):
        # Periodic tick: flush everything, then collect garbage to keep
        # the long-running agent's footprint down.
        try:
            self.flush_all()
            gc.collect()
        except:
            context.default_log.error('failed', exc_info=True)
            raise

    def flush_metrics(self):
        """
        Flushes only metrics
        """
        flush_data = self._flush_metrics()
        if flush_data:
            self.payload['metrics'].append(flush_data)
        self._send_payload()

    def flush_all(self, force=False):
        """
        Flushes all data

        :param force: bool - send immediately, ignoring interval/backoff
        """
        clients = {
            'meta': self._flush_meta,
            'metrics': self._flush_metrics,
            'events': self._flush_events,
            'configs': self._flush_configs
        }
        # Flush data and add to appropriate payload bucket.
        if self.first_run:
            # If this is the first run, flush meta only to ensure object creation.
            flush_data = self._flush_meta()
            if flush_data:
                self.payload['meta'].append(flush_data)
        else:
            for client_type in self.payload.keys():
                if client_type in clients:
                    flush_data = clients[client_type].__call__()
                    if flush_data:
                        self.payload[client_type].append(flush_data)

        # Respect both the regular interval and the backoff delay.
        now = time.time()
        if force or now >= (self.last_http_attempt + self.interval + self.http_delay):
            self._send_payload()

    def _send_payload(self):
        """
        Sends current payload to backend
        """
        context.log.debug(
            'modified payload; current payload stats: '
            'meta - %s, metrics - %s, events - %s, configs - %s' % (
                len(self.payload['meta']),
                len(self.payload['metrics']),
                len(self.payload['events']),
                len(self.payload['configs'])
            )
        )

        # Send payload to backend.
        try:
            self.last_http_attempt = time.time()
            self._pre_process_payload()  # Convert deques to lists for encoding
            context.http_client.post('update/', data=self.payload)
            context.default_log.debug(self.payload)
            self._reset_payload()  # Clear payload after successful
            if self.first_run:
                self.first_run = False  # Set first_run to False after first successful send
            if self.http_delay:
                self.http_fail_count = 0
                self.http_delay = 0  # Reset HTTP delay on success
                context.log.debug('successful update, reset http delay')
        except Exception as e:
            # Send failed: restore deque form and back off exponentially.
            self._post_process_payload()  # Convert lists to deques since send failed
            self.http_fail_count += 1
            self.http_delay = exponential_delay(self.http_fail_count)
            context.log.debug('http delay set to %s (fails: %s)' % (self.http_delay, self.http_fail_count))
            exception_name = e.__class__.__name__
            context.log.error('failed to push data due to %s' % exception_name)
            context.log.debug('additional info:', exc_info=True)
        context.log.debug(
            'finished flush_all; new payload stats: '
            'meta - %s, metrics - %s, events - %s, configs - %s' % (
                len(self.payload['meta']),
                len(self.payload['metrics']),
                len(self.payload['events']),
                len(self.payload['configs'])
            )
        )

    def _flush_meta(self):
        return self._flush(clients=['meta'])

    def _flush_metrics(self):
        return self._flush(clients=['metrics'])

    def _flush_events(self):
        return self._flush(clients=['events'])

    def _flush_configs(self):
        return self._flush(clients=['configs'])

    def _flush(self, clients=None):
        # get structure
        objects_structure = context.objects.tree()

        # recursive flush
        results = self._recursive_object_flush(objects_structure, clients=clients) if objects_structure else None

        return results

    def _recursive_object_flush(self, tree, clients=None):
        # Flush this object, then its children; children results are nested
        # under the 'children' key. Returns None when nothing was flushed.
        results = {}
        object_flush = tree['object'].flush(clients=clients)
        if object_flush:
            results.update(object_flush)

        if tree['children']:
            children_results = []
            for child_tree in tree['children']:
                child_result = self._recursive_object_flush(child_tree, clients=clients)
                if child_result:
                    children_results.append(child_result)
            if children_results:
                results['children'] = children_results

        if results:
            return results

    def _reset_payload(self):
        """
        After payload has been successfully sent, clear the queues (reset them to empty deques).
        """
        self.payload = {
            'meta': deque(maxlen=360),
            'metrics': deque(maxlen=360),
            'events': deque(maxlen=360),
            'configs': deque(maxlen=360)
        }

    def _pre_process_payload(self):
        """
        ujson.encode does not handle deque objects well.  So before attempting a send, convert all the deques to lists.
        """
        for key in self.payload.keys():
            self.payload[key] = list(self.payload[key])

    def _post_process_payload(self):
        """
        If a payload is NOT reset (cannot be sent), then we should reconvert the lists to deques with maxlen to enforce
        memory management.
        """
        for key in self.payload.keys():
            self.payload[key] = deque(self.payload[key], maxlen=360)
|
[
"dedm@nginx.com"
] |
dedm@nginx.com
|
2a4c2e2000a7aff2f1657522ab2b84b85f99e5c7
|
a16feb303b7599afac19a89945fc2a9603ae2477
|
/Simple_Python/standard/ConfigParser/ConfigParser_9.py
|
cd739d82a6b2776cc41204fb21e4a9bde96a1869
|
[] |
no_license
|
yafeile/Simple_Study
|
d75874745ce388b3d0f9acfa9ebc5606a5745d78
|
c3c554f14b378b487c632e11f22e5e3118be940c
|
refs/heads/master
| 2021-01-10T22:08:34.636123
| 2015-06-10T11:58:59
| 2015-06-10T11:58:59
| 24,746,770
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
#! /usr/bin/env/python
# -*- coding:utf-8 -*-
import ConfigParser
# Build a sample configuration in memory: one section with three options.
parser = ConfigParser.SafeConfigParser()
parser.add_section('bug_tracker')
parser.set('bug_tracker','uri','http://localhost:8080/bugs')
parser.set('bug_tracker','username','Jack')
parser.set('bug_tracker','password','123456')
# Dump every section and its options (Python 2 print statements).
for section in parser.sections():
    print section
    for name,value in parser.items(section):
        print '  %s = %r' % (name,value)
|
[
"zhuzhulang@126.com"
] |
zhuzhulang@126.com
|
e43252b1c78b9d16a9c21784ae22ba5cd362fffa
|
d475a6cf49c0b2d40895ff6d48ca9b0298643a87
|
/pyleecan/Classes/ImportVectorField.py
|
8dbc5501c4eb5fc5d7c8a98afbad66071632c118
|
[
"Apache-2.0"
] |
permissive
|
lyhehehe/pyleecan
|
6c4a52b17a083fe29fdc8dcd989a3d20feb844d9
|
421e9a843bf30d796415c77dc934546adffd1cd7
|
refs/heads/master
| 2021-07-05T17:42:02.813128
| 2020-09-03T14:27:03
| 2020-09-03T14:27:03
| 176,678,325
| 2
| 0
| null | 2019-03-20T07:28:06
| 2019-03-20T07:28:06
| null |
UTF-8
|
Python
| false
| false
| 7,283
|
py
|
# -*- coding: utf-8 -*-
# File generated according to Generator/ClassesRef/Import/ImportVectorField.csv
# WARNING! All changes made in this file will be lost!
"""Method code available at https://github.com/Eomys/pyleecan/tree/master/pyleecan/Methods/Import/ImportVectorField
"""
from os import linesep
from logging import getLogger
from ._check import check_var, raise_
from ..Functions.get_logger import get_logger
from ..Functions.save import save
from ._frozen import FrozenClass
# Import all class method
# Try/catch to remove unnecessary dependencies in unused method
try:
from ..Methods.Import.ImportVectorField.get_data import get_data
except ImportError as error:
get_data = error
from ._check import InitUnKnowClassError
from .ImportData import ImportData
class ImportVectorField(FrozenClass):
    """Abstract class for Data Import/Generation

    Holds a named dict of per-component ImportData objects. This file is
    generated from a CSV class definition; only comments/docstrings should
    be edited by hand.
    """

    VERSION = 1

    # cf Methods.Import.ImportVectorField.get_data
    # If the method module failed to import, expose a property that raises
    # the original ImportError on access instead of failing at class load.
    if isinstance(get_data, ImportError):
        get_data = property(
            fget=lambda x: raise_(
                ImportError(
                    "Can't use ImportVectorField method get_data: " + str(get_data)
                )
            )
        )
    else:
        get_data = get_data
    # save method is available in all object
    save = save

    # generic copy method
    def copy(self):
        """Return a copy of the class
        """
        return type(self)(init_dict=self.as_dict())

    # get_logger method is available in all object
    get_logger = get_logger

    def __init__(
        self, components=dict(), name="", symbol="", init_dict=None, init_str=None
    ):
        """Constructor of the class. Can be used in three ways:
        - __init__ (arg1 = 1, arg3 = 5) every parameter has a name and default value
            for Matrix, None will initialise the property with an empty Matrix
            for pyleecan type, None will call the default constructor
        - __init__ (init_dict = d) d must be a dictionary with every property as key
        - __init__ (init_str = s) s must be a string
        s is the file path to load

        ndarray or list can be given for Vector and Matrix
        object or dict can be given for pyleecan Object"""

        if init_str is not None:  # Initialisation by str
            from ..Functions.load import load

            assert type(init_str) is str
            # load the object from a file
            obj = load(init_str)
            assert type(obj) is type(self)
            components = obj.components
            name = obj.name
            symbol = obj.symbol
        if init_dict is not None:  # Initialisation by dict
            assert type(init_dict) is dict
            # Overwrite default value with init_dict content
            if "components" in list(init_dict.keys()):
                components = init_dict["components"]
            if "name" in list(init_dict.keys()):
                name = init_dict["name"]
            if "symbol" in list(init_dict.keys()):
                symbol = init_dict["symbol"]
        # Initialisation by argument
        self.parent = None
        # components can be None or a dict of ImportData object
        self.components = dict()
        if type(components) is dict:
            for key, obj in components.items():
                if isinstance(obj, dict):
                    # Nested dicts are deserialized into ImportData objects.
                    self.components[key] = ImportData(init_dict=obj)
                else:
                    self.components[key] = obj
        elif components is None:
            self.components = dict()
        else:
            self.components = components  # Should raise an error
        self.name = name
        self.symbol = symbol

        # The class is frozen, for now it's impossible to add new properties
        self._freeze()

    def __str__(self):
        """Convert this object in a readable string (for print)"""

        ImportVectorField_str = ""
        if self.parent is None:
            ImportVectorField_str += "parent = None " + linesep
        else:
            ImportVectorField_str += (
                "parent = " + str(type(self.parent)) + " object" + linesep
            )
        if len(self.components) == 0:
            ImportVectorField_str += "components = dict()" + linesep
        for key, obj in self.components.items():
            tmp = (
                self.components[key].__str__().replace(linesep, linesep + "\t")
                + linesep
            )
            ImportVectorField_str += (
                "components[" + key + "] =" + tmp + linesep + linesep
            )
        ImportVectorField_str += 'name = "' + str(self.name) + '"' + linesep
        ImportVectorField_str += 'symbol = "' + str(self.symbol) + '"' + linesep
        return ImportVectorField_str

    def __eq__(self, other):
        """Compare two objects (skip parent)"""

        if type(other) != type(self):
            return False
        if other.components != self.components:
            return False
        if other.name != self.name:
            return False
        if other.symbol != self.symbol:
            return False
        return True

    def as_dict(self):
        """Convert this object in a json serializable dict (can be used in __init__)
        """

        ImportVectorField_dict = dict()
        ImportVectorField_dict["components"] = dict()
        for key, obj in self.components.items():
            ImportVectorField_dict["components"][key] = obj.as_dict()
        ImportVectorField_dict["name"] = self.name
        ImportVectorField_dict["symbol"] = self.symbol
        # The class name is added to the dict for deserialisation purpose
        ImportVectorField_dict["__class__"] = "ImportVectorField"
        return ImportVectorField_dict

    def _set_None(self):
        """Set all the properties to None (except pyleecan object)"""

        for key, obj in self.components.items():
            obj._set_None()
        self.name = None
        self.symbol = None

    def _get_components(self):
        """getter of components"""
        # Re-attach parent links on every access so contained objects can
        # navigate back to this container.
        for key, obj in self._components.items():
            if obj is not None:
                obj.parent = self
        return self._components

    def _set_components(self, value):
        """setter of components"""
        check_var("components", value, "{ImportData}")
        self._components = value

    components = property(
        fget=_get_components,
        fset=_set_components,
        doc=u"""Dict of components (e.g. {"radial": ImportData})
        :Type: {ImportData}
        """,
    )

    def _get_name(self):
        """getter of name"""
        return self._name

    def _set_name(self, value):
        """setter of name"""
        check_var("name", value, "str")
        self._name = value

    name = property(
        fget=_get_name,
        fset=_set_name,
        doc=u"""Name of the vector field
        :Type: str
        """,
    )

    def _get_symbol(self):
        """getter of symbol"""
        return self._symbol

    def _set_symbol(self, value):
        """setter of symbol"""
        check_var("symbol", value, "str")
        self._symbol = value

    symbol = property(
        fget=_get_symbol,
        fset=_set_symbol,
        doc=u"""Symbol of the vector field
        :Type: str
        """,
    )
|
[
"sebgue@gmx.net"
] |
sebgue@gmx.net
|
acd10b8f4a7a1c925fe17066c2dada6d620110a8
|
f0b33d42741f3c470cc7f616c70a4b10a73fc012
|
/scripts/ddd17_steer_export.py
|
1af7dd96ef80d87062a2bd107b25ea8fa25b1c88
|
[
"MIT"
] |
permissive
|
duguyue100/ddd20-itsc20
|
1e51a7a76fe1f2759746814ae58f4e1e21c0c4e6
|
667bb5e702a06cfff30b20de669697f3271baf04
|
refs/heads/master
| 2021-09-17T06:34:17.545026
| 2018-06-28T16:35:39
| 2018-06-28T16:35:39
| 114,002,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,309
|
py
|
"""Steer export.
Author: Yuhuang Hu
Email : duguyue100@gmail.com
"""
from __future__ import print_function
import os
import os
from os.path import join, isfile, isdir
import cPickle as pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import spiker
from spiker.data import ddd17
from spiker.models import utils
# def find_best(exp_dir):
# """find best experiment."""
# exp_dir = os.path.join(spiker.SPIKER_EXPS+"-run-3", exp_dir)
# file_list = os.listdir(exp_dir)
# file_clean_list = []
# for item in file_list:
# if ".hdf5" in item:
# file_clean_list.append(item)
# file_list = sorted(file_clean_list)
# return file_list[-1]
def get_prediction(X_test, exp_type, model_base, sensor_type, model_file):
    """Load a trained Keras checkpoint and run batched prediction.

    The checkpoint is looked up under the "-run-3" experiment root, in a
    sub-directory named from the experiment type, model base and sensor
    type (e.g. "steering-day-4-dvs").
    """
    experiment_dir = exp_type + model_base + sensor_type
    checkpoint_path = os.path.join(
        spiker.SPIKER_EXPS + "-run-3", experiment_dir, model_file)
    print ("[MESSAGE]", checkpoint_path)
    trained_model = utils.keras_load_model(checkpoint_path)
    return utils.keras_predict_batch(trained_model, X_test, verbose=True)
# Input HDF5 export and the frame window to analyse.
data_path = os.path.join(spiker.SPIKER_DATA, "ddd17",
                         "jul28/rec1501288723-export.hdf5")
frame_cut = [500, 1000]
model_base = "-day-4-"
exp_type = "steering"
sensor_type = ["full", "dvs", "aps"]
load_prediction = os.path.join(
    spiker.SPIKER_EXTRA, "pred"+model_base+"result-run-3")
if os.path.isfile(load_prediction):
    # Reuse predictions pickled by a previous run instead of re-predicting.
    print ("[MESSAGE] Prediction available")
    with open(load_prediction, "r") as f:
        (steer_full, steer_dvs, steer_aps) = pickle.load(f)
        f.close()
else:
    # export ground truth
    test_frames, _ = ddd17.prepare_train_data(data_path,
                                              y_name="steering",
                                              frame_cut=frame_cut)
    # Normalise frames to zero-mean, [0, 1]-scaled values.
    test_frames /= 255.
    test_frames -= np.mean(test_frames, keepdims=True)
    num_samples = test_frames.shape[0]
    # Evaluate on the 30% held-out tail of the recording.
    num_train = int(num_samples*0.7)
    X_test = test_frames[num_train:]
    del test_frames
    # steering full (both DVS and APS channels)
    steer_full = get_prediction(
        X_test, exp_type, model_base, sensor_type[0],
        "steering-day-4-full-103-0.02.hdf5")
    print ("[MESSAGE] Steering Full")
    # steering dvs (channel 0 only)
    steer_dvs = get_prediction(
        X_test[:, :, :, 0][..., np.newaxis],
        exp_type, model_base, sensor_type[1],
        "steering-day-4-dvs-200-0.03.hdf5")
    print ("[MESSAGE] Steering DVS")
    # steering aps (channel 1 only)
    steer_aps = get_prediction(
        X_test[:, :, :, 1][..., np.newaxis],
        exp_type, model_base, sensor_type[2],
        "steering-day-4-aps-118-0.03.hdf5")
    print ("[MESSAGE] Steering APS")
    del X_test
    # Cache the three prediction arrays so later runs skip prediction.
    save_prediction = os.path.join(
        spiker.SPIKER_EXTRA, "pred"+model_base+"result-run-3")
    with open(save_prediction, "w") as f:
        pickle.dump([steer_full, steer_dvs, steer_aps], f)
# Original (non-exported) recording, used for wheel-angle timestamps.
origin_data_path = os.path.join(spiker.SPIKER_DATA, "ddd17",
                                "jul28/rec1501288723.hdf5")
num_samples = 500
frames, steering = ddd17.prepare_train_data(data_path,
                                            target_size=None,
                                            y_name="steering",
                                            frame_cut=frame_cut,
                                            data_portion="test",
                                            data_type="uint8",
                                            num_samples=num_samples)
# Reload the full ground-truth steering signal (labels only).
steering = ddd17.prepare_train_data(data_path,
                                    target_size=None,
                                    y_name="steering",
                                    only_y=True,
                                    frame_cut=frame_cut,
                                    data_portion="test",
                                    data_type="uint8")
steer, steer_time = ddd17.export_data_field(
    origin_data_path, ['steering_wheel_angle'], frame_cut=frame_cut,
    data_portion="test")
steer_time -= steer_time[0]
# Timestamps appear to be microseconds; /1e6 converts to seconds
# (the x-axis label below says "time (s)").
steer_time = steer_time.astype("float32")/1e6
print (steer_time)
idx = 250
fig = plt.figure(figsize=(10, 8))
outer_grid = gridspec.GridSpec(2, 1, wspace=0.1)
# plot frames (top row: APS and DVS snapshots at frame `idx`)
frame_grid = gridspec.GridSpecFromSubplotSpec(
    1, 2, subplot_spec=outer_grid[0, 0],
    hspace=0.1)
aps_frame = plt.Subplot(fig, frame_grid[0])
aps_frame.imshow(frames[idx, :, :, 1], cmap="gray")
aps_frame.axis("off")
aps_frame.set_title("APS Frame")
fig.add_subplot(aps_frame)
dvs_frame = plt.Subplot(fig, frame_grid[1])
dvs_frame.imshow(frames[idx, :, :, 0], cmap="gray")
dvs_frame.axis("off")
dvs_frame.set_title("DVS Frame")
fig.add_subplot(dvs_frame)
# plot steering curve (bottom row: ground truth vs three predictions,
# all converted from radians to degrees)
steering_curve = plt.Subplot(fig, outer_grid[1, 0])
min_steer = np.min(steering*180/np.pi)
max_steer = np.max(steering*180/np.pi)
steering_curve.plot(steer_time, steering*180/np.pi,
                    label="groundtruth",
                    color="#08306b",
                    linestyle="-",
                    linewidth=2)
steering_curve.plot(steer_time, steer_dvs*180/np.pi,
                    label="DVS",
                    color="#3f007d",
                    linestyle="-",
                    linewidth=1)
steering_curve.plot(steer_time, steer_aps*180/np.pi,
                    label="APS",
                    color="#00441b",
                    linestyle="-",
                    linewidth=1)
steering_curve.plot(steer_time, steer_full*180/np.pi,
                    label="DVS+APS",
                    color="#7f2704",
                    linestyle="-",
                    linewidth=1)
# Vertical marker at the frame shown in the top row.
steering_curve.plot((steer_time[idx], steer_time[idx]),
                    (min_steer, max_steer), color="black",
                    linestyle="-", linewidth=1)
steering_curve.set_xlim(left=0, right=steer_time[-1])
steering_curve.set_title("Steering Wheel Angle Prediction")
steering_curve.grid(linestyle="-.")
steering_curve.legend(fontsize=10)
steering_curve.set_ylabel("degree")
steering_curve.set_xlabel("time (s)")
fig.add_subplot(steering_curve)
# NOTE(review): matplotlib's savefig expects `bbox_inches="tight"`;
# the `bbox=` keyword here is likely silently ignored — confirm.
plt.savefig(join(spiker.SPIKER_EXTRA, "cvprfigs",
                 "vis"+model_base+"result"+".pdf"),
            dpi=600, format="pdf",
            bbox="tight", pad_inches=0.5)
|
[
"duguyue100@gmail.com"
] |
duguyue100@gmail.com
|
362df6b63b69bd5d5fd4eb04726056f47d873113
|
122f9bf0d996c104f541453ab35c56f6ff3fc7cd
|
/z수업용문제/JunminLim/2331_반복수열.py
|
cfed0ed0d334dc85da62587de4d10ebc079ceff3
|
[] |
no_license
|
JannaKim/PS
|
1302e9b6bc529d582ecc7d7fe4f249a52311ff30
|
b9c3ce6a7a47afeaa0c62d952b5936d407da129b
|
refs/heads/master
| 2023-08-10T17:49:00.925460
| 2021-09-13T02:21:34
| 2021-09-13T02:21:34
| 312,822,458
| 0
| 0
| null | 2021-04-23T15:31:11
| 2020-11-14T13:27:34
|
Python
|
UTF-8
|
Python
| false
| false
| 281
|
py
|
def _digit_square_sum(number):
    """Return D(n): the sum of squared decimal digits of `number`,
    kept as a string (the sequence is handled in string form)."""
    return str(sum(int(digit) ** 2 for digit in number))


# BOJ 2331: iterate D(n) until the sequence enters a cycle, then report
# how many terms occur before the repeating part begins.
n = input()
seen = []
# Phase 1: record terms until one repeats (the cycle has been closed).
while n not in seen:
    seen.append(n)
    n = _digit_square_sum(n)
# Phase 2: walk the cycle again, removing its members from the history;
# what remains is the non-repeating prefix.
while n in seen:
    seen.remove(n)
    n = _digit_square_sum(n)
print(len(seen))
|
[
"baradamoh@gmail.com"
] |
baradamoh@gmail.com
|
ad4e66e29bd494bd629bac9884cd7367ed7601f6
|
69526d234c01b1d33b9fb569e55fe363d96beac0
|
/api/routes/payments.py
|
099b50144d23882f39259fcecf2873101b650077
|
[] |
no_license
|
jzamora5/orders_creator_backend
|
53b0a773fb88d99354175835cebdfc93c8e7357e
|
d5dd51ba39a5f549cc55fd9835b6082edd91d0a6
|
refs/heads/main
| 2023-03-29T11:27:08.602656
| 2021-04-05T22:49:25
| 2021-04-05T22:49:25
| 348,373,989
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,726
|
py
|
from api.models.order import Order
from api.models.user import User
from api.models.payment import Payment
from api.routes import app_routes
from flask import abort, jsonify, make_response, request
from flask_jwt_extended import jwt_required, get_jwt_identity
from app import storage
@app_routes.route('/order/<order_id>/payments', methods=['POST'], strict_slashes=False)
@jwt_required()
def post_payment(order_id):
    """
    Creates a Payment for an existing Order.

    Responses: 404 if the order is unknown, 403 if the authenticated
    user does not own it, 400 for a missing/invalid JSON body, and
    201 with the created payment as JSON on success.
    """
    order = storage.get(Order, order_id)
    if not order:
        abort(make_response(jsonify({"error": "Order not found"}), 404))
    # Only the order's owner may attach payments to it.
    if get_jwt_identity() != order.user_id:
        abort(make_response(jsonify({"error": "forbidden"}), 403))
    if not request.get_json():
        abort(make_response(jsonify({"error": "Not a JSON"}), 400))
    needed_attributes = ["status", "payment_type", "total"]
    data = request.get_json()
    for needed in needed_attributes:
        if needed not in data:
            abort(make_response(jsonify({"error": f"Missing {needed}"}), 400))
    try:
        float(data["total"])
    # Fix: float(None) / float([]) raise TypeError, not ValueError;
    # previously such payloads escaped as an unhandled 500 error.
    except (TypeError, ValueError):
        abort(make_response(
            jsonify({"error": "Total must be a valid number"}), 400))
    instance = Payment(**data)
    instance.order_id = order_id
    instance.save()
    return make_response(jsonify(instance.to_dict()), 201)
@app_routes.route('/order/<order_id>/payments', methods=['GET'], strict_slashes=False)
@jwt_required()
def get_payments(order_id):
    """Return every payment attached to the given order as a JSON list.

    404 if the order is unknown, 403 if the caller does not own it.
    """
    order = storage.get(Order, order_id)
    if not order:
        abort(make_response(jsonify({"error": "Order not found"}), 404))
    if get_jwt_identity() != order.user_id:
        abort(make_response(jsonify({"error": "forbidden"}), 403))
    # Serialise each related payment via its dict representation.
    return jsonify([payment.to_dict() for payment in order.payments])
# @app_routes.route('/order/<order_id>/payments/<payment_id>',
# methods=['GET'], strict_slashes=False)
# @jwt_required()
# def get_payment(order_id, payment_id):
# order = storage.get(Order, order_id)
# if not order:
# abort(make_response(jsonify({"error": "Order not found"}), 404))
# payment = storage.get(Payment, payment_id)
# if not payment:
# abort(make_response(jsonify({"error": "Payment not found"}), 404))
# if payment.order.id != order.id:
# abort(make_response(jsonify({"error": "Payment not found"}), 404))
# if get_jwt_identity() != order.user_id:
# abort(make_response(jsonify({"error": "forbidden"}), 403))
# payment_dict = payment.to_dict()
# del payment_dict["order"]
# return jsonify(payment_dict)
# @app_routes.route('/order/<order_id>/payments/<payment_id>',
# methods=['PUT'], strict_slashes=False)
# @jwt_required()
# def put_payment(order_id, payment_id):
# order = storage.get(Order, order_id)
# if not order:
# abort(make_response(jsonify({"error": "Order not found"}), 404))
# payment = storage.get(Payment, payment_id)
# if not payment:
# abort(make_response(jsonify({"error": "Payment not found"}), 404))
# if payment.order.id != order.id:
# abort(make_response(jsonify({"error": "Payment not found"}), 404))
# if get_jwt_identity() != order.user_id:
# abort(make_response(jsonify({"error": "forbidden"}), 403))
# ignore = ['id', 'created_at', 'updated_at']
# data = request.get_json()
# for key, value in data.items():
# if key not in ignore:
# setattr(payment, key, value)
# payment.save()
# payment_dict = payment.to_dict()
# del payment_dict["order"]
# return make_response(jsonify(payment_dict), 200)
|
[
"jzamora_5@yahoo.com"
] |
jzamora_5@yahoo.com
|
26f2f4fa282fac3064a8d18fa75e67c517c1a09c
|
a1c8731a8527872042bd46340d8d3e6d47596732
|
/programming-laboratory-I/70b7/seguro.py
|
f49bcb6be7708680dab29dab7ae4ee5f91b095ce
|
[
"MIT"
] |
permissive
|
MisaelAugusto/computer-science
|
bbf98195b0ee954a7ffaf58e78f4a47b15069314
|
d21335a2dc824b54ffe828370f0e6717fd0c7c27
|
refs/heads/master
| 2022-12-04T08:21:16.052628
| 2020-08-31T13:00:04
| 2020-08-31T13:00:04
| 287,621,838
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,053
|
py
|
# coding: utf-8
# Aluno: Misael Augusto
# Matrícula: 117110525
# Problema: Cálculo de Seguro
def calcula_seguro(valor_veiculo, lista):
    """Compute an insurance risk score and premium for a vehicle.

    `lista` holds the client's age, five boolean risk answers and,
    last, the vehicle usage ("Lazer", "Misto" or work). Returns a list
    [points, risk message, premium value].
    """
    # Points awarded per question when the answer is True / False.
    pontos_sim = [10, 20, 20, 20, 10]
    pontos_nao = [20, 10, 10, 10, 20]
    idade = lista[0]
    # Age bracket: drivers up to 21 and over 60 score the most points.
    if idade <= 21:
        pontos = 20
    elif 22 <= idade <= 30:
        pontos = 15
    elif 31 <= idade <= 40:
        pontos = 12
    elif 41 <= idade <= 60:
        pontos = 10
    else:
        pontos = 20
    # The boolean answers sit between the age and the usage entry.
    for indice, resposta in enumerate(lista[1:-1]):
        pontos += pontos_sim[indice] if resposta else pontos_nao[indice]
    # Leisure or mixed usage is riskier than work-only usage.
    pontos += 20 if lista[-1] in ("Lazer", "Misto") else 10
    # Map the score to a risk band and a fraction of the vehicle value.
    if pontos <= 80:
        mensagem = "Risco Baixo"
        valor = valor_veiculo * 0.1
    elif 80 < pontos <= 100:
        mensagem = "Risco Medio"
        valor = valor_veiculo * 0.2
    else:
        mensagem = "Risco Alto"
        valor = valor_veiculo * 0.3
    return [pontos, mensagem, valor]
print calcula_seguro(2000.0, [21, True, True, True, True, True, "Misto"])
|
[
"misael.costa@ccc.ufcg.edu.br"
] |
misael.costa@ccc.ufcg.edu.br
|
e258390aa13593f651e7ecf2780121ade1ffe47d
|
5ec06dab1409d790496ce082dacb321392b32fe9
|
/clients/python/generated/test/test_com_adobe_granite_acp_platform_platform_servlet_info.py
|
488d29b6091afc227c8d8f0f40e8699ea4f50cdc
|
[
"Apache-2.0"
] |
permissive
|
shinesolutions/swagger-aem-osgi
|
e9d2385f44bee70e5bbdc0d577e99a9f2525266f
|
c2f6e076971d2592c1cbd3f70695c679e807396b
|
refs/heads/master
| 2022-10-29T13:07:40.422092
| 2021-04-09T07:46:03
| 2021-04-09T07:46:03
| 190,217,155
| 3
| 3
|
Apache-2.0
| 2022-10-05T03:26:20
| 2019-06-04T14:23:28
| null |
UTF-8
|
Python
| false
| false
| 1,263
|
py
|
# coding: utf-8
"""
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0
Contact: opensource@shinesolutions.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import swaggeraemosgi
from swaggeraemosgi.models.com_adobe_granite_acp_platform_platform_servlet_info import ComAdobeGraniteAcpPlatformPlatformServletInfo # noqa: E501
from swaggeraemosgi.rest import ApiException
class TestComAdobeGraniteAcpPlatformPlatformServletInfo(unittest.TestCase):
    """ComAdobeGraniteAcpPlatformPlatformServletInfo unit test stubs"""

    def setUp(self):
        # Generated stub: no fixtures required.
        pass

    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass

    def testComAdobeGraniteAcpPlatformPlatformServletInfo(self):
        """Test ComAdobeGraniteAcpPlatformPlatformServletInfo"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swaggeraemosgi.models.com_adobe_granite_acp_platform_platform_servlet_info.ComAdobeGraniteAcpPlatformPlatformServletInfo()  # noqa: E501
        pass


if __name__ == '__main__':
    unittest.main()
|
[
"michael.bloch@shinesolutions.com"
] |
michael.bloch@shinesolutions.com
|
7379d9371c3922d86ed73492c5400df4bd96a4b1
|
fb72d7eb880c7777e414587347d54a0446e962a3
|
/pycis/wrappers/base_wrapper.py
|
ce076d12e21a7994866c5b8b5224567e4a2ce62d
|
[
"MIT"
] |
permissive
|
marcwebbie/pycis
|
4ad806aeb9f257f5178dcb19741666b0f4576721
|
4c123c5805dac2e302f863c6ed51c9e2e05a67c8
|
refs/heads/master
| 2016-09-06T01:10:11.301029
| 2013-12-28T09:39:36
| 2013-12-28T09:39:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,386
|
py
|
class BaseWrapper(object):
    """BaseWrapper gives the default interface for wrappers.

    It also adds utility functions to be shared by subclasses.

    Subclasses should override:

    site_url:
        Wrapped site base url.

    get_streams(self, media):
        Get a list of streams for a given Media.

    search(self, search_query, best_match=False):
        Search the wrapped site for Media objects. Return a list of
        Media. When best_match is True, return only the one media with
        the best search-match ratio.

    index(self):
        Return a list of options to be navigated by the user.
    """

    def __init__(self):
        self.site_url = None

    def __str__(self):
        class_name = self.__class__.__name__
        return "{}(name={}, site_url={})".format(class_name, self.name, self.site_url)

    @property
    def name(self):
        # Derive the wrapper name from the class name: "FooWrapper" -> "foo".
        class_name = self.__class__.__name__.lower().replace('wrapper', '')
        return class_name

    def get_streams(self, media):
        # Fix: `raise NotImplemented(...)` raised a TypeError, because
        # NotImplemented is a constant (not an exception class); the
        # intended exception is NotImplementedError.
        raise NotImplementedError("get_streams wasn't overriden by base class")

    def get_children(self, media):
        raise NotImplementedError("get_children wasn't overriden by base class")

    def search(self, search_query, best_match=False):
        raise NotImplementedError("search wasn't overriden by base class")

    def index(self):
        # Base implementation exposes no index; subclasses may override.
        return None
|
[
"marcwebbie@gmail.com"
] |
marcwebbie@gmail.com
|
6726e26d26e7add78314772b18f26038174e56e8
|
64a80df5e23b195eaba7b15ce207743e2018b16c
|
/Downloads/adafruit-circuitpython-bundle-py-20201107/lib/adafruit_onewire/device.py
|
8e2dcb3c176bb555d7382ad17baa965dce45f366
|
[] |
no_license
|
aferlazzo/messageBoard
|
8fb69aad3cd7816d4ed80da92eac8aa2e25572f5
|
f9dd4dcc8663c9c658ec76b2060780e0da87533d
|
refs/heads/main
| 2023-01-27T20:02:52.628508
| 2020-12-07T00:37:17
| 2020-12-07T00:37:17
| 318,548,075
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,270
|
py
|
# The MIT License (MIT)
#
# Copyright (c) 2017 Carter Nelson for Adafruit Industries
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
`adafruit_onewire.device`
====================================================
Provides access to a single device on the 1-Wire bus.
* Author(s): Carter Nelson
"""
__version__ = "1.2.2"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_OneWire.git"
_MATCH_ROM = b"\x55"
class OneWireDevice:
    """A single device on a shared 1-Wire bus.

    Wraps a bus object and a device address; using the instance as a
    context manager selects this device's ROM before talking to it.
    """

    def __init__(self, bus, address):
        self._bus = bus
        self._address = address

    def __enter__(self):
        # Entering the context addresses this device on the shared bus.
        self._select_rom()
        return self

    def __exit__(self, *exc):
        # Never suppress exceptions raised inside the `with` block.
        return False

    def readinto(self, buf, *, start=0, end=None):
        """Read from the device into ``buf`` (length of ``buf`` bytes).

        ``start``/``end`` slice the buffer without allocating, as if
        ``buf[start:end]`` had been passed.

        :param bytearray buf: buffer to write into
        :param int start: index to start writing at
        :param int end: index to write up to but not include
        """
        self._bus.readinto(buf, start=start, end=end)
        # Only full, unsliced reads of at least 8 bytes carry a CRC;
        # a non-zero crc8 over the buffer signals corruption.
        if start == 0 and end is None and len(buf) >= 8 and self._bus.crc8(buf):
            raise RuntimeError("CRC error.")

    def write(self, buf, *, start=0, end=None):
        """Write the bytes from ``buf`` to the device.

        ``start``/``end`` slice the buffer without allocating, as if
        ``buf[start:end]`` had been passed.

        :param bytearray buf: buffer containing the bytes to write
        :param int start: index to start writing from
        :param int end: index to read up to but not include
        """
        return self._bus.write(buf, start=start, end=end)

    def _select_rom(self):
        # Reset the bus, then issue MATCH ROM followed by this device's
        # ROM address so subsequent traffic targets only this device.
        self._bus.reset()
        self.write(_MATCH_ROM)
        self.write(self._address.rom)
|
[
"aferlazzo@gmail.com"
] |
aferlazzo@gmail.com
|
09ee19f59fcbf8de31c5285d7d5cfcf228701935
|
de33ba7be349eed5e2a1fc3f2bd9fce5bfdb9f13
|
/phenocube/lib/python3.8/site-packages/setuptools/__init__.py
|
25b4679b185857fa015cb43acc5f8b34a0faf3b3
|
[
"MIT"
] |
permissive
|
SteveMHill/phenocube-py
|
9bebf239e24af3f97e59b080560228605e6611c5
|
cb262aef1c0925efd2e955170bacd2989da03769
|
refs/heads/main
| 2023-02-24T03:35:11.461869
| 2020-12-22T12:15:22
| 2020-12-22T12:15:22
| 334,703,261
| 0
| 0
|
MIT
| 2021-01-31T16:37:21
| 2021-01-31T16:36:47
| null |
UTF-8
|
Python
| false
| false
| 7,430
|
py
|
"""Extensions to the 'distutils' for large or complex distributions"""
import os
import functools
import distutils.core
import distutils.filelist
import re
from distutils.errors import DistutilsOptionError
from distutils.util import convert_path
from fnmatch import fnmatchcase
from ._deprecation_warning import SetuptoolsDeprecationWarning
from setuptools.extern.six import PY3, string_types
from setuptools.extern.six.moves import filter, map
import setuptools.version
from setuptools.extension import Extension
from setuptools.dist import Distribution
from setuptools.depends import Require
from . import monkey
# Ensure new-style classes under Python 2.
__metaclass__ = type

# Public API of the setuptools package.
__all__ = [
    "setup",
    "Distribution",
    "Command",
    "Extension",
    "Require",
    "SetuptoolsDeprecationWarning",
    "find_packages",
]

# PEP 420 implicit namespace packages exist only on Python 3.
if PY3:
    __all__.append("find_namespace_packages")

__version__ = setuptools.version.__version__

# Presumably set elsewhere by bootstrap machinery; starts unset.
bootstrap_install_from = None

# If we run 2to3 on .py files, should we also convert docstrings?
# Default: yes; assume that we can detect doctests reliably
run_2to3_on_doctests = True
# Standard package names for fixer packages
lib2to3_fixer_packages = ["lib2to3.fixes"]
class PackageFinder:
    """
    Generate a list of all Python packages found within a directory
    """

    @classmethod
    def find(cls, where=".", exclude=(), include=("*",)):
        """Return a list all Python packages found within directory 'where'

        'where' is the root directory which will be searched for packages. It
        should be supplied as a "cross-platform" (i.e. URL-style) path; it will
        be converted to the appropriate local path syntax.

        'exclude' is a sequence of package names to exclude; '*' can be used
        as a wildcard in the names, such that 'foo.*' will exclude all
        subpackages of 'foo' (but not 'foo' itself).

        'include' is a sequence of package names to include. If it's
        specified, only the named packages will be included. If it's not
        specified, all found packages will be included. 'include' can contain
        shell style wildcard patterns just like 'exclude'.
        """
        # "ez_setup" and __pycache__ directories are always excluded.
        return list(
            cls._find_packages_iter(
                convert_path(where),
                cls._build_filter("ez_setup", "*__pycache__", *exclude),
                cls._build_filter(*include),
            )
        )

    @classmethod
    def _find_packages_iter(cls, where, exclude, include):
        """
        All the packages found in 'where' that pass the 'include' filter, but
        not the 'exclude' filter.
        """
        for root, dirs, files in os.walk(where, followlinks=True):
            # Copy dirs to iterate over it, then empty dirs.
            all_dirs = dirs[:]
            dirs[:] = []
            for dir in all_dirs:
                full_path = os.path.join(root, dir)
                rel_path = os.path.relpath(full_path, where)
                # Dotted package name derived from the relative path.
                package = rel_path.replace(os.path.sep, ".")
                # Skip directory trees that are not valid packages
                if "." in dir or not cls._looks_like_package(full_path):
                    continue
                # Should this package be included?
                if include(package) and not exclude(package):
                    yield package
                # Keep searching subdirectories, as there may be more packages
                # down there, even if the parent was excluded.
                dirs.append(dir)

    @staticmethod
    def _looks_like_package(path):
        """Does a directory look like a package?"""
        # A classic (non-namespace) package must contain __init__.py.
        return os.path.isfile(os.path.join(path, "__init__.py"))

    @staticmethod
    def _build_filter(*patterns):
        """
        Given a list of patterns, return a callable that will be true only if
        the input matches at least one of the patterns.
        """
        return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns)
class PEP420PackageFinder(PackageFinder):
    # PEP 420 namespace packages need no __init__.py, so every
    # directory qualifies as a package.
    @staticmethod
    def _looks_like_package(path):
        return True


# Public aliases for the two finder flavours.
find_packages = PackageFinder.find

if PY3:
    find_namespace_packages = PEP420PackageFinder.find
def _install_setup_requires(attrs):
    """Fetch build eggs for any setup_requires declared in `attrs`."""
    # Note: do not use `setuptools.Distribution` directly, as
    # our PEP 517 backend patch `distutils.core.Distribution`.
    dist = distutils.core.Distribution(
        dict(
            (k, v)
            for k, v in attrs.items()
            if k in ("dependency_links", "setup_requires")
        )
    )
    # Honor setup.cfg's options.
    dist.parse_config_files(ignore_option_errors=True)
    if dist.setup_requires:
        dist.fetch_build_eggs(dist.setup_requires)
def setup(**attrs):
    """Drop-in replacement for distutils.core.setup that first installs
    any setup_requires dependencies needed to interpret `attrs`."""
    # Make sure we have any requirements needed to interpret 'attrs'.
    _install_setup_requires(attrs)
    return distutils.core.setup(**attrs)

# Re-use distutils' documentation for the wrapper.
setup.__doc__ = distutils.core.setup.__doc__


# Base the Command class below on the original (unpatched) distutils one.
_Command = monkey.get_unpatched(distutils.core.Command)
class Command(_Command):
    __doc__ = _Command.__doc__

    # Set to True by subcommands that accept free-form positional args.
    command_consumes_arguments = False

    def __init__(self, dist, **kw):
        """
        Construct the command for dist, updating
        vars(self) with any keyword parameters.
        """
        _Command.__init__(self, dist)
        vars(self).update(kw)

    def _ensure_stringlike(self, option, what, default=None):
        # Fill in the default when the option is unset; otherwise the
        # value must already be a string.
        val = getattr(self, option)
        if val is None:
            setattr(self, option, default)
            return default
        elif not isinstance(val, string_types):
            raise DistutilsOptionError(
                "'%s' must be a %s (got `%s`)" % (option, what, val)
            )
        return val

    def ensure_string_list(self, option):
        r"""Ensure that 'option' is a list of strings. If 'option' is
        currently a string, we split it either on /,\s*/ or /\s+/, so
        "foo bar baz", "foo,bar,baz", and "foo, bar baz" all become
        ["foo", "bar", "baz"].
        """
        val = getattr(self, option)
        if val is None:
            return
        elif isinstance(val, string_types):
            # Split comma- or whitespace-separated values into a list.
            setattr(self, option, re.split(r",\s*|\s+", val))
        else:
            if isinstance(val, list):
                ok = all(isinstance(v, string_types) for v in val)
            else:
                ok = False
            if not ok:
                raise DistutilsOptionError(
                    "'%s' must be a list of strings (got %r)" % (option, val)
                )

    def reinitialize_command(self, command, reinit_subcommands=0, **kw):
        # Delegate to distutils, then apply any keyword overrides.
        cmd = _Command.reinitialize_command(self, command, reinit_subcommands)
        vars(cmd).update(kw)
        return cmd
def _find_all_simple(path):
"""
Find all files under 'path'
"""
results = (
os.path.join(base, file)
for base, dirs, files in os.walk(path, followlinks=True)
for file in files
)
return filter(os.path.isfile, results)
def findall(dir=os.curdir):
    """
    Find all files under 'dir' and return the list of full filenames.
    Unless dir is '.', return full filenames with dir prepended.
    """
    files = _find_all_simple(dir)
    if dir == os.curdir:
        # Strip the leading './' so results are relative to the cwd.
        make_rel = functools.partial(os.path.relpath, start=dir)
        files = map(make_rel, files)
    return list(files)
class sic(str):
    """Treat this string as-is (https://en.wikipedia.org/wiki/Sic)"""


# Apply monkey patches
monkey.patch_all()
|
[
"steven.smhill@gmail.com"
] |
steven.smhill@gmail.com
|
82a16cd345d6ca544ea367fa613b86c7f22ffdc1
|
afafaa82a058a3ac1d3721039a11e587278bc80b
|
/script/plot_condition_numbers.py
|
b1f2c2ccefdab57544cf7d8851cff68ddbec1b06
|
[
"BSD-3-Clause"
] |
permissive
|
tonymcdaniel/sfepy
|
24ec0b84bd0ee94ac3935ce01a25db5e6574110a
|
b7a70547515c6b0faf642dcc127841b782a51200
|
refs/heads/master
| 2021-01-15T20:13:28.735206
| 2012-07-23T14:33:32
| 2012-07-23T15:17:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,961
|
py
|
#!/usr/bin/env python
"""
Plot conditions numbers w.r.t. polynomial approximation order of reference
element matrices for various FE polynomial spaces (bases).
"""
from optparse import OptionParser
import time
import numpy as nm
import matplotlib.pyplot as plt
from sfepy import data_dir
from sfepy.base.base import output, assert_
from sfepy.fem import Mesh, Domain, Field, FieldVariable, Material, Integral
from sfepy.terms import Term
from sfepy.solvers import eig
usage = '%prog [options]\n' + __doc__.rstrip()
help = {
'basis' :
'name of the FE basis [default: %default]',
'max_order' :
'maximum order of polynomials [default: %default]',
'matrix_type' :
'matrix type, one of "elasticity", "laplace" [default: %default]',
'geometry' :
'reference element geometry, one of "2_3", "2_4", "3_4", "3_8"'
' [default: %default]',
}
def main():
    """Parse CLI options, assemble the reference-element matrix for each
    polynomial order 1..max_order, and plot its condition number."""
    parser = OptionParser(usage=usage, version='%prog')
    parser.add_option('-b', '--basis', metavar='name',
                      action='store', dest='basis',
                      default='lagrange', help=help['basis'])
    parser.add_option('-n', '--max-order', metavar='order', type=int,
                      action='store', dest='max_order',
                      default=10, help=help['max_order'])
    parser.add_option('-m', '--matrix', metavar='type',
                      action='store', dest='matrix_type',
                      default='laplace', help=help['matrix_type'])
    parser.add_option('-g', '--geometry', metavar='name',
                      action='store', dest='geometry',
                      default='2_4', help=help['geometry'])
    options, args = parser.parse_args()

    # Geometry string is '<dim>_<n_vertices>', e.g. '2_4' = quadrilateral.
    dim, n_ep = int(options.geometry[0]), int(options.geometry[2])
    output('reference element geometry:')
    output(' dimension: %d, vertices: %d' % (dim, n_ep))

    # Scalar unknown for Laplace, dim-component vector for elasticity.
    n_c = {'laplace' : 1, 'elasticity' : dim}[options.matrix_type]
    output('matrix type:', options.matrix_type)
    output('number of variable components:', n_c)

    output('polynomial space:', options.basis)
    output('max. order:', options.max_order)

    mesh = Mesh.from_file(data_dir + '/meshes/elements/%s_1.mesh'
                          % options.geometry)
    domain = Domain('domain', mesh)
    omega = domain.create_region('Omega', 'all')

    orders = nm.arange(1, options.max_order + 1, dtype=nm.int)
    conds = []

    # Tensor-product geometries ('2_4', '3_8') need no order correction.
    order_fix = 0 if options.geometry in ['2_4', '3_8'] else 1

    for order in orders:
        output('order:', order, '...')

        field = Field('fu', nm.float64, n_c, omega,
                      space='H1', poly_space_base=options.basis,
                      approx_order=order)

        to = field.approx_order
        # Quadrature order chosen to integrate the bilinear form exactly.
        quad_order = 2 * (max(to - order_fix, 0))
        output('quadrature order:', quad_order)

        u = FieldVariable('u', 'unknown', field, n_c)
        v = FieldVariable('v', 'test', field, n_c, primary_var_name='u')

        m = Material('m', lam=1.0, mu=1.0)

        integral = Integral('i', order=quad_order)

        if options.matrix_type == 'laplace':
            term = Term.new('dw_laplace(m.mu, v, u)',
                            integral, omega, m=m, v=v, u=u)
            # Laplace has a single constant null-space mode.
            n_zero = 1
        else:
            assert_(options.matrix_type == 'elasticity')
            term = Term.new('dw_lin_elastic_iso(m.lam, m.mu, v, u)',
                            integral, omega, m=m, v=v, u=u)
            # Rigid-body modes: translations plus rotations.
            n_zero = (dim + 1) * dim / 2

        term.setup()

        output('assembling...')
        tt = time.clock()
        mtx, iels = term.evaluate(mode='weak', diff_var='u')
        output('...done in %.2f s' % (time.clock() - tt))
        mtx = mtx[0][0, 0]

        # Sanity check: the assembled matrix must be symmetric.
        # NOTE(review): bare `except:` drops into an interactive debugger
        # instead of failing — intentional for development, but it also
        # swallows KeyboardInterrupt.
        try:
            assert_(nm.max(nm.abs(mtx - mtx.T)) < 1e-10)
        except:
            from sfepy.base.base import debug; debug()

        output('matrix shape:', mtx.shape)

        eigs = eig(mtx, method='eig.sgscipy', eigenvectors=False)
        eigs.sort()

        # Zero 'true' zeros.
        eigs[:n_zero] = 0.0

        ii = nm.where(eigs < 0.0)[0]
        if len(ii):
            output('matrix is not positive semi-definite!')

        ii = nm.where(eigs[n_zero:] < 1e-12)[0]
        if len(ii):
            output('matrix has more than %d zero eigenvalues!' % n_zero)

        output('smallest eigs:\n', eigs[:10])

        # Condition number = largest / smallest positive eigenvalue.
        ii = nm.where(eigs > 0.0)[0]
        emin, emax = eigs[ii[[0, -1]]]

        output('min:', emin, 'max:', emax)

        cond = emax / emin
        conds.append(cond)

        output('condition number:', cond)

        output('...done')

    # Plot growth of the condition number with order (semilog + log-log).
    plt.figure(1)
    plt.semilogy(orders, conds)
    plt.xticks(orders, orders)
    plt.xlabel('polynomial order')
    plt.ylabel('condition number')
    plt.grid()

    plt.figure(2)
    plt.loglog(orders, conds)
    plt.xticks(orders, orders)
    plt.xlabel('polynomial order')
    plt.ylabel('condition number')
    plt.grid()

    plt.show()

if __name__ == '__main__':
    main()
|
[
"cimrman3@ntc.zcu.cz"
] |
cimrman3@ntc.zcu.cz
|
5e73ef0d3118c4e024fe986a11cdce3910655b65
|
01b04d980b2746b4d4db1c2be1a263f77e2a7596
|
/liangsongyou.blog/blog/views.py
|
95f46d736ea14fb65f41de2b0e1d859dea64a6e2
|
[] |
no_license
|
liangsongyou/quarkblob
|
e9763efefe91f30b6da278ca6787564770cef4ec
|
5d926ab40881a5f499734bfcbcb083d8bbb5e03e
|
refs/heads/master
| 2022-11-26T17:30:47.276314
| 2018-11-28T09:47:54
| 2018-11-28T09:47:54
| 155,494,671
| 0
| 0
| null | 2022-11-22T03:07:32
| 2018-10-31T03:42:41
|
Python
|
UTF-8
|
Python
| false
| false
| 1,505
|
py
|
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import permission_required
from blog.models import Post
from blog.forms import PostForm
def post(request, slug=None):
    """Render a single blog post looked up by its slug (404 if absent)."""
    post_obj = get_object_or_404(Post, slug=slug)
    context = {'item': post_obj, 'title': post_obj}
    return render(request, 'blog/post.html', context)
@permission_required('blog.add_post')
def add_post(request):
    """Create a new blog post; the author is the logged-in user.

    GET renders an empty form; a valid POST saves the post (and its
    many-to-many relations) and redirects to the new post's page.
    """
    if request.method != 'POST':
        # Plain GET: show an unbound form.
        form = PostForm()
        return render(request, 'blog/post_form.html',
                      {'form': form, 'title': 'Add Post'})
    form = PostForm(request.POST, request.FILES)
    if form.is_valid():
        new_post = form.save(commit=False)
        new_post.author = request.user
        new_post.save()
        # Persist tag/category selections made on the form.
        form.save_m2m()
        return redirect(new_post.get_absolute_url())
    # Invalid submission: re-render with the bound form and its errors.
    return render(request, 'blog/post_form.html',
                  {'form': form, 'title': 'Add Post'})
@permission_required('blog.edit_post')
def edit_post(request, pk=None):
    """Edit an existing post identified by primary key.

    GET renders the form bound to the post; a valid POST saves the
    changes and redirects to the post's detail page.
    """
    item = get_object_or_404(Post, pk=pk)
    if request.method == 'POST':
        form = PostForm(request.POST, request.FILES, instance=item)
        if form.is_valid():
            form.save()
            return redirect(item.get_absolute_url())
    else:
        form = PostForm(instance=item)
    # Fix: page title was misspelled 'Eidt'.
    title = 'Edit: %s' % item
    return render(request, 'blog/post_form.html', {'form':form,
                                                   'item':item,
                                                   'title':title,})
|
[
"yuebei58@gmail.com"
] |
yuebei58@gmail.com
|
203d9a37000a582dcdc625710f4e7bbb0c159639
|
78f65f6c8be381773cc847c93da4b28eb4eeefae
|
/fastmri/models/__init__.py
|
d1d79c2627e2cc480fe53930fa89a0e984117d8d
|
[
"MIT"
] |
permissive
|
soumickmj/fastMRI
|
af7bc3c654eda93905e19c24ab40dd255eb6c128
|
2056879fd9444c14599447af38ba0507f1222901
|
refs/heads/master
| 2022-11-29T22:32:26.152484
| 2022-03-09T20:50:02
| 2022-03-09T20:50:02
| 214,513,364
| 1
| 0
|
MIT
| 2022-11-08T08:29:57
| 2019-10-11T19:22:54
|
Python
|
UTF-8
|
Python
| false
| false
| 270
|
py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from .unet import Unet
from .varnet import NormUnet, SensitivityModel, VarNet, VarNetBlock
|
[
"matt.muckley@gmail.com"
] |
matt.muckley@gmail.com
|
b789dcc8c2c8b5c5cc7429535c32875a9f690efc
|
8cfee59143ecd307fe7d7a27986c3346aa8ce60c
|
/AI/1. Machine Learning/163_mnist-tocsv.py
|
cf18d04e18f98b2720fada6f34a867fd43f3f5a4
|
[] |
no_license
|
kiminhan/Python
|
daafc1fde804f172ebfb1385ab9d6205c7a45970
|
dc6af486aaf7d25dbe13bcee4e115207f37d4696
|
refs/heads/master
| 2020-03-08T19:18:10.173346
| 2018-09-06T06:11:40
| 2018-09-06T06:11:40
| 128,288,713
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,346
|
py
|
import struct
def to_csv(name, maxdata):
# 레이블 파일과 이미지 파일 열기
lbl_f = open("./mnist_/"+name+"-labels-idx1-ubyte", "rb")
img_f = open("./mnist_/"+name+"-images-idx3-ubyte", "rb")
csv_f = open("./mnist_/"+name+".csv", "w", encoding="utf-8")
# 헤더 정보 읽기 --- (※1)
mag, lbl_count = struct.unpack(">II", lbl_f.read(8))
mag, img_count = struct.unpack(">II", img_f.read(8))
rows, cols = struct.unpack(">II", img_f.read(8))
pixels = rows * cols
# 이미지 데이터를 읽고 CSV로 저장하기 --- (※2)
res = []
for idx in range(lbl_count):
if idx > maxdata: break
label = struct.unpack("B", lbl_f.read(1))[0]
bdata = img_f.read(pixels)
sdata = list(map(lambda n: str(n), bdata))
csv_f.write(str(label)+",")
csv_f.write(",".join(sdata)+"\r\n")
# 잘 저장됐는지 이미지 파일로 저장해서 테스트하기 -- (※3)
if idx < 10:
s = "P2 28 28 255\n"
s += " ".join(sdata)
iname = "./mnist_/{0}-{1}-{2}.pgm".format(name,idx,label)
with open(iname, "w", encoding="utf-8") as f:
f.write(s)
csv_f.close()
lbl_f.close()
img_f.close()
# 결과를 파일로 출력하기 --- (※4)
to_csv("train", 1000)
to_csv("t10k", 500)
|
[
"rladlsgks4@naver.com"
] |
rladlsgks4@naver.com
|
bc03d8274188df69eac85d025d78dbfa59a16efd
|
42321745dbc33fcf01717534f5bf7581f2dc9b3a
|
/lab/jax/linear_algebra.py
|
618778d388a9415d7318fdcb5ef3dd6f36ac76e4
|
[
"MIT"
] |
permissive
|
talayCh/lab
|
0a34b99fd60bc65fdfd1ead602d94dfb6b96f846
|
4ce49b68782a1ef8390b14ee61f57eeaa13070cf
|
refs/heads/master
| 2023-08-25T04:42:06.904800
| 2021-11-01T18:22:00
| 2021-11-01T18:22:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,756
|
py
|
import logging
from typing import Union, Optional
import jax.numpy as jnp
import jax.scipy.linalg as jsla
from . import dispatch, B, Numeric
from .custom import jax_register
from ..custom import (
toeplitz_solve,
i_toeplitz_solve,
s_toeplitz_solve,
i_s_toeplitz_solve,
expm,
i_expm,
s_expm,
i_s_expm,
logm,
i_logm,
s_logm,
i_s_logm,
)
from ..linear_algebra import _default_perm
from ..types import Int
from ..util import batch_computation
__all__ = []
log = logging.getLogger(__name__)
@dispatch
def matmul(a: Numeric, b: Numeric, tr_a: bool = False, tr_b: bool = False):
a = transpose(a) if tr_a else a
b = transpose(b) if tr_b else b
return jnp.matmul(a, b)
@dispatch
def transpose(a: Numeric, perm: Optional[Union[tuple, list]] = None):
# Correctly handle special cases.
rank_a = B.rank(a)
if rank_a == 0:
return a
elif rank_a == 1 and perm is None:
return a[None, :]
if perm is None:
perm = _default_perm(a)
return jnp.transpose(a, axes=perm)
@dispatch
def trace(a: Numeric, axis1: Int = -2, axis2: Int = -1):
return jnp.trace(a, axis1=axis1, axis2=axis2)
@dispatch
def svd(a: Numeric, compute_uv: bool = True):
res = jnp.linalg.svd(a, full_matrices=False, compute_uv=compute_uv)
return (res[0], res[1], jnp.conj(transpose(res[2]))) if compute_uv else res
@dispatch
def eig(a: Numeric, compute_eigvecs: bool = True):
vals, vecs = jnp.linalg.eig(a)
return (vals, vecs) if compute_eigvecs else vals
@dispatch
def solve(a: Numeric, b: Numeric):
return jnp.linalg.solve(a, b)
@dispatch
def inv(a: Numeric):
return jnp.linalg.inv(a)
@dispatch
def det(a: Numeric):
return jnp.linalg.det(a)
@dispatch
def logdet(a: Numeric):
return jnp.linalg.slogdet(a)[1]
_expm = jax_register(expm, i_expm, s_expm, i_s_expm)
@dispatch
def expm(a: Numeric):
return _expm(a)
_logm = jax_register(logm, i_logm, s_logm, i_s_logm)
@dispatch
def logm(a: Numeric):
return _logm(a)
@dispatch
def _cholesky(a: Numeric):
return jnp.linalg.cholesky(a)
@dispatch
def cholesky_solve(a: Numeric, b: Numeric):
return triangular_solve(transpose(a), triangular_solve(a, b), lower_a=False)
@dispatch
def triangular_solve(a: Numeric, b: Numeric, lower_a: bool = True):
def _triangular_solve(a_, b_):
return jsla.solve_triangular(
a_, b_, trans="N", lower=lower_a, check_finite=False
)
return batch_computation(_triangular_solve, (a, b), (2, 2))
_toeplitz_solve = jax_register(
toeplitz_solve, i_toeplitz_solve, s_toeplitz_solve, i_s_toeplitz_solve
)
@dispatch
def toeplitz_solve(a: Numeric, b: Numeric, c: Numeric):
return _toeplitz_solve(a, b, c)
|
[
"wessel.p.bruinsma@gmail.com"
] |
wessel.p.bruinsma@gmail.com
|
35da58bdb8be02fba0f38d7f0bb56498199a2c1a
|
b090cb9bc30ac595675d8aa253fde95aef2ce5ea
|
/trunk/test/NightlyRun/test304.py
|
73f9108132ad2bddc032b4278bf438f74d72234c
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
eyhl/issm
|
5ae1500715c258d7988e2ef344c5c1fd15be55f7
|
1013e74c28ed663ebb8c9d398d9be0964d002667
|
refs/heads/master
| 2022-01-05T14:31:23.235538
| 2019-01-15T13:13:08
| 2019-01-15T13:13:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 837
|
py
|
#Test Name: SquareSheetConstrainedStressSSA3d
from model import *
from socket import gethostname
from triangle import *
from setmask import *
from parameterize import *
from setflowequation import *
from solve import *
md=triangle(model(),'../Exp/Square.exp',180000.)
md=setmask(md,'','')
md=parameterize(md,'../Par/SquareSheetConstrained.py')
md.extrude(3,2.)
md=setflowequation(md,'SSA','all')
md.cluster=generic('name',gethostname(),'np',3)
md=solve(md,'Stressbalance')
#Fields and tolerances to track changes
field_names =['Vx','Vy','Vz','Vel','Pressure']
field_tolerances=[1e-13,1e-13,1e-13,1e-13,1e-13]
field_values=[\
md.results.StressbalanceSolution.Vx,\
md.results.StressbalanceSolution.Vy,\
md.results.StressbalanceSolution.Vz,\
md.results.StressbalanceSolution.Vel,\
md.results.StressbalanceSolution.Pressure,\
]
|
[
"cummings.evan@gmail.com"
] |
cummings.evan@gmail.com
|
b8137ddbd4d31ee1e675044996c2784fc45b202a
|
28c1c3afaf5e70c0530b864ead16fa8762ef1ca4
|
/ch05_Array/list_size.py
|
78660ba7e14c0f9ebf85b4bc2b7a1d1726f1190f
|
[] |
no_license
|
luoshao23/Data_Structure_and_Algorithm_in_Python
|
8059381c21580e3e4f1276089b9fe4f96de385f8
|
051754963ca2eb818b981ba72583314a043e5df4
|
refs/heads/master
| 2020-04-29T06:29:02.148886
| 2019-05-15T02:46:48
| 2019-05-15T02:46:48
| 175,917,337
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 181
|
py
|
import sys
data = []
n = 27
for k in range(n):
a = len(data)
b = sys.getsizeof(data)
print('Length: {0:3d}; Size in bytes: {1:4d}'.format(a, b))
data.append(None)
|
[
"luoshao23@gmail.com"
] |
luoshao23@gmail.com
|
816e04e5d69c642ba2a24942f2af7ce25030a1a5
|
8c9402d753e36d39e0bef431c503cf3557b7e777
|
/Sarsa_lambda_learning/main.py
|
e198580483ff22b2a9cc4c9141037276d21998a9
|
[] |
no_license
|
HuichuanLI/play_with_deep_reinforcement_learning
|
9477e925f6ade81f885fb3f3b526485f49423611
|
df2368868ae9489aff1be4ef0c6de057f094ef56
|
refs/heads/main
| 2023-07-08T04:52:38.167831
| 2021-08-21T14:05:36
| 2021-08-21T14:05:36
| 395,042,978
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,418
|
py
|
# -*- coding:utf-8 -*-
# @Time : 2021/8/17 10:59 下午
# @Author : huichuan LI
# @File : main.py
# @Software: PyCharm
from maze import Maze
from Sara_Lambda import SarsaLambdaTable
def update():
for episode in range(100):
# initial observation
observation = env.reset()
# RL choose action based on observation
action = RL.choose_action(str(observation))
while True:
# fresh env
env.render()
# RL take action and get next observation and reward
# 和Q_learning 一样
observation_, reward, done = env.step(action)
# RL choose action based on next observation
# 直接通过状态选择下一步
action_ = RL.choose_action(str(observation_))
# RL learn from this transition (s, a, r, s, a) ==> Sarsa
# 直接更新action_对应的哪一步
RL.learn(str(observation), action, reward, str(observation_), action_)
# swap observation and action
observation = observation_
action = action_
# break while loop when end of this episode
if done:
break
# end of game
print('game over')
env.destroy()
if __name__ == "__main__":
env = Maze()
RL = SarsaLambdaTable(actions=list(range(env.n_actions)))
env.after(100, update)
env.mainloop()
|
[
"lhc14124908@163.com"
] |
lhc14124908@163.com
|
2c67af0e6a0e47698557d1c16075616c11e7da42
|
1ec59e88299c7af9df3854188736b706e89e01fa
|
/app/forms/public/profile_forms.py
|
1f3f68864842f4660895d45a56b96a51956c26cd
|
[] |
no_license
|
Chenger1/NutCompany_FlaskApp
|
7484b04721766b42f9cc909d11c3e942bf3b3371
|
c51129e04f2c9e35263d9e28810b4c2862932ef6
|
refs/heads/master
| 2023-08-06T09:08:27.532820
| 2021-09-23T19:52:25
| 2021-09-23T19:52:25
| 405,457,276
| 0
| 0
| null | 2021-09-12T10:55:47
| 2021-09-11T18:44:35
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,241
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField, SelectField
from wtforms.validators import Email, Optional
from ..custom_field import CustomFileField
from app._db.choices import CountryChoice
class ClientPersonalInfoForm(FlaskForm):
fio = StringField('ФИО', validators=[Optional()])
email = StringField('Email', validators=[Optional(), Email()])
phone = StringField('Телефон', validators=[Optional()])
company = StringField('Компания', validators=[Optional()])
photo = CustomFileField('Фото', validators=[Optional()])
class ClientProfileAddressForm(FlaskForm):
country = SelectField('Страна', choices=CountryChoice.choices(), coerce=CountryChoice.coerce)
city = StringField('Город', validators=[Optional()])
address = StringField('Адрес', validators=[Optional()])
country_ur = SelectField('Страна', choices=CountryChoice.choices(), coerce=CountryChoice.coerce)
city_ur = StringField('Город', validators=[Optional()])
address_ur = StringField('Адрес', validators=[Optional()])
index = StringField('Индекс', validators=[Optional()])
credentials = StringField('Реквизиты', validators=[Optional()])
|
[
"exs2199@gmail.com"
] |
exs2199@gmail.com
|
cb1b09b13545f6e89fee158e5b5e37ee7d392d73
|
59366342805d7b7682a8c45fd5c11b910e791c21
|
/L8包/package/pack1/py1.py
|
b0fd52ca063138c053548e40274a039e81ea139e
|
[] |
no_license
|
wantwantwant/tutorial
|
dad006b5c9172b57c53f19d8229716f1dec5ccd1
|
8d400711ac48212e6992cfd187ee4bfb3642f637
|
refs/heads/master
| 2022-12-29T05:41:12.485718
| 2019-01-07T08:28:33
| 2019-01-07T08:28:33
| 171,679,026
| 2
| 0
| null | 2022-12-08T01:21:22
| 2019-02-20T13:33:42
|
Python
|
UTF-8
|
Python
| false
| false
| 214
|
py
|
def foo():
# 假设代表一些逻辑处理
print('foo')
def boo():
print('boo')
# 单脚本的时候,调用方法
foo()
boo()
print(__name__)
#
# if __name__ =='__main__':
# foo()
# boo()
|
[
"778042395@qq.com"
] |
778042395@qq.com
|
a99c2a5837c537a407dd87963f6047684fc42131
|
60b52f75e2b0712738d5ad2f9c2113e4d8016c1e
|
/Chapter01/Logistic regression model building/logistic.py
|
9173bc687a2a76562df6ba94ab599b0b78764c5a
|
[
"MIT"
] |
permissive
|
PacktPublishing/Hands-On-Deep-Learning-with-TensorFlow
|
b63b40140882762841403467f9255612972f7ec7
|
c81fdc1edf8f2275ea76a9900c92e7fae0ddf6ed
|
refs/heads/master
| 2023-01-24T19:44:40.191675
| 2023-01-24T11:07:02
| 2023-01-24T11:07:02
| 100,028,897
| 96
| 77
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,308
|
py
|
import tensorflow as tf
import numpy as np
%autoindent
try:
from tqdm import tqdm
except ImportError:
def tqdm(x, *args, **kwargs):
return x
# Set random seed
np.random.seed(0)
# Load data
data = np.load('data_with_labels.npz')
train = data['arr_0']/255.
labels = data['arr_1']
# Look at some data
print(train[0])
print(labels[0])
# If you have matplotlib installed
import matplotlib.pyplot as plt
plt.ion()
# Let's look at a subplot of one of A in each font
f, plts = plt.subplots(5, sharex=True)
c = 91
for i in range(5):
plts[i].pcolor(train[c + i * 558],
cmap=plt.cm.gray_r)
def to_onehot(labels,nclasses = 5):
'''
Convert labels to "one-hot" format.
>>> a = [0,1,2,3]
>>> to_onehot(a,5)
array([[ 1., 0., 0., 0., 0.],
[ 0., 1., 0., 0., 0.],
[ 0., 0., 1., 0., 0.],
[ 0., 0., 0., 1., 0.]])
'''
outlabels = np.zeros((len(labels),nclasses))
for i,l in enumerate(labels):
outlabels[i,l] = 1
return outlabels
onehot = to_onehot(labels)
# Split data into training and validation
indices = np.random.permutation(train.shape[0])
valid_cnt = int(train.shape[0] * 0.1)
test_idx, training_idx = indices[:valid_cnt],\
indices[valid_cnt:]
test, train = train[test_idx,:],\
train[training_idx,:]
onehot_test, onehot_train = onehot[test_idx,:],\
onehot[training_idx,:]
sess = tf.InteractiveSession()
# These will be inputs
## Input pixels, flattened
x = tf.placeholder("float", [None, 1296])
## Known labels
y_ = tf.placeholder("float", [None,5])
# Variables
W = tf.Variable(tf.zeros([1296,5]))
b = tf.Variable(tf.zeros([5]))
# Just initialize
sess.run(tf.global_variables_initializer())
# Define model
y = tf.nn.softmax(tf.matmul(x,W) + b)
### End model specification, begin training code
# Climb on cross-entropy
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(
logits = y + 1e-50, labels = y_))
# How we train
train_step = tf.train.GradientDescentOptimizer(
0.02).minimize(cross_entropy)
# Define accuracy
correct_prediction = tf.equal(tf.argmax(y,1),
tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(
correct_prediction, "float"))
# Actually train
epochs = 1000
train_acc = np.zeros(epochs//10)
test_acc = np.zeros(epochs//10)
for i in tqdm(range(epochs)):
# Record summary data, and the accuracy
if i % 10 == 0:
# Check accuracy on train set
A = accuracy.eval(feed_dict={
x: train.reshape([-1,1296]),
y_: onehot_train})
train_acc[i//10] = A
# And now the validation set
A = accuracy.eval(feed_dict={
x: test.reshape([-1,1296]),
y_: onehot_test})
test_acc[i//10] = A
train_step.run(feed_dict={
x: train.reshape([-1,1296]),
y_: onehot_train})
# Notice that accuracy flattens out
print(train_acc[-1])
print(test_acc[-1])
# Plot the accuracy curves
plt.figure(figsize=(6,6))
plt.plot(train_acc,'bo')
plt.plot(test_acc,'rx')
# Look at a subplot of the weights for each font
f, plts = plt.subplots(5, sharex=True)
for i in range(5):
plts[i].pcolor(W.eval()[:,i].reshape([36,36]))
|
[
"noreply@github.com"
] |
PacktPublishing.noreply@github.com
|
123d18a02f05d17059d952a8169d5b7d13b2133e
|
61bd4a9dfd606b3c9efd52f23848b7329b18a909
|
/Pythonscripts/run_predictions.py
|
071dc31901d1c69daae74de43dde6e21c174c466
|
[] |
no_license
|
philmcc/aistocks
|
e9e85dc65e5439793cc5caa4d851a9149ff762a1
|
0706ce7d63db271ee807cc1f6dba8cd178223612
|
refs/heads/master
| 2021-01-10T05:36:33.736881
| 2016-09-06T13:53:03
| 2016-09-06T13:53:03
| 46,048,154
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,673
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import MySQLdb as mdb
from pyfann import libfann
from datetime import date
from network_functions import save_prediction
mydate = date.today()
con = None
con = mdb.connect('localhost', 'root',
'fil1202job', 'stock');
with con:
cur = con.cursor(mdb.cursors.DictCursor)
cur1 = con.cursor()
cur2 = con.cursor()
#
# Get a list of all networks
#
cur.execute("SELECT a.id, a.group, b.ticker, b.predict_data, a.net_file FROM `network`.`network` a, network.net_group b where a.group = b.id;")
rows = cur.fetchall()
for row in rows:
#
# For each network get the training data - only most recent data at the moment
#
#seldate = "select latest_prediction from network.network where id = " + str(row["id"])
#cur2.execute(seldate)
#latestdate = cur2.fetchone()
#latestdate1 = latestdate[0]
#print latestdate1
cur1.execute(row["predict_data"])
for row1 in cur1.fetchall():
#
# Extract Date
#
mydate = row1[(len(row1) - 1)]
row1b = list(row1)
del row1b[(len(row1b) - 1)]
#
# Set up network
#
ann = libfann.neural_net()
ann.create_from_file(row["net_file"])
#
# Run Prediction
#
print ann.run(row1b)
prediction = ann.run(row1b)
prediction = str(prediction).translate(None, '[]')
#
# Store results in db - Function
#
save_prediction(row["id"], mydate, prediction)
|
[
"pmcclarence@iparadigms.com"
] |
pmcclarence@iparadigms.com
|
0f5ed518db714ea344380b6429275fec41ee5e98
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/chrome/test/webapps/graph_analysis_unittest.py
|
8c279f8cf4de227a48180ac060fca8eb86fd07b9
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 4,714
|
py
|
#!/usr/bin/env python3
# Copyright 2021 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import csv
from file_reading import read_actions_file, read_enums_file, read_platform_supported_actions, read_unprocessed_coverage_tests_file
from test_analysis import expand_parameterized_tests, filter_coverage_tests_for_platform, partition_framework_tests_per_platform_combination
from graph_analysis import build_action_node_graph, generate_framework_tests, trim_graph_to_platform_actions
import os
import unittest
from models import ActionNode, CoverageTestsByPlatform, CoverageTestsByPlatformSet, TestPartitionDescription
from models import TestPlatform
TEST_DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)),
"test_data")
class GraphAnalysisUnittest(unittest.TestCase):
def test_test_generation(self):
self.maxDiff = None
actions_filename = os.path.join(TEST_DATA_DIR, "test_actions.md")
enums_filename = os.path.join(TEST_DATA_DIR, "test_enums.md")
supported_actions_filename = os.path.join(
TEST_DATA_DIR, "framework_supported_actions.csv")
coverage_filename = os.path.join(TEST_DATA_DIR,
"test_unprocessed_coverage.md")
test_partition = TestPartitionDescription(
action_name_prefixes=set(),
browsertest_dir=os.path.join(TEST_DATA_DIR, "expected_test_txt"),
test_file_prefix="tests_default",
test_fixture="TestName")
with open(actions_filename, "r", encoding="utf-8") as actions_file, \
open(supported_actions_filename, "r", encoding="utf-8") \
as supported_actions_file, \
open (enums_filename, "r", encoding="utf-8") as enums, \
open(coverage_filename, "r", encoding="utf-8") \
as coverage_file:
enums = read_enums_file(enums.readlines())
platform_supported_actions = read_platform_supported_actions(
csv.reader(supported_actions_file, delimiter=','))
(actions, action_base_name_to_default_param) = read_actions_file(
actions_file.readlines(), enums, platform_supported_actions)
required_coverage_tests = read_unprocessed_coverage_tests_file(
coverage_file.readlines(), actions, enums,
action_base_name_to_default_param)
required_coverage_tests = expand_parameterized_tests(
required_coverage_tests)
required_coverage_by_platform: CoverageTestsByPlatform = {}
generated_tests_by_platform: CoverageTestsByPlatform = {}
for platform in TestPlatform:
platform_tests = filter_coverage_tests_for_platform(
required_coverage_tests.copy(), platform)
required_coverage_by_platform[platform] = platform_tests
generated_tests_root_node = ActionNode.CreateRootNode()
build_action_node_graph(generated_tests_root_node,
platform_tests)
trim_graph_to_platform_actions(generated_tests_root_node,
platform)
generated_tests_by_platform[
platform] = generate_framework_tests(
generated_tests_root_node, platform)
required_coverage_by_platform_set: CoverageTestsByPlatformSet = (
partition_framework_tests_per_platform_combination(
generated_tests_by_platform))
for platform_set, tests in required_coverage_by_platform_set.items(
):
expected_filename = os.path.join(
test_partition.browsertest_dir,
test_partition.test_file_prefix)
if len(platform_set) != len(TestPlatform):
for platform in TestPlatform:
if platform in platform_set:
expected_filename += "_" + platform.suffix
expected_filename += ".txt"
with open(expected_filename, "r",
encoding="utf-8") as expected_tests_file:
expected_tests_str = expected_tests_file.read()
actual_tests_str = "\n".join([
test.generate_browsertest(test_partition)
for test in tests
])
self.assertEqual(expected_tests_str, actual_tests_str)
if __name__ == '__main__':
unittest.main()
|
[
"chromium-scoped@luci-project-accounts.iam.gserviceaccount.com"
] |
chromium-scoped@luci-project-accounts.iam.gserviceaccount.com
|
ce7933230d5bc50519059d8bf563e142cacd0f9d
|
4f1218079f90a65befbf658679721886d71f4ee8
|
/python/hackerrank/birthdaychocolate.py
|
ef225e1789e29cfa011f85c8ddf433ee3d17c0b9
|
[] |
no_license
|
Escaity/Library
|
9f57767617422a7930caf48718d18f7ebef81547
|
b34d8600e0a65845f1b3a16eb4b98fc7087a3160
|
refs/heads/master
| 2022-07-29T16:18:33.073738
| 2022-07-17T10:25:22
| 2022-07-17T10:25:22
| 238,588,249
| 0
| 0
| null | 2021-08-17T03:02:34
| 2020-02-06T02:04:08
|
Python
|
UTF-8
|
Python
| false
| false
| 213
|
py
|
def birthday(s, d, m):
n = len(s)
cnt = 0
for i in range(n - m + 1):
bar = 0
for j in range(i, i + m):
bar += s[j]
if bar == d:
cnt += 1
return cnt
|
[
"esk2306@gmail.com"
] |
esk2306@gmail.com
|
e9e335201ab716e0b4e0c4dd41ecd24d930e054d
|
b7eb41b068614e04f38a969326f43d8f8119cb05
|
/74_search_a_2d_matrix.py
|
ca546b4123fa5936447cab9c7edc0057dcffd1b4
|
[] |
no_license
|
YI-DING/daily-leetcode
|
ddfb6985bf5014886cba8d6219da243e0aa28d71
|
a6d3898d900f2063302dc1ffc3dafd61eefa79b7
|
refs/heads/master
| 2020-05-19T06:07:21.557077
| 2019-07-19T16:31:46
| 2019-07-19T16:31:46
| 184,866,366
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,009
|
py
|
class Solution:
def searchMatrix(self, matrix: List[List[int]], target: int):
if not matrix or not matrix[0]:
return False
start, end = 0, len(matrix)-1
while start+1 < end:
mid = (start+end)//2
if matrix[mid][0] > target:
end = mid
else:
start = mid
if matrix[end][0] > target:
row, start, end = start, 0, len(matrix[0])-1
else:
row, start, end = end, 0, len(matrix[0])-1
while start+1 < end:
mid = (start+end)//2
if matrix[row][mid] > target:
end = mid
else:
start = mid
if matrix[row][start] == target:
return True
elif matrix[row][end] == target:
return True
return False
#this method uses BFS twice, first among rows then among cols
#however you could see it as len(m*n) and do binary search for only once
|
[
"yiding1@uchicago.edu"
] |
yiding1@uchicago.edu
|
f7a23f0389fe8115da3ae140207cef638d3ed979
|
cb3634622480f918540ff3ff38c96990a1926fda
|
/PyProject/leetcode/history/symmetric-tree—2.py
|
6a7f516c3065f6a3a5169f67922957b4efac8b15
|
[] |
no_license
|
jacksonyoudi/AlgorithmCode
|
cab2e13cd148354dd50a0487667d38c25bb1fd9b
|
216299d43ee3d179c11d8ca0783ae16e2f6d7c88
|
refs/heads/master
| 2023-04-28T07:38:07.423138
| 2022-10-23T12:45:01
| 2022-10-23T12:45:01
| 248,993,623
| 3
| 0
| null | 2023-04-21T20:44:40
| 2020-03-21T14:32:15
|
Go
|
UTF-8
|
Python
| false
| false
| 725
|
py
|
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def isSymmetric(self, root):
if root is None:
return True
else:
return self.isMirror(root.left, root.right)
def isMirror(self, left, right):
if left is None and right is None:
return True
if left is None or right is None:
return False
if left.val == right.val:
outPair = self.isMirror(left.left, right.right)
inPiar = self.isMirror(left.right, right.left)
return outPair and inPiar
else:
return False
|
[
"liangchangyoujackson@gmail.com"
] |
liangchangyoujackson@gmail.com
|
2fc6c3ca11a0533b9e305d1c97100d5ac134da5a
|
7044043460c74a9c1c9d386bdeccb87289362f76
|
/mysite/urls.py
|
7794602ec06995938b9e62a0ce60bf93ca078cb7
|
[] |
no_license
|
KIMJONGIK/mysite
|
6630682eca869b5122597baf2e2f59dd0b40869a
|
84b908ea75602c7ca801eafb7dd975aadf70593b
|
refs/heads/master
| 2022-12-09T14:33:38.741339
| 2020-09-16T11:48:53
| 2020-09-16T11:48:53
| 293,227,641
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,811
|
py
|
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
import main.views as mainviews
import guestbook.views as guestbookviews
import user.views as userviews
import board.views as boardviews
urlpatterns = [
path('main/', mainviews.index),
path('guestbook/', guestbookviews.index),
path('guestbook/add', guestbookviews.add),
path('guestbook/deleteform', guestbookviews.deleteform),
path('guestbook/delete', guestbookviews.delete),
path('user/joinform', userviews.joinform),
path('user/joinsuccess', userviews.joinsuccess),
path('user/join', userviews.join),
path('user/loginform', userviews.loginform),
path('user/login', userviews.login),
path('user/logout', userviews.logout),
path('user/updateform', userviews.updateform),
path('user/update', userviews.update),
path('board/', boardviews.index),
path('board/write', boardviews.write),
path('board/register', boardviews.register),
path('board/view', boardviews.view),
path('board/delete', boardviews.delete),
path('board/modifyform', boardviews.modifyform),
path('board/modify', boardviews.modify),
path('admin/', admin.site.urls),
]
|
[
"kji089@naver.com"
] |
kji089@naver.com
|
d5676fa17de1d686869f532cf7410e0555426ced
|
a75e7f434271f1ce4bc9e89f6cc10126aa1947e7
|
/test/__main__.py
|
b6661dcb01917492dc29fa3c377d63eb7fd7c385
|
[] |
no_license
|
smutel/pylib
|
53f0918ef897d5df5e2ecb7a6b0179bdd3647843
|
463873a0f9ff2052f740be632dde746be6e3b19b
|
refs/heads/master
| 2020-06-15T16:26:16.476496
| 2016-11-25T14:15:44
| 2016-11-25T14:15:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,637
|
py
|
#!/usr/bin/env python
# vim:ts=4:sts=4:sw=4:et
#
# Author: Hari Sekhon
# Date: 2015-11-14 12:21:54 +0000 (Sat, 14 Nov 2015)
#
# https://github.com/harisekhon/pylib
#
# License: see accompanying Hari Sekhon LICENSE file
#
# If you're using my code you're welcome to connect with me on LinkedIn and optionally send me feedback to help improve or steer this or other code I publish
#
# http://www.linkedin.com/in/harisekhon
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__author__ = 'Hari Sekhon'
__version__ = '0.1'
import glob
import inspect
import os
import subprocess
import sys
## using optparse rather than argparse for servers still on Python 2.6
#from optparse import OptionParser
# libdir = os.path.join(os.path.dirname(inspect.getfile(inspect.currentframe())), '..')
libdir = os.path.join(os.path.dirname(__file__), '..')
# sys.path.append(libdir)
# try:
# from harisekhon.utils import *
# except ImportError, e:
# print('module import failed: %s' % e)
# sys.exit(4)
def main():
print('running unit tests')
# this doesn't allow coverage to follow the code and see what's been covered
# for x in glob.glob(libdir + "/test/test_*.py"):
# if subprocess.call(['python', x]):
# sys.exit(2)
# subprocess.check_call(['python', x])
from test.test_utils import main
main()
from test.test_cli import main
main()
from test.test_nagiosplugin import main
main()
from test.test_threshold import main
main()
if __name__ == '__main__':
main()
|
[
"harisekhon@gmail.com"
] |
harisekhon@gmail.com
|
8fc33e667b9cd3bc3e640188e68f4aa66390f63a
|
6bd4d4845ac3569fb22ce46e6bdd0a8e83dd38b7
|
/fastreid/data/build.py
|
da5b4b0137cd82c4dc9cc869976c914e7c475f7a
|
[] |
no_license
|
wodole/fast-reid
|
a227219acf2606124655d63fa88c0cf3e22f4099
|
9cf222e093b0d37c67d2d95829fdf74097b7fce1
|
refs/heads/master
| 2022-04-15T15:10:07.045423
| 2020-04-08T13:04:09
| 2020-04-08T13:04:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,159
|
py
|
# encoding: utf-8
"""
@author: l1aoxingyu
@contact: sherlockliao01@gmail.com
"""
import logging
import torch
from torch._six import container_abcs, string_classes, int_classes
from torch.utils.data import DataLoader
from . import samplers
from .common import CommDataset, data_prefetcher
from .datasets import DATASET_REGISTRY
from .transforms import build_transforms
def build_reid_train_loader(cfg):
train_transforms = build_transforms(cfg, is_train=True)
logger = logging.getLogger(__name__)
train_items = list()
for d in cfg.DATASETS.NAMES:
logger.info('prepare training set {}'.format(d))
dataset = DATASET_REGISTRY.get(d)()
train_items.extend(dataset.train)
train_set = CommDataset(train_items, train_transforms, relabel=True)
num_workers = cfg.DATALOADER.NUM_WORKERS
batch_size = cfg.SOLVER.IMS_PER_BATCH
num_instance = cfg.DATALOADER.NUM_INSTANCE
if cfg.DATALOADER.PK_SAMPLER:
data_sampler = samplers.RandomIdentitySampler(train_set.img_items, batch_size, num_instance)
else:
data_sampler = samplers.TrainingSampler(len(train_set))
batch_sampler = torch.utils.data.sampler.BatchSampler(data_sampler, batch_size, True)
train_loader = torch.utils.data.DataLoader(
train_set,
num_workers=num_workers,
batch_sampler=batch_sampler,
collate_fn=fast_batch_collator,
)
return data_prefetcher(cfg, train_loader)
def build_reid_test_loader(cfg, dataset_name):
test_transforms = build_transforms(cfg, is_train=False)
logger = logging.getLogger(__name__)
logger.info('prepare test set {}'.format(dataset_name))
dataset = DATASET_REGISTRY.get(dataset_name)()
test_items = dataset.query + dataset.gallery
test_set = CommDataset(test_items, test_transforms, relabel=False)
num_workers = cfg.DATALOADER.NUM_WORKERS
batch_size = cfg.TEST.IMS_PER_BATCH
data_sampler = samplers.InferenceSampler(len(test_set))
batch_sampler = torch.utils.data.BatchSampler(data_sampler, batch_size, False)
test_loader = DataLoader(
test_set,
batch_sampler=batch_sampler,
num_workers=num_workers,
collate_fn=fast_batch_collator)
return data_prefetcher(cfg, test_loader), len(dataset.query)
def trivial_batch_collator(batch):
"""
A batch collator that does nothing.
"""
return batch
def fast_batch_collator(batched_inputs):
"""
A simple batch collator for most common reid tasks
"""
elem = batched_inputs[0]
if isinstance(elem, torch.Tensor):
out = torch.zeros((len(batched_inputs), *elem.size()), dtype=elem.dtype)
for i, tensor in enumerate(batched_inputs):
out[i] += tensor
return out
elif isinstance(elem, container_abcs.Mapping):
return {key: fast_batch_collator([d[key] for d in batched_inputs]) for key in elem}
elif isinstance(elem, float):
return torch.tensor(batched_inputs, dtype=torch.float64)
elif isinstance(elem, int_classes):
return torch.tensor(batched_inputs)
elif isinstance(elem, string_classes):
return batched_inputs
|
[
"sherlockliao01@gmail.com"
] |
sherlockliao01@gmail.com
|
bda08bb1e8392fe0495c5b0f7bc2ba3dc882b580
|
8dc84558f0058d90dfc4955e905dab1b22d12c08
|
/third_party/android_ndk/toolchains/llvm/prebuilt/linux-x86_64/tools/scan-view/share/startfile.py
|
673935909f823467ad1dd737788133966d2a00e3
|
[
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"MIT",
"Apache-2.0",
"NCSA",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-arm-llvm-sga",
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
meniossin/src
|
42a95cc6c4a9c71d43d62bc4311224ca1fd61e03
|
44f73f7e76119e5ab415d4593ac66485e65d700a
|
refs/heads/master
| 2022-12-16T20:17:03.747113
| 2020-09-03T10:43:12
| 2020-09-03T10:43:12
| 263,710,168
| 1
| 0
|
BSD-3-Clause
| 2020-05-13T18:20:09
| 2020-05-13T18:20:08
| null |
UTF-8
|
Python
| false
| false
| 6,038
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Utility for opening a file using the default application in a cross-platform
manner. Modified from http://code.activestate.com/recipes/511443/.
"""
__version__ = '1.1x'
__all__ = ['open']
import os
import sys
import webbrowser
import subprocess
_controllers = {}
_open = None
class BaseController(object):
    """Abstract base for the platform-specific file-opener controllers."""

    def __init__(self, name):
        # Human-readable controller name (usually the underlying command).
        self.name = name

    def open(self, filename):
        """Open *filename*; concrete controllers must override this."""
        raise NotImplementedError
class Controller(BaseController):
    '''Controller for a generic open program.'''
    def __init__(self, *args):
        # args is the base command line; args[0] is the executable whose
        # basename becomes the controller's display name.
        super(Controller, self).__init__(os.path.basename(args[0]))
        self.args = list(args)
    def _invoke(self, cmdline):
        '''Run cmdline and return True when it exited successfully.'''
        if sys.platform[:3] == 'win':
            closefds = False
            startupinfo = subprocess.STARTUPINFO()
            startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        else:
            closefds = True
            startupinfo = None
        if (os.environ.get('DISPLAY') or sys.platform[:3] == 'win' or
            sys.platform == 'darwin'):
            # BUG FIX: use the open() builtin -- the file() builtin was
            # deprecated in Python 2.6 and removed in Python 3.
            inout = open(os.devnull, 'r+')
        else:
            # for TTY programs, we need stdin/out
            inout = None
        # if possible, put the child process in a separate process group,
        # so keyboard interrupts don't affect the child process as well as
        # Python
        setsid = getattr(os, 'setsid', None)
        if not setsid:
            setsid = getattr(os, 'setpgrp', None)
        pipe = subprocess.Popen(cmdline, stdin=inout, stdout=inout,
                                stderr=inout, close_fds=closefds,
                                preexec_fn=setsid, startupinfo=startupinfo)
        # It is assumed that this kind of tools (gnome-open, kfmclient,
        # exo-open, xdg-open and open for OSX) immediately exit after
        # launching the specific application
        returncode = pipe.wait()
        if hasattr(self, 'fixreturncode'):
            # Subclasses (e.g. KfmClient) may normalise bogus exit codes.
            returncode = self.fixreturncode(returncode)
        return not returncode
    def open(self, filename):
        '''Open filename (a string or a sequence of extra args); return
        False on failure.'''
        # NOTE(review): basestring is Python 2 only, consistent with the
        # rest of this module (which also uses the py2 'commands' module).
        if isinstance(filename, basestring):
            cmdline = self.args + [filename]
        else:
            # assume it is a sequence
            cmdline = self.args + filename
        try:
            return self._invoke(cmdline)
        except OSError:
            return False
# Platform support for Windows
if sys.platform[:3] == 'win':
    class Start(BaseController):
        '''Controller for the win32 start progam through os.startfile.'''
        def open(self, filename):
            try:
                os.startfile(filename)
            except WindowsError:
                # [Error 22] No application is associated with the specified
                # file for this operation: '<URL>'
                return False
            else:
                return True
    _controllers['windows-default'] = Start('start')
    _open = _controllers['windows-default'].open
# Platform support for MacOS
elif sys.platform == 'darwin':
    _controllers['open']= Controller('open')
    _open = _controllers['open'].open
# Platform support for Unix
else:
    # NOTE(review): the 'commands' module is Python 2 only, matching this
    # module's Python 2 heritage.
    import commands
    # @WARNING: use the private API of the webbrowser module
    from webbrowser import _iscommand
    class KfmClient(Controller):
        '''Controller for the KDE kfmclient program.'''
        def __init__(self, kfmclient='kfmclient'):
            super(KfmClient, self).__init__(kfmclient, 'exec')
            self.kde_version = self.detect_kde_version()
        def detect_kde_version(self):
            '''Return the KDE version reported by kde-config, or None.'''
            kde_version = None
            try:
                info = commands.getoutput('kde-config --version')
                for line in info.splitlines():
                    if line.startswith('KDE'):
                        kde_version = line.split(':')[-1].strip()
                        break
            except (OSError, RuntimeError):
                pass
            return kde_version
        def fixreturncode(self, returncode):
            # kfmclient on older KDE returns unreliable exit codes; treat
            # them as success (os.EX_OK) below version 3.5.5.
            if returncode is not None and self.kde_version > '3.5.4':
                return returncode
            else:
                return os.EX_OK
    def detect_desktop_environment():
        '''Checks for known desktop environments
        Return the desktop environments name, lowercase (kde, gnome, xfce)
        or "generic"
        '''
        desktop_environment = 'generic'
        if os.environ.get('KDE_FULL_SESSION') == 'true':
            desktop_environment = 'kde'
        elif os.environ.get('GNOME_DESKTOP_SESSION_ID'):
            desktop_environment = 'gnome'
        else:
            try:
                info = commands.getoutput('xprop -root _DT_SAVE_MODE')
                if ' = "xfce4"' in info:
                    desktop_environment = 'xfce'
            except (OSError, RuntimeError):
                pass
        return desktop_environment
    def register_X_controllers():
        '''Register a controller for each opener present on this system.'''
        if _iscommand('kfmclient'):
            _controllers['kde-open'] = KfmClient()
        for command in ('gnome-open', 'exo-open', 'xdg-open'):
            if _iscommand(command):
                _controllers[command] = Controller(command)
    def get():
        '''Pick the opener matching the desktop environment, falling back
        to xdg-open and finally webbrowser.open.'''
        controllers_map = {
            'gnome': 'gnome-open',
            'kde': 'kde-open',
            'xfce': 'exo-open',
        }
        desktop_environment = detect_desktop_environment()
        try:
            controller_name = controllers_map[desktop_environment]
            return _controllers[controller_name].open
        except KeyError:
            # BUG FIX: dict.has_key() was removed in Python 3; the `in`
            # operator is equivalent and also works on Python 2.
            if 'xdg-open' in _controllers:
                return _controllers['xdg-open'].open
            else:
                return webbrowser.open
    if os.environ.get("DISPLAY"):
        register_X_controllers()
    _open = get()
def open(filename):
    '''Open a file or an URL in the registered default application.

    NOTE: deliberately shadows the builtin open() within this module;
    delegates to the platform-specific _open callable selected at import.
    '''
    return _open(filename)
|
[
"arnaud@geometry.ee"
] |
arnaud@geometry.ee
|
a269a7226604cf187ef5653174f1c4c263b1f6a7
|
92dd6a174bf90e96895127bb562e3f0a05d6e079
|
/apply dfs and bfs/섬나라 아일랜드.py
|
d24817c5606aba77e667c87bdc35fa782e3a2e65
|
[] |
no_license
|
123qpq/inflearn_python
|
caa4a86d051d76bf5612c57ae9578f1925abc5a9
|
5904cedabea9d5bc4afa3f1f76911dfccce754b5
|
refs/heads/main
| 2023-03-12T05:14:06.162651
| 2021-02-28T14:03:58
| 2021-02-28T14:03:58
| 338,735,340
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 683
|
py
|
from collections import deque
# Size of the square grid (read from stdin).
n = int(input())
# Grid of 0/1 cells; 1 marks land.
table = [list(map(int, input().split())) for _ in range(n)]
# 8-directional neighbour offsets (including diagonals).
dx = [-1, -1, 0, 1, 1, 1, 0, -1]
dy = [0, 1, 1, 1, 0, -1, -1, -1]
q = deque()
cnt = 0  # number of islands (8-connected components of 1s)
for i in range(n):
    for j in range(n):
        if table[i][j] == 1:
            # Unvisited land cell: flood-fill its island with BFS,
            # zeroing cells as they are enqueued to mark them visited.
            table[i][j] = 0
            q.append((i, j))
            while q:
                now = q.popleft()
                for a in range(8):
                    xx = now[0] + dx[a]
                    yy = now[1] + dy[a]
                    if 0 <= xx < n and 0 <= yy < n and table[xx][yy] == 1:
                        table[xx][yy] = 0
                        q.append((xx, yy))
            cnt += 1
print(cnt)
|
[
"45002168+123qpq@users.noreply.github.com"
] |
45002168+123qpq@users.noreply.github.com
|
34017423ccd92177b7ccc9ac8445d31505fcfc05
|
20aadf6ec9fd64d1d6dffff56b05853e0ab26b1f
|
/problemset3/hangmanPart1.py
|
98e635434a0aee5915adad9d46256d25316d340e
|
[] |
no_license
|
feminas-k/MITx---6.00.1x
|
9a8e81630be784e5aaa890d811674962c66d56eb
|
1ddf24c25220f8b5f78d36e2a3342b6babb40669
|
refs/heads/master
| 2021-01-19T00:59:57.434511
| 2016-06-13T18:13:17
| 2016-06-13T18:13:17
| 61,058,244
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 423
|
py
|
def isWordGuessed(secretWord, lettersGuessed):
    '''
    secretWord: string, the word the user is guessing
    lettersGuessed: list, what letters have been guessed so far
    returns: boolean, True if all the letters of secretWord are in
        lettersGuessed; False otherwise
    '''
    # all() short-circuits on the first missing letter, matching the
    # original loop's early return; an empty secretWord yields True.
    return all(letter in lettersGuessed for letter in secretWord)
|
[
"femi1991@gmail.com"
] |
femi1991@gmail.com
|
e2305a194758b56976ba2b3d942a874de4f50a80
|
bfe13b5458c5a3b8a212479ad8596934738a83d9
|
/solar/solar_conv1d_1.py
|
b6c23eee1267e5d4790dbb3a0f5d9eff7cae0ab1
|
[] |
no_license
|
sswwd95/Project
|
f32968b6a640dffcfba53df943f0cf48e60d29df
|
fdcf8556b6203a407e5548cb4eda195fb597ad6e
|
refs/heads/master
| 2023-04-21T23:03:24.282518
| 2021-02-15T00:55:16
| 2021-02-15T00:55:16
| 338,989,928
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,872
|
py
|
import pandas as pd
import numpy as np
import os
import glob
import random
import tensorflow.keras.backend as K
import warnings
warnings.filterwarnings('ignore')
# Load the training data and the sample-submission template.
train = pd.read_csv('./solar/csv/train.csv')
sub = pd.read_csv('./solar/csv/sample_submission.csv')
# Column glossary (translated from Korean):
# Hour   - hour of day
# Minute - minute
# DHI    - Diffuse Horizontal Irradiance (W/m2)
# DNI    - Direct Normal Irradiance (W/m2)
# WS     - Wind Speed (m/s)
# RH     - Relative Humidity (%)
# T      - Temperature (Degree C)
# Target - photovoltaic power output (kW)
# Reminder: axis=0 aggregates over rows, axis=1 over columns.
# 1. Data
# Add a GHI column, more directly informative than DHI/DNI alone.
def preprocess_data(data, is_train=True):
    """Add a GHI column, keep the feature columns, and (when training)
    append Day7/Day8 target columns by shifting TARGET back 1 and 2 days.

    NOTE(review): assumes 48 rows per day (30-minute resolution); the
    shift(-48) and iloc[-48:] logic depend on it -- confirm upstream.
    """
    # Day-cycle cosine derived from the hour (solar-elevation proxy).
    data['cos'] = np.cos(np.pi/2 - np.abs(data['Hour']%12-6)/6*np.pi/2)
    # GHI = DNI * cos + DHI, inserted next to the time columns.
    data.insert(1, 'GHI', data['DNI']*data['cos']+data['DHI'])
    temp = data.copy()
    temp = temp[['Hour','TARGET','GHI','DHI', 'DNI', 'WS', 'RH', 'T']]
    if is_train==True:
        temp['Target1'] = temp['TARGET'].shift(-48).fillna(method='ffill') # day7 target
        temp['Target2'] = temp['TARGET'].shift(-48*2).fillna(method='ffill') # day8 target
        temp = temp.dropna()
        return temp.iloc[:-96] # drop last 2 days: their shifted targets are only ffill padding
    elif is_train==False:
        temp = temp[['Hour','TARGET','GHI','DHI', 'DNI', 'WS', 'RH', 'T']]
        return temp.iloc[-48:,:] # inference: return only the final day
df_train = preprocess_data(train)
x_train = df_train.to_numpy()
print(x_train)
print(x_train.shape) # (52464, 10): 8 feature columns plus the day7/day8 targets
###### concatenate the test files ############
df_test = []
for i in range(81):
    file_path = '../solar/test/' + str(i) + '.csv'
    temp = pd.read_csv(file_path)
    temp = preprocess_data(temp, is_train=False) # is_train=False: returns only the last day
    df_test.append(temp) # collect each file's final day
x_test = pd.concat(df_test)
print(x_test.shape) # (3888, 8) -> conceptually (81, 48, 8): 81 days x 48 half-hours x 8 columns
x_test = x_test.to_numpy()
##################################
# Standardisation (values are heavily skewed toward 0, hence StandardScaler).
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(x_train[:,:-2]) # fit on feature columns only (exclude the day7/day8 targets)
x_train[:,:-2] = scaler.transform(x_train[:,:-2])
x_test = scaler.transform(x_test)
######## split the train data ###########
def split_xy(data, timestep):
    """Cut *data* into sliding feature windows plus the two target columns.

    The last two columns of *data* are the Day7/Day8 targets; everything
    else is a feature.  For each window [start, start+timestep) the two
    targets are taken from the window's final row.
    """
    features, targets_d7, targets_d8 = [], [], []
    for start in range(len(data)):
        stop = start + timestep
        if stop > len(data):
            # Not enough rows remain for a full window.
            break
        features.append(data[start:stop, :-2])
        targets_d7.append(data[stop - 1:stop, -2])
        targets_d8.append(data[stop - 1:stop, -1])
    return np.array(features), np.array(targets_d7), np.array(targets_d8)
x, y1, y2 = split_xy(x_train,1) # x_train을 한 행씩 자른다. (30분 단위로 보면서 day7,8의 같은 시간대 예측)
print(x.shape) #(52464, 1, 8)
print(y1.shape) #(52464, 1)
print(y2.shape) #(52464, 1)
########## test 데이터를 train 데이터와 같게 분리 ######
def split_x(data, timestep) :
    """Return every length-*timestep* sliding window of *data* as one array."""
    windows = []
    for start in range(len(data)):
        stop = start + timestep
        if stop > len(data):
            # Remaining rows are too few for a complete window.
            break
        windows.append(data[start:stop])
    return np.array(windows)
x_test = split_x(x_test,1)
######################################################
from sklearn.model_selection import train_test_split
x_train, x_val, y1_train, y1_val, y2_train, y2_val = train_test_split(
x, y1, y2, train_size = 0.8, random_state=0)
print(x_train.shape) #(41971, 1, 8)
def quantile_loss(q, y_true, y_pred):
    """Pinball (quantile) loss for quantile q; K is the Keras backend."""
    e = (y_true - y_pred) # residual: true value minus prediction
    return K.mean(K.maximum(q*e, (q-1)*e), axis=-1)
quantiles = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]
# 2. 모델구성
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv1D, Flatten, Dropout
def Model():
    """Build the Conv1D quantile-regression network.

    Input shape is (1 timestep, 8 features); the single output uses a ReLU
    head, so predictions are non-negative.
    """
    model = Sequential()
    model.add(Conv1D(128,2,padding='same',activation='relu', input_shape = (1,8)))
    model.add(Dropout(0.2))
    model.add(Conv1D(64,2,padding='same', activation='relu'))
    model.add(Conv1D(64,2,padding='same', activation='relu'))
    model.add(Flatten())
    model.add(Dense(64, activation='relu'))
    model.add(Dense(32, activation='relu'))
    model.add(Dense(16, activation='relu'))
    model.add(Dense(1, activation='relu'))
    return model
from tensorflow.keras.callbacks import EarlyStopping,ModelCheckpoint,ReduceLROnPlateau
modelpath = '../solar/check/solar0121_{epoch:02d}_{val_loss:.4f}.hdf5'
cp = ModelCheckpoint(filepath=modelpath, monitor='val_loss', save_best_only=True, mode='auto')
es = EarlyStopping(monitor = 'val_loss', patience=10, mode='min')
lr = ReduceLROnPlateau(monitor='val_loss', patience=5, factor=0.5)
bs = 16
epochs = 1
###### day7: train one model per quantile ######
x=[]
for q in quantiles:
    model = Model()
    modelpath = '../solar/check/solar_0121_day7_{epoch:02d}_{val_loss:.4f}.hdf5'
    cp = ModelCheckpoint(filepath=modelpath, monitor='val_loss', save_best_only=True, mode='auto')
    # NOTE(review): the lambdas close over q late, but compile/fit run within
    # this same iteration, so the current q is used -- correct but fragile.
    model.compile(loss=lambda y_true,y_pred: quantile_loss(q,y_true, y_pred),
    optimizer='adam', metrics = [lambda y, y_pred: quantile_loss(q, y, y_pred)])
    model.fit(x_train,y1_train, batch_size = bs, callbacks=[es, cp, lr], epochs=epochs, validation_data=(x_val, y1_val))
    pred = pd.DataFrame(model.predict(x_test).round(2)) # round predictions to 2 decimal places
    x.append(pred)
df_temp1 = pd.concat(x, axis=1)
df_temp1[df_temp1<0] = 0 # clamp negative predictions to 0
num_temp1 = df_temp1.to_numpy()
sub.loc[sub.id.str.contains('Day7'), 'q_0.1':] = num_temp1
###### day8: same procedure against the day8 targets ######
x = []
for q in quantiles:
    model = Model()
    modelpath = '../solar/check/solar_0121_day8_{epoch:02d}_{val_loss:.4f}.hdf5'
    cp = ModelCheckpoint(filepath=modelpath, monitor='val_loss', save_best_only=True, mode='auto')
    model.compile(loss=lambda y_true,y_pred: quantile_loss(q,y_true, y_pred),
    optimizer='adam', metrics = [lambda y, y_pred: quantile_loss(q, y, y_pred)])
    model.fit(x_train,y2_train, batch_size = bs, callbacks=[es, cp, lr], epochs=epochs, validation_data=(x_val, y2_val))
    pred = pd.DataFrame(model.predict(x_test).round(2)) # round predictions to 2 decimal places
    x.append(pred)
df_temp2 = pd.concat(x, axis=1)
df_temp2[df_temp2<0] = 0 # clamp negative predictions to 0
num_temp2 = df_temp2.to_numpy()
sub.loc[sub.id.str.contains('Day8'), 'q_0.1':] = num_temp2
sub.to_csv('./solar/csv/sub_0121.csv', index=False)
|
[
"sswwd95@gmail.com"
] |
sswwd95@gmail.com
|
237ed5f539d9574b418d151c89a4c1c84834526c
|
3adec884f06eabfe50d4ab3456123e04d02b02ff
|
/287. Find the Duplicate Number.py
|
df0582aa45ceb74b6bdc850e22299524e03b7121
|
[] |
no_license
|
windmzx/pyleetcode
|
c57ecb855c8e560dd32cf7cf14616be2f91ba50e
|
d0a1cb895e1604fcf70a73ea1c4b1e6b283e3400
|
refs/heads/master
| 2022-10-05T17:51:08.394112
| 2020-06-09T09:24:28
| 2020-06-09T09:24:28
| 250,222,719
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 527
|
py
|
from typing import List


class Solution:
    def findDuplicate(self, nums: List[int]) -> int:
        """Binary-search the value range [1, len(nums)] for the duplicate.

        Invariant: if more than `pivot` elements are <= pivot, the
        duplicated value must lie in [lo, pivot]; otherwise in (pivot, hi].
        """
        lo, hi = 1, len(nums)
        while lo < hi:
            pivot = (lo + hi) // 2
            at_most = sum(1 for value in nums if value <= pivot)
            if at_most > pivot:
                hi = pivot
            else:
                lo = pivot + 1
        return lo


if __name__ == "__main__":
    solver = Solution()
    print(solver.findDuplicate([1, 3, 3, 2]))
|
[
"2281927774@qq.com"
] |
2281927774@qq.com
|
4c6b37c4b6d003a5c694b4bdd7795f7854e6f430
|
6fa701cdaa0d83caa0d3cbffe39b40e54bf3d386
|
/google/cloud/managedidentities/v1beta1/managedidentities-v1beta1-py/noxfile.py
|
34dc58b5f6e2c0eefe1b194e280ee2a1542d9b95
|
[
"Apache-2.0"
] |
permissive
|
oltoco/googleapis-gen
|
bf40cfad61b4217aca07068bd4922a86e3bbd2d5
|
00ca50bdde80906d6f62314ef4f7630b8cdb6e15
|
refs/heads/master
| 2023-07-17T22:11:47.848185
| 2021-08-29T20:39:47
| 2021-08-29T20:39:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,595
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pathlib
import shutil
import subprocess
import sys

import nox  # type: ignore

CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()

LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8")

# Sessions run by default when nox is invoked without -s.
# BUG FIX: "check_lower_bounds" previously lacked a trailing comma, so
# Python's implicit string concatenation fused it with "docs" into the
# single nonexistent session name "check_lower_boundsdocs", silently
# dropping both sessions from the default list.
nox.sessions = [
    "unit",
    "cover",
    "mypy",
    "check_lower_bounds",
    # exclude update_lower_bounds from default
    "docs",
]
@nox.session(python=['3.6', '3.7', '3.8', '3.9'])
def unit(session):
    """Run the unit test suite."""
    # asyncmock / pytest-asyncio -- presumably needed for the async client
    # tests; confirm against the generated test suite.
    session.install('coverage', 'pytest', 'pytest-cov', 'asyncmock', 'pytest-asyncio')
    session.install('-e', '.')
    session.run(
        'py.test',
        '--quiet',
        '--cov=google/cloud/managedidentities_v1beta1/',
        '--cov-config=.coveragerc',
        '--cov-report=term',
        '--cov-report=html',
        os.path.join('tests', 'unit', ''.join(session.posargs))
    )
@nox.session(python='3.7')
def cover(session):
    """Run the final coverage report.
    This outputs the coverage report aggregating coverage from the unit
    test runs (not system test runs), and then erases coverage data.
    """
    session.install("coverage", "pytest-cov")
    # Fail the build when aggregate coverage drops below 100%.
    session.run("coverage", "report", "--show-missing", "--fail-under=100")
    session.run("coverage", "erase")
@nox.session(python=['3.6', '3.7'])
def mypy(session):
    """Run the type checker."""
    # types-pkg_resources supplies stubs for the pkg_resources imports.
    session.install('mypy', 'types-pkg_resources')
    session.install('.')
    session.run(
        'mypy',
        '--explicit-package-bases',
        'google',
    )
@nox.session
def update_lower_bounds(session):
    """Update lower bounds in constraints.txt to match setup.py"""
    # lower-bound-checker -- presumably provided by google-cloud-testutils
    # (installed below); confirm if the session fails to find the command.
    session.install('google-cloud-testutils')
    session.install('.')
    session.run(
        'lower-bound-checker',
        'update',
        '--package-name',
        PACKAGE_NAME,
        '--constraints-file',
        str(LOWER_BOUND_CONSTRAINTS_FILE),
    )
@nox.session
def check_lower_bounds(session):
    """Check lower bounds in setup.py are reflected in constraints file"""
    # Read-only counterpart of update_lower_bounds: verifies instead of
    # rewriting the constraints file.
    session.install('google-cloud-testutils')
    session.install('.')
    session.run(
        'lower-bound-checker',
        'check',
        '--package-name',
        PACKAGE_NAME,
        '--constraints-file',
        str(LOWER_BOUND_CONSTRAINTS_FILE),
    )
@nox.session(python='3.6')
def docs(session):
    """Build the docs for this library."""
    session.install("-e", ".")
    session.install("sphinx<3.0.0", "alabaster", "recommonmark")
    # Start from a clean build directory so stale artifacts don't leak in.
    shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True)
    session.run(
        "sphinx-build",
        "-W",  # warnings as errors
        "-T",  # show full traceback on exception
        "-N",  # no colors
        "-b",
        "html",
        "-d",
        os.path.join("docs", "_build", "doctrees", ""),
        os.path.join("docs", ""),
        os.path.join("docs", "_build", "html", ""),
    )
|
[
"bazel-bot-development[bot]@users.noreply.github.com"
] |
bazel-bot-development[bot]@users.noreply.github.com
|
4a8839c76e364ce097ae40ad6f248bb84cc4d8ef
|
7bcb0b7f721c8fa31da7574f13ed0056127715b3
|
/src/apps/base/models/dimensions/dimension_client.py
|
666ebe39af5dc08ced900d20257b4276f2e8c9ce
|
[] |
no_license
|
simonchapman1986/ripe
|
09eb9452ea16730c105c452eefb6a6791c1b4a69
|
c129da2249b5f75015f528e4056e9a2957b7d884
|
refs/heads/master
| 2022-07-22T05:15:38.485619
| 2016-01-15T12:53:43
| 2016-01-15T12:53:43
| 49,718,671
| 1
| 0
| null | 2022-07-07T22:50:50
| 2016-01-15T12:53:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,358
|
py
|
from django.db import models
from django_extensions.db.fields import UUIDField
from apps.base.models.dimensions.dimension import select_or_insert
from apps.flags.checks.client import client
class DimensionClient(models.Model):
    """
    DimensionClient
    Dim to filter down on clients within the reported data facts
    Although this is merely a dim within the system, we have a flag set to this dim.
    The reason for this is because we ingest clients. If we are receiving events for a client that does not yet
    exist in the clients table, something is going awry, either the ingested data, or one of our events is failing
    to ingest as it should.
    The 'client' flag simply checks the client table upon insertion, if the client does exist, we are ok and no
    flag is required. However if it does not yet exist, there may be an issue so a DoesNotExist flag is raised.
    Regardless of the flag outcome we always store the client dim, we cannot ignore the data we receive.
    """
    # UUID identifying the client this dim row represents.
    client_id = UUIDField(version=4, unique=True)
    class Meta:
        app_label = 'base'
        db_table = 'dim_client'
    @classmethod
    def insert(cls, **kwargs):
        # NOTE(review): a missing client_id defaults to False, and
        # False != -1 is always True, so the flag check also fires when no
        # client_id was supplied -- confirm -1 is the intended skip sentinel.
        cid = kwargs.get('client_id', False)
        if cid != -1:
            client(client_id=cid, event_name='insert')
        # Always store the dim regardless of the flag outcome (see docstring).
        return select_or_insert(cls, values={}, **kwargs)
|
[
"simon-ch@moving-picture.com"
] |
simon-ch@moving-picture.com
|
7df42e2ac65b41410913aeea15f66a7ecc66569b
|
772d1ab6a1814e4b6a408ee39865c664563541a6
|
/lms_app/lms_dto/QuestionDto.py
|
8b8efd36df53eb095889030e90c1f10efc0d854d
|
[] |
no_license
|
omitogunjesufemi/lms
|
7deed8bf54799034d6af2b379a0c56801f5645cc
|
9c8bb88556a3f5598cf555623ef016a74ae3f5c7
|
refs/heads/master
| 2023-05-04T12:52:13.862572
| 2021-05-25T13:48:26
| 2021-05-25T13:48:26
| 330,643,258
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 842
|
py
|
class SetQuestionDto:
    """Payload for creating a question within an assessment.

    NOTE: these are bare class-level annotations -- instances get no
    defaults or generated __init__; fields must be assigned explicitly.
    """
    question_title: str
    question_content: str
    choice1: str
    choice2: str
    choice3: str
    choice4: str
    answer: str
    assigned_mark: int
    assessment_id: int
    id: int
class UpdateQuestionDto:
    """Payload for editing an existing question (identified by `id`);
    unlike SetQuestionDto it carries no assessment_id."""
    question_title: str
    question_content: str
    choice1: str
    choice2: str
    choice3: str
    choice4: str
    answer: str
    assigned_mark: int
    id: int
class ListQuestionDto:
    """Row shape returned when listing an assessment's questions."""
    question_title: str
    assigned_mark: int
    assessment_id: int
    question_content: str
    choice1: str
    choice2: str
    choice3: str
    choice4: str
    answer: str
    id: int
class GetQuestionDto:
    """Shape returned when fetching a single question by id."""
    question_title: str
    question_content: str
    choice1: str
    choice2: str
    choice3: str
    choice4: str
    answer: str
    assigned_mark: int
    assessment_id: int
    id: int
|
[
"omitogunopeyemi@gmail.com"
] |
omitogunopeyemi@gmail.com
|
edfb5453073a6d9575cdaf11a8e4117f7ae0ec0d
|
5e05c6ec892d9a6bc33c0c0a9b6ce4c7135a83f4
|
/cristianoronaldoyopmailcom_299/settings.py
|
d5a0c910720d8dd82153b4b4433f70e3d17e090e
|
[] |
no_license
|
payush/cristianoronaldoyopmailcom-299
|
54eb5118840ea7ea68f077ffd7032a62a79880f3
|
52e5bb6ad599605b8cdf1088f9d7cdcf7c1a0265
|
refs/heads/master
| 2020-03-23T14:23:17.476546
| 2018-07-20T06:30:07
| 2018-07-20T06:30:07
| 141,672,766
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,157
|
py
|
"""
Django settings for cristianoronaldoyopmailcom_299 project.
Generated by 'django-admin startproject' using Django 1.11.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; move it to an
# environment variable before any production deployment.
SECRET_KEY = 't!!vo0zfzvwkp-_r@$vuqjc=hanbxi^#jl1w9*^z8m(q)mlke8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'cristianoronaldoyopmailcom_299.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'cristianoronaldoyopmailcom_299.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
import environ
env = environ.Env()
# NOTE(review): '*' disables Django's Host-header validation (overrides the
# empty list set earlier in this file) -- tighten for production.
ALLOWED_HOSTS = ['*']
SITE_ID = 1
# Serve static files through WhiteNoise.
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
# Database settings come from the DATABASE_URL environment variable,
# replacing the sqlite default defined earlier.
DATABASES = {
    'default': env.db()
}
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
LOCAL_APPS = [
    'home',
]
THIRD_PARTY_APPS = [
    'rest_framework',
    'rest_framework.authtoken',
    'bootstrap4',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
# NOTE(review): allauth documents the strings 'none'/'optional'/'mandatory'
# for this setting; verify None behaves as intended for the pinned version.
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
|
[
"ayushpuroheet@gmail.com"
] |
ayushpuroheet@gmail.com
|
ab2312766b10746a33edee87aae7a0185bc0508e
|
70ce903a7b835e4e960abe405158513790d37426
|
/django-bloggy/bloggy_project/blog/models.py
|
6e45b65afb50888a69149b4da6bd875560586d7b
|
[] |
no_license
|
lpatmo/book2-exercises
|
29af718d74732a5bbe287ab60a67b0d84d4e0abd
|
9524bc58997ff4eda10177abf70805f3691e247c
|
refs/heads/master
| 2020-12-25T22:29:09.391501
| 2014-10-26T03:15:35
| 2014-10-26T03:15:35
| 25,755,186
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 667
|
py
|
from django.db import models
from uuslug import uuslug
class Post(models.Model):
    """Blog post; the slug is regenerated from the title on every save."""
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    title = models.CharField(max_length=100)
    content = models.TextField()
    tag = models.CharField(max_length=20, blank=True, null=True)
    image = models.ImageField(upload_to="images", blank=True, null=True)
    views = models.IntegerField(default=0)  # simple page-view counter
    slug = models.CharField(max_length=100, unique=True)
    def __unicode__(self):
        # NOTE(review): Python 2 convention; Python 3 / modern Django uses __str__.
        return self.title
    def save(self, *args, **kwargs):
        # Regenerate the slug from the title; uuslug ensures uniqueness.
        self.slug = uuslug(self.title, instance=self, max_length=100)
        super(Post, self).save(*args, **kwargs)
|
[
"hermanmu@gmail.com"
] |
hermanmu@gmail.com
|
808afd2c166dd88286794b21c33a75891fcad75a
|
eb0bb5267035c0222da0c072c5dcd85b46099904
|
/test/bug.986.t
|
7d7e22538d6c69ad56a124722cd5c465bf5b6fda
|
[
"MIT"
] |
permissive
|
bjornreppen/task
|
6d96f578eec7b9cceeb4d728caeda87e7a446949
|
a9eac8bb715ac8f51073c080ac439bf5c09493e8
|
refs/heads/master
| 2021-05-30T07:48:39.263967
| 2015-10-21T20:50:42
| 2015-10-21T20:50:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,329
|
t
|
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
###############################################################################
#
# Copyright 2006 - 2015, Paul Beckingham, Federico Hernandez.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# http://www.opensource.org/licenses/mit-license.php
#
###############################################################################
import sys
import os
import unittest
# Ensure python finds the local simpletap module
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from basetest import Task, TestCase
from basetest import Taskd, ServerTestCase
class TestBug986(TestCase):
    """Regression test: rc.dateformat.info must override rc.dateformat."""
    def setUp(self):
        """Executed before each test in the class"""
        self.t = Task()
    def test_dateformat_precedence(self):
        """Verify rc.dateformat.info takes precedence over rc.dateformat"""
        self.t('add test')
        self.t('1 start')
        # With both overrides set, only the .info format should appear.
        code, out, err = self.t('1 info rc.dateformat:XX rc.dateformat.info:__')
        self.assertIn('__', out)
        self.assertNotIn('XX', out)
        # An empty rc.dateformat.info falls back to rc.dateformat.
        code, out, err = self.t('1 info rc.dateformat:__ rc.dateformat.info:')
        self.assertIn('__', out)
if __name__ == "__main__":
    from simpletap import TAPTestRunner
    unittest.main(testRunner=TAPTestRunner())
# vim: ai sts=4 et sw=4 ft=python
|
[
"paul@beckingham.net"
] |
paul@beckingham.net
|
ae2eade74f9f078d1840f1f5df750227c8959659
|
ce6e91fb9a5a9049d817d020ca0018b7f4008b9b
|
/runtests.py
|
ef35cd877b6d81a7ad6d506365c6d7dfbe0e8cb7
|
[] |
no_license
|
ccnmtl/django-pagetimer
|
b98536273b38c64f10d6832b7b74833099e68436
|
2844b3c702df2952deffdf6cd75c9e47e6f35284
|
refs/heads/master
| 2021-01-09T20:53:18.627185
| 2017-08-30T19:32:23
| 2017-08-30T19:32:23
| 58,394,973
| 0
| 0
| null | 2017-08-30T19:32:23
| 2016-05-09T17:25:37
|
Python
|
UTF-8
|
Python
| false
| false
| 2,149
|
py
|
""" run tests for pagetimer
$ virtualenv ve
$ ./ve/bin/pip install Django==1.8
$ ./ve/bin/pip install .
$ ./ve/bin/python runtests.py
"""
import django
from django.conf import settings
from django.core.management import call_command
def main():
    """Configure a minimal Django environment and run the pagetimer tests."""
    # Dynamically configure the Django settings with the minimum necessary to
    # get Django running tests
    settings.configure(
        MIDDLEWARE_CLASSES=(
            'django.contrib.sessions.middleware.SessionMiddleware',
            'django.contrib.auth.middleware.AuthenticationMiddleware',
        ),
        INSTALLED_APPS=(
            'django.contrib.auth',
            'django.contrib.sessions',
            'django.contrib.contenttypes',
            'pagetimer',
        ),
        TEST_RUNNER='django.test.runner.DiscoverRunner',
        TEMPLATES=[
            {
                'BACKEND': 'django.template.backends.django.DjangoTemplates',
                'DIRS': [],
                'APP_DIRS': True,
                'OPTIONS': {
                    'context_processors': [
                        'django.contrib.auth.context_processors.auth',
                        'django.template.context_processors.debug',
                        'django.template.context_processors.i18n',
                        'django.template.context_processors.media',
                        'django.template.context_processors.static',
                        'django.template.context_processors.tz',
                        'django.contrib.messages.context_processors.messages',
                    ],
                },
            },
        ],
        COVERAGE_EXCLUDES_FOLDERS=['migrations'],
        ROOT_URLCONF='pagetimer.urls',
        # Django replaces this, but it still wants it. *shrugs*
        DATABASES={
            'default': {
                'ENGINE': 'django.db.backends.sqlite3',
                'NAME': ':memory:',
                'HOST': '',
                'PORT': '',
                'USER': '',
                'PASSWORD': '',
            }
        },
    )
    django.setup()
    # Fire off the tests
    call_command('test')
if __name__ == '__main__':
    main()
|
[
"anders@columbia.edu"
] |
anders@columbia.edu
|
1f9f53be7d85b393f7c0638c796d8ddc9f14b72f
|
77090c3eaf15342505edc228ea19769ab219e0f7
|
/CNVbenchmarkeR/output/manta3-datasetall/results17316/runWorkflow.py
|
8ecfbc9ac2eb16a453983e3a063bca3a9ffd2a6b
|
[
"MIT"
] |
permissive
|
robinwijngaard/TFM_code
|
046c983a8eee7630de50753cff1b15ca3f7b1bd5
|
d18b3e0b100cfb5bdd9c47c91b01718cc9e96232
|
refs/heads/main
| 2023-06-20T02:55:52.071899
| 2021-07-13T13:18:09
| 2021-07-13T13:18:09
| 345,280,544
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,090
|
py
|
#!/usr/bin/env python2
# Workflow run script auto-generated by command: '/home/robin/Documents/Project/manta/Install/bin/configManta.py --bam=/home/robin/Documents/Project/Samples/bam/all/17316.bam --referenceFasta=/home/robin/Documents/Project/Samples/hg38/hg38.fa --config=/home/robin/Documents/Project/TFM_code/CNVbenchmarkeR/output/manta2-datasetall/configManta.py.ini --exome --runDir=/home/robin/Documents/Project/TFM_code/CNVbenchmarkeR/output/manta2-datasetall/results17316'
#
import os, sys
# Manta is Python 2 only: refuse to run under Python 3...
if sys.version_info >= (3,0):
    import platform
    raise Exception("Manta does not currently support python3 (version %s detected)" % (platform.python_version()))
# ...and require at least Python 2.6.
if sys.version_info < (2,6):
    import platform
    raise Exception("Manta requires python2 version 2.6+ (version %s detected)" % (platform.python_version()))
scriptDir=os.path.abspath(os.path.dirname(__file__))
# NOTE(review): machine-specific absolute path baked in by the config
# generator; this script is not portable across hosts as-is.
sys.path.append(r'/home/robin/Documents/Project/manta/Install/lib/python')
from mantaWorkflow import MantaWorkflow
def get_run_options(workflowClassName) :
    """Parse, validate and normalize command-line run options for the workflow.

    Returns an optparse options object with these guarantees:
      - options.mode is "local"
      - options.jobs is a positive int or the string "unlimited"
      - options.memMb is set (in megabytes) from the user-facing --memGb value
      - options.resetTasks lists tasks to re-run (only "makeHyGenDir" when --rescore)
    Exits the process via parser.error()/print_help() on invalid input.
    """
    from optparse import OptionGroup, SUPPRESS_HELP
    from configBuildTimeInfo import workflowVersion
    from configureUtil import EpilogOptionParser
    from estimateHardware import EstException, getNodeHyperthreadCoreCount, getNodeMemMb

    epilog="""Note this script can be re-run to continue the workflow run in case of interruption.
Also note that dryRun option has limited utility when task definition depends on upstream task
results -- in this case the dry run will not cover the full 'live' run task set."""

    parser = EpilogOptionParser(description="Version: %s" % (workflowVersion), epilog=epilog, version=workflowVersion)

    parser.add_option("-m", "--mode", type="string",dest="mode",
                      help=SUPPRESS_HELP)
    parser.add_option("-j", "--jobs", type="string",dest="jobs",
                      help="number of jobs, must be an integer or 'unlimited' (default: Estimate total cores on this node)")
    parser.add_option("-g","--memGb", type="string",dest="memGb",
                      help="gigabytes of memory available to run workflow, must be an integer (default: Estimate the total memory for this node)")
    parser.add_option("-d","--dryRun", dest="isDryRun",action="store_true",default=False,
                      help="dryRun workflow code without actually running command-tasks")
    parser.add_option("--quiet", dest="isQuiet",action="store_true",default=False,
                      help="Don't write any log output to stderr (but still write to workspace/pyflow.data/logs/pyflow_log.txt)")

    def isLocalSmtp() :
        # Email notification is only offered when a local SMTP server answers.
        import smtplib
        try :
            smtplib.SMTP('localhost')
        except :
            return False
        return True

    isEmail = isLocalSmtp()
    # Hide the --mailTo help text entirely when no local SMTP server exists.
    emailHelp = SUPPRESS_HELP
    if isEmail :
        emailHelp="send email notification of job completion status to this address (may be provided multiple times for more than one email address)"
    parser.add_option("-e","--mailTo", type="string",dest="mailTo",action="append",help=emailHelp)

    debug_group = OptionGroup(parser,"development debug options")
    debug_group.add_option("--rescore", dest="isRescore",action="store_true",default=False,
                           help="Reset task list to re-run hypothesis generation and scoring without resetting graph generation.")
    parser.add_option_group(debug_group)

    ext_group = OptionGroup(parser,"extended portability options (should not be needed by most users)")
    ext_group.add_option("--maxTaskRuntime", type="string", metavar="hh:mm:ss",
                         help="Specify max runtime per task (no default)")
    parser.add_option_group(ext_group)

    (options,args) = parser.parse_args()

    # Drop any --mailTo values silently when email can't be sent anyway.
    if not isEmail : options.mailTo = None

    # This script takes no positional arguments.
    if len(args) :
        parser.print_help()
        sys.exit(2)

    if options.mode is None :
        options.mode = "local"
    elif options.mode not in ["local"] :
        parser.error("Invalid mode. Available modes are: local")

    # Default job count to the detected hyperthread core count.
    if options.jobs is None :
        try :
            options.jobs = getNodeHyperthreadCoreCount()
        except EstException:
            parser.error("Failed to estimate cores on this node. Please provide job count argument (-j).")
    if options.jobs != "unlimited" :
        options.jobs=int(options.jobs)
        if options.jobs <= 0 :
            parser.error("Jobs must be 'unlimited' or an integer greater than 1")

    # note that the user sees gigs, but we set megs
    if options.memGb is None :
        try :
            options.memMb = getNodeMemMb()
        except EstException:
            parser.error("Failed to estimate available memory on this node. Please provide available gigabyte argument (-g).")
    elif options.memGb != "unlimited" :
        options.memGb=int(options.memGb)
        if options.memGb <= 0 :
            parser.error("memGb must be 'unlimited' or an integer greater than 1")
        options.memMb = 1024*options.memGb
    else :
        options.memMb = options.memGb

    options.resetTasks=[]
    if options.isRescore :
        options.resetTasks.append("makeHyGenDir")

    return options
def main(pickleConfigFile, primaryConfigSection, workflowClassName) :
    """Load the pickled workflow configuration, run the workflow, and record status.

    Side effects in flowOptions.runDir: removes any stale workflow.exitcode.txt
    before the run, passes workflow.warning.log.txt / workflow.error.log.txt to
    the workflow for monitoring, and always writes the numeric return value to
    workflow.exitcode.txt when the run ends. Terminates the process with that
    return value via sys.exit().
    """
    from configureUtil import getConfigWithPrimaryOptions

    runOptions=get_run_options(workflowClassName)
    flowOptions,configSections=getConfigWithPrimaryOptions(pickleConfigFile,primaryConfigSection)

    # new logs and marker files to assist automated workflow monitoring:
    warningpath=os.path.join(flowOptions.runDir,"workflow.warning.log.txt")
    errorpath=os.path.join(flowOptions.runDir,"workflow.error.log.txt")
    exitpath=os.path.join(flowOptions.runDir,"workflow.exitcode.txt")

    # the exit path should only exist once the workflow completes:
    if os.path.exists(exitpath) :
        if not os.path.isfile(exitpath) :
            raise Exception("Unexpected filesystem item: '%s'" % (exitpath))
        os.unlink(exitpath)

    wflow = workflowClassName(flowOptions)

    # Default to failure; wflow.run() overwrites this on a normal return.
    retval=1
    try:
        retval=wflow.run(mode=runOptions.mode,
                         nCores=runOptions.jobs,
                         memMb=runOptions.memMb,
                         dataDirRoot=flowOptions.workDir,
                         mailTo=runOptions.mailTo,
                         isContinue="Auto",
                         isForceContinue=True,
                         isDryRun=runOptions.isDryRun,
                         isQuiet=runOptions.isQuiet,
                         resetTasks=runOptions.resetTasks,
                         successMsg=wflow.getSuccessMessage(),
                         retryWindow=0,
                         retryMode='all',
                         warningLogFile=warningpath,
                         errorLogFile=errorpath)
    finally:
        # Always persist the exit code marker, even when the run raised.
        exitfp=open(exitpath,"w")
        exitfp.write("%i\n" % (retval))
        exitfp.close()

    sys.exit(retval)
main(r"/home/robin/Documents/Project/TFM_code/CNVbenchmarkeR/output/manta2-datasetall/results17316/runWorkflow.py.config.pickle","manta",MantaWorkflow)
|
[
"robinwijngaard@gmail.com"
] |
robinwijngaard@gmail.com
|
743cc0818768c373bc08f9acf81e567aacb3a69b
|
d528d21d32a2a7f299e8365d0a935b8718f9c07f
|
/cogs/utils/checks.py
|
7f0962fe5b0e94c665e2849f9eb198a293c99c7d
|
[] |
no_license
|
sizumita/Aegis
|
53b3f3db4d88b8ffdbc0d44781f55251081a32fc
|
2c9684695a32481583fd214fa63deaddea3d5ebc
|
refs/heads/master
| 2020-09-11T00:05:48.629459
| 2020-06-23T14:04:41
| 2020-06-23T14:04:41
| 221,874,644
| 6
| 4
| null | 2019-12-10T10:58:34
| 2019-11-15T08:04:23
|
Python
|
UTF-8
|
Python
| false
| false
| 2,758
|
py
|
from .database import CommandPermission
from discord.ext.commands import check
import discord
async def check_command_permission(context):
    """
    Permission rules (translated from the original Japanese docstring):
    - Commands are always allowed in DMs.
    - If no CommandPermission row exists, the command has not been enabled.
    - If a row exists and its roles/users/permissions are all empty,
      anyone may use the command.
    :param context: commands.Context
    :return: bool
    """
    # DM channels have no guild, so no permission model applies.
    if not context.guild:
        return True
    # The help command and anything in the Manage cog are always allowed.
    if context.command.name == 'help':
        return True
    elif context.cog:
        if context.cog.qualified_name == 'Manage':
            return True
    p: CommandPermission = await CommandPermission.query.where(CommandPermission.id == context.guild.id) \
        .where(CommandPermission.name == context.bot.get_command_full_name(context.command)).gino.first()
    # No row yet: a cog flagged 'already_on' registers the command on first use.
    if not p:
        if getattr(context.cog, 'already_on', False):
            p = await CommandPermission.create(id=context.guild.id,
                                               name=context.bot.get_command_full_name(context.command))
        else:
            return False
    # Guild administrators bypass per-command restrictions.
    if context.author.guild_permissions.administrator:
        return True
    # No restrictions configured: open to everyone.
    if not p.roles and not p.users:
        return True
    checks = []
    if p.roles:
        # Allowed when the author holds at least one whitelisted role.
        checks.append(any(str(role.id) in p.roles for role in context.author.roles))
    if p.users:
        # Allowed when the author is individually whitelisted.
        checks.append(str(context.author.id) in p.users)
    return any(checks)
def admin_only():
    """Command check: only members holding the administrator permission pass."""
    def predicate(ctx):
        perms: discord.Permissions = ctx.author.guild_permissions
        return bool(perms.administrator)
    return check(predicate)
def safety():
    """When a CommandPermission row exists but nothing is configured on it,
    restrict the command to administrators only.
    (Translated from the original Japanese docstring.)
    """
    async def predicate(ctx):
        # Look up the per-guild permission row for this exact command name.
        p: CommandPermission = await CommandPermission.query.where(CommandPermission.id == ctx.guild.id) \
            .where(CommandPermission.name == ctx.bot.get_command_full_name(ctx.command)).gino.first()
        # No row at all: deny outright.
        if not p:
            return False
        if not p.users and not p.roles:
            # Row exists but is unconfigured: only administrators may run it.
            permissions: discord.Permissions = ctx.author.guild_permissions
            if not permissions.administrator:
                return False
        return True
    return check(predicate)
def prefix_in(prefixes):
    """Command check: allow invocation only with one of the given prefixes."""
    async def predicate(ctx):
        # Direct membership test replaces the redundant if/return True/False.
        return ctx.prefix in prefixes
    return check(predicate)
|
[
"sumito@izumita.com"
] |
sumito@izumita.com
|
94ec5975940892096bc5b805de5af3e9c66312a3
|
6b8960551ee4be37c46f6c5f28257845fcb871ed
|
/task1.py
|
2105ae960977b9acf3bde10337df6d46c5ad633f
|
[] |
no_license
|
htrueman/db2_limited_test
|
10e9e574fe52b2346c33f4485f8b1dec00c30ac8
|
489379a952ad5c1ecb5123e9e3d41ec28206dc01
|
refs/heads/master
| 2022-12-09T06:32:27.709446
| 2017-06-12T01:40:08
| 2017-06-12T01:40:08
| 93,772,542
| 0
| 0
| null | 2022-11-22T01:46:27
| 2017-06-08T16:56:17
|
Python
|
UTF-8
|
Python
| false
| false
| 649
|
py
|
test_num1 = 1
test_num2 = 10
test_num3 = 2
def handle_numbers(number1, number2, number3):
    """Summarise which numbers in [number1, number2] are divisible by number3.

    Returns a human-readable result string either listing the divisible
    numbers with their count, or stating that none were found.
    """
    divisible = [str(n) for n in range(number1, number2 + 1) if n % number3 == 0]
    if not divisible:
        return "Result:\nThere are no divisible numbers by {} in given range".\
            format(number3)
    return "Result:\n{}, because {} are divisible by {}".\
        format(len(divisible), ', '.join(divisible), number3)
print (handle_numbers(test_num1, test_num2, test_num3))
|
[
"vege1wgw@gmail.com"
] |
vege1wgw@gmail.com
|
fe01b307a0814fd473a553ad5bfd3a7ad7f22547
|
245a3f8cea6f232bf3142706c11188b51eb21774
|
/python/hetu/onnx/onnx_opset/Where.py
|
6da3b659d9f9858f398695ae791903a6f8c2c8b5
|
[
"Apache-2.0"
] |
permissive
|
initzhang/Hetu
|
5bfcb07e62962fbc83def14148f8367fab02625a
|
447111a358e4dc6df5db9c216bdb3590fff05f84
|
refs/heads/main
| 2023-06-20T18:37:21.760083
| 2021-07-27T04:37:48
| 2021-07-27T04:37:48
| 389,848,768
| 0
| 0
|
Apache-2.0
| 2021-07-27T04:32:57
| 2021-07-27T04:32:57
| null |
UTF-8
|
Python
| false
| false
| 610
|
py
|
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from onnx import onnx_pb
from hetu.onnx import constants, util, graph
from hetu.onnx.handler import hetu_op
from hetu.onnx.onnx_opset import general
@hetu_op(["WhereOp"], onnx_op=["Where"])
class Where():
@classmethod
def version_1(cls, ctx, node, **kwargs):
assert False, "This version of the operator has been available since version 9 of the default ONNX operator set"
pass
@classmethod
def version_9(cls, ctx, node, **kwargs):
pass
|
[
"swordonline@foxmail.com"
] |
swordonline@foxmail.com
|
2ca77983524514c47a936a1f296297e5ba1c4456
|
7b1b4ed8bd4c887362b367625a833c28aa919dd8
|
/wpaudit/providers/aliyun/resources/ram/policies.py
|
09ac9427cfcba323da87129ef7e60ece906a9935
|
[] |
no_license
|
wperic/wpaudit
|
6bbd557c803ce9bceb764c1451daeb5e440a3d9c
|
ed69c1eabcf85e80ed8fe5397d2d369fd3ff35d8
|
refs/heads/main
| 2023-07-16T21:36:57.528548
| 2021-09-03T10:35:43
| 2021-09-03T10:35:43
| 402,716,870
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,794
|
py
|
from wpaudit.providers.aliyun.resources.base import AliyunResources
from wpaudit.providers.aliyun.facade.base import AliyunFacade
import json
class Policies(AliyunResources):
    """Collection of Aliyun RAM policies, keyed by policy name."""

    def __init__(self, facade: AliyunFacade):
        super().__init__(facade)

    async def fetch_all(self):
        """Fetch every RAM policy and keep only those with at least one attachment."""
        for raw_policy in await self.facade.ram.get_policies():
            id, policy = await self._parse_policy(raw_policy)
            if id:
                self[id] = policy

    async def _parse_policy(self, raw_policy):
        """
        Parse one raw policy description. Only policies with at least one
        attachment are processed; others are skipped.

        :param raw_policy: raw policy dict as returned by the RAM API
        :return: (policy name, parsed dict), or (None, None) when skipped
        """
        if raw_policy.get('AttachmentCount') > 0:
            policy_dict = {}
            policy_dict['id'] = policy_dict['name'] = raw_policy.get('PolicyName')
            policy_dict['description'] = raw_policy.get('Description')
            policy_dict['create_date'] = raw_policy.get('CreateDate')
            policy_dict['update_date'] = raw_policy.get('UpdateDate')
            policy_dict['attachment_count'] = raw_policy.get('AttachmentCount')
            policy_dict['type'] = raw_policy.get('PolicyType')
            policy_dict['default_version'] = raw_policy.get('DefaultVersion')
            policy_version = await self.facade.ram.get_policy_version(policy_dict['name'],
                                                                      policy_dict['type'],
                                                                      policy_dict['default_version'])
            # The policy document arrives as a JSON string; decode it in place.
            policy_version['PolicyDocument'] = json.loads(policy_version['PolicyDocument'])
            policy_dict['policy_document'] = policy_version
            policy_entities = await self.facade.ram.get_policy_entities(policy_dict['name'],
                                                                        policy_dict['type'])
            policy_dict['entities'] = {}
            # Table-driven extraction replaces three duplicated user/group/role loops.
            for container, member, key in (('Users', 'User', 'users'),
                                           ('Groups', 'Group', 'groups'),
                                           ('Roles', 'Role', 'roles')):
                members = policy_entities[container][member]
                if members:
                    policy_dict['entities'][key] = [m[member + 'Name'] for m in members]
            return policy_dict['id'], policy_dict
        else:
            return None, None
|
[
"90035639+wperic@users.noreply.github.com"
] |
90035639+wperic@users.noreply.github.com
|
04d46f70d2543594d36fc9d340ad9c2da9f9cd7b
|
7eb8bf846dc7021751019debf91925139203bed2
|
/Django_Clases/tercer_proyecto/populate_modelos_aplicacion.py
|
348b1e00929e50d9b01698e636df06708a4c9001
|
[] |
no_license
|
rpparada/python-and-django-full-stack-web-developer-bootcamp
|
5c384dc1c19557097c893cf6149c1831984b1946
|
7b91f16cfb49d7de71901857b4e4c8f447db5e6f
|
refs/heads/master
| 2021-09-08T22:40:44.737431
| 2018-03-12T15:12:06
| 2018-03-12T15:12:06
| 116,153,519
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 695
|
py
|
import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE','tercer_proyecto.settings')
import django
django.setup()
import random
from modelos_aplicacion.models import Usuarios
from faker import Faker
generaFake = Faker()
def popular(N=10):
    """Insert N fake Usuarios rows; get_or_create keeps the load idempotent."""
    for _ in range(N):
        nombre_falso = generaFake.first_name()
        apellido_falso = generaFake.last_name()
        email_falso = generaFake.email()
        # get_or_create returns (obj, created); only existence matters here,
        # so the unused binding and dead commented-out call were removed.
        Usuarios.objects.get_or_create(nombre=nombre_falso, apellido=apellido_falso, email=email_falso)
if __name__ == '__main__':
    print('Cargando tabla(s)... ')
    popular(20)
    # Fixed user-facing typo: 'Rabla(s)' -> 'Tabla(s)'.
    print('Tabla(s) cargada(s)!')
|
[
"rpparada@gmail.com"
] |
rpparada@gmail.com
|
6bbe246fd9bd6d0eb23ccd5e2f43f5280487874c
|
d29c2dea4afbb21de0b1e508e501ee6711805451
|
/__main__.py
|
e084aa8b11fab88e422d61a1e430451cb2602f83
|
[
"MIT"
] |
permissive
|
cdeitrick/workflows
|
ef69003cbd6030bc828815b7c898128327da129a
|
8edd2a08078144a2445af3903eb13b71abb96538
|
refs/heads/master
| 2020-03-18T07:04:20.554986
| 2019-12-18T21:16:39
| 2019-12-18T21:16:39
| 134,430,686
| 0
| 0
|
MIT
| 2019-07-11T03:29:48
| 2018-05-22T14:50:28
|
Python
|
UTF-8
|
Python
| false
| false
| 333
|
py
|
from pipelines import main
import argparse
def create_parser() -> argparse.Namespace:
    """Build the CLI argument parser and return the parsed namespace."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "which",
        help="assembly or variants.",
        type=str,
        choices=['assembly', 'variants'],
    )
    return parser.parse_args()
if __name__ == "__main__":
main.main_shelly()
|
[
"cld100@pitt.edu"
] |
cld100@pitt.edu
|
6479a595ec5e5e6a86e7178104d6df7763bfa983
|
5f58a50d7c44d0cf612b9076df40da89302b5ba6
|
/geeadd/batch_copy.py
|
ff05da1fea9836b073de8a843dc1faa2c53b45c2
|
[
"Apache-2.0"
] |
permissive
|
jkrizan/gee_asset_manager_addon
|
386a2a5b96e31bdb5e40a08ad12545e11a376764
|
884793185ef5641f0b53349feb5f4c3be272fd28
|
refs/heads/master
| 2020-05-19T12:58:15.830923
| 2019-01-01T16:46:16
| 2019-01-01T16:46:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 716
|
py
|
from __future__ import print_function
import ee
import os
ee.Initialize()
def copy(collection_path, final_path):
    """Copy every asset in an Earth Engine collection to a destination path.

    Failures on individual assets (e.g. the destination already exists) are
    reported but do not abort the rest of the batch.
    """
    assets_list = ee.data.getList(params={'id': collection_path})
    assets_names = [os.path.basename(asset['id']) for asset in assets_list]
    print('Copying a total of '+str(len(assets_names))+'.....')
    for count, items in enumerate(assets_names):
        print('Copying '+str(count+1)+' of '+str(len(assets_names)), end='\r')
        init = collection_path+'/'+items
        final = final_path+'/'+items
        try:
            ee.data.copyAsset(init, final)
        except Exception as e:
            # Best-effort batch: report the failure instead of silently dropping it.
            print('Failed to copy '+init+': '+str(e))
#batchcopy(collection_path='users/samapriya/Belem/BelemRE',final_path='users/samapriya/bl')
|
[
"samapriya.roy@gmail.com"
] |
samapriya.roy@gmail.com
|
99d16f620ac24b74834e13c63e09b6196c038fb0
|
7f4fb112bc9ab2b90f5f2248f43285ce9ac2e0a0
|
/src/igem/neutronics/air/bare/borosilicate-glass-backfill/0wt/plot_all.in.one_cask.thickness_dose.rate_t4045_plug.py
|
c763b91a79e02074300d90606bbccfa7b9fb3d2b
|
[] |
no_license
|
TheDoctorRAB/plot
|
dd3b5134c91c8fa7032fcc077c5427b26a80e49d
|
ed6746d511222c03e79f93548fe3ecd4286bf7b1
|
refs/heads/master
| 2021-07-11T10:21:19.347531
| 2020-07-16T17:13:15
| 2020-07-16T17:13:15
| 20,462,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,119
|
py
|
########################################################################
# R.A.Borrelli
# @TheDoctorRAB
# rev.11.March.2015
########################################################################
#
# Plot routine
# All in one file, with no separate control input, lib files
# Plot data is contained in a separate data file, read on command line
# Set up for a secondary y axis if needed
#
########################################################################
#
#
#
#######
#
# imports
#
# plot
#
import numpy
import matplotlib
import matplotlib.pyplot as plot
from matplotlib.ticker import MultipleLocator
#
#######
#
# command line
#
from sys import argv
script,plot_datafile=argv #column 0 is the x values then odd columns contain dose/flux
#
#######
#
# screen resolution
#
import Tkinter
root=Tkinter.Tk()
#
########################################################################
#
#
#
#######
#
# screen resolution
#
###
#
# pixels
#
width=root.winfo_screenwidth()
height=root.winfo_screenheight()
#
###
#
# mm
#
width_mm=root.winfo_screenmmwidth()
height_mm=root.winfo_screenmmheight()
#
###
#
# in
#
width_in=width_mm/25.4
height_in=height_mm/25.4
#
###
#
# dpi
#
width_dpi=width/width_in
height_dpi=height/height_in
#
# Snap the measured horizontal DPI to the nearest standard value.
dpi_values=(96,120,144,168,192)
current_dpi=width_dpi  # falls back to the measured DPI if none is within 1000
minimum=1000
#
for dval in dpi_values:
    difference=abs(dval-width_dpi)
    if difference<minimum:
        minimum=difference
        current_dpi=dval
#
#######
#
# output to screen
#
print('width: %i px, height: %i px'%(width,height))
print('width: %i mm, height: %i mm'%(width_mm,height_mm))
print('width: %0.f in, height: %0.f in'%(width_in,height_in))
print('width: %0.f dpi, height: %0.f dpi'%(width_dpi,height_dpi))
print('size is %0.f %0.f'%(width,height))
print('current DPI is %0.f' % (current_dpi))
#
#######
#
# open the plot data file(s)
# add plot_dataN for each plot_datafileN
#
plot_data=numpy.loadtxt(plot_datafile,dtype=float)
#
#######
#
# graph parameters
#
###
#
# font sizes
#
matplotlib.rcParams.update({'font.size': 48}) #axis numbers
#
title_fontsize=54 #plot title
axis_fontsize=48 #axis labels
annotate_fontsize=48 #annotation
#
###
#
# set up for two y axis
#
fig,left_axis=plot.subplots()
# right_axis=left_axis.twinx()
#
###
#
# plot text
#
title='Dose rate - Bottom plate'
xtitle='Wall thickness [cm]'
ytitle='Dose rate [$\mu$Sv/h]'
#
###
#
# legend
# add linecolorN for each plot_dataN
# add curve_textN for each plot_dataN
#
line_color0='blue' #color
line_color1='orange' #color
line_color2='red' #color
line_color3='green' #color
line_color4='cyan' #color
#
curve_text0='10 wt% $B_4C$' #legend text
curve_text1='30 wt% $B_4C$' #legend text
curve_text2='50 wt% $B_4C$' #legend text
curve_text3='70 wt% $B_4C$' #legend text
curve_text4='90 wt% $B_4C$' #legend text
#
legend_location='lower left' #location of legend on grid
legend_font=42
#
###
#
# annotate
# position of the annotation dependent on axis domain and range
#
annotate_title='T-4045'
annotate_x=23
annotate_y=10000
#
annotate_title2='Air-Glass backfill'
annotate_x2=23
annotate_y2=7000
#
annotate_title3='0 wt% $^{10}B$'
annotate_x3=23
annotate_y3=3000
#
###
#
# axis domain and range
#
xmin=1
xmax=31
#
ymin=1
ymax=15000
#
###
#
# axis ticks
#
xmajortick=5
ymajortick=5000
#
xminortick=1
yminortick=1000
#
###
#
# grid linewidth
#
major_grid_linewidth=2.5
minor_grid_linewidth=2.1
#
major_grid_tick_length=7
minor_grid_tick_length=5
#
###
#
# curve linewidth
#
curve_linewidth=4.0
#
#######
#
# set plot diagnostics
#
###
#
# titles
#
plot.title(title,fontsize=title_fontsize)
left_axis.set_xlabel(xtitle,fontsize=axis_fontsize)
left_axis.set_ylabel(ytitle,fontsize=axis_fontsize)
# right_axis.set_ylabel()
#
###
#
# grid
#
left_axis.grid(which='major',axis='both',linewidth=major_grid_linewidth)
left_axis.grid(which='minor',axis='both',linewidth=minor_grid_linewidth)
#
left_axis.tick_params(axis='both',which='major',direction='inout',length=major_grid_tick_length)
left_axis.tick_params(axis='both',which='minor',direction='inout',length=minor_grid_tick_length)
#
###
#
# axis domain and range
#
plot.xlim(xmin,xmax)
left_axis.axis(ymin=ymin,ymax=ymax)
###
#
# axis ticks
#
left_axis.xaxis.set_major_locator(MultipleLocator(xmajortick))
left_axis.xaxis.set_minor_locator(MultipleLocator(xminortick))
left_axis.yaxis.set_major_locator(MultipleLocator(ymajortick))
left_axis.yaxis.set_minor_locator(MultipleLocator(yminortick))
#
###
#
# log scale option
# xmin,ymin !=0 for log scale
#
#left_axis.set_xscale('log')
left_axis.set_yscale('log')
#
###
#
# annotation
# comment out if not needed
#
left_axis.annotate(annotate_title,xy=(annotate_x,annotate_y),xytext=(annotate_x,annotate_y),fontsize=annotate_fontsize)
left_axis.annotate(annotate_title2,xy=(annotate_x2,annotate_y2),xytext=(annotate_x2,annotate_y2),fontsize=annotate_fontsize)
left_axis.annotate(annotate_title3,xy=(annotate_x3,annotate_y3),xytext=(annotate_x3,annotate_y3),fontsize=annotate_fontsize)
#
#######
#
# plot data
#
left_axis.plot(plot_data[:,0],plot_data[:,1],marker='o',color=line_color0,label=curve_text0,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,3],marker='o',color=line_color1,label=curve_text1,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,5],marker='o',color=line_color2,label=curve_text2,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,7],marker='o',color=line_color3,label=curve_text3,linewidth=curve_linewidth,markersize=20)
left_axis.plot(plot_data[:,0],plot_data[:,9],marker='o',color=line_color4,label=curve_text4,linewidth=curve_linewidth,markersize=20)
left_axis.legend(loc=legend_location,fontsize=legend_font) #legend needs to be after all the plot data
plot.get_current_fig_manager().resize(width,height)
plot.gcf().set_size_inches((0.01*width),(0.01*height))
#
#######
#
# save
#
plot.savefig(title,dpi=current_dpi)
#
#######
#
# plot to screen
#
# plot.show()
#
########################################################################
#
# EOF
#
########################################################################
|
[
"borrelli@localhost.localdomain"
] |
borrelli@localhost.localdomain
|
83e185e53ee41e521bdd311be71ebf8b7318349e
|
05b8143f004c6531a1d24a66888e2b02a41616cf
|
/mainApp/apis/cinemas_api.py
|
905d41498de23e6efa289decd85035190b6c01d9
|
[] |
no_license
|
cangmingssir/flask_tpp
|
1b0d8f40fd3298789beffca877874dd45d734987
|
e6903a47aa2658a105f79c37a30ef5f44a4d1fab
|
refs/heads/master
| 2020-03-19T12:04:37.056215
| 2018-06-17T08:07:48
| 2018-06-17T08:07:48
| 136,493,916
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,864
|
py
|
# coding:utf-8
from flask import request, session
from flask_restful import Resource, reqparse, fields, marshal_with
from mainApp import dao
from mainApp.models import Cinemas, User, Qx
from mainApp.settings import QX
def check_login(qx):
    """Decorator factory: require a logged-in user who holds the *qx* right bit.

    Returns {'msg': ...} error payloads when the user is missing or lacks
    the requested right; otherwise calls the wrapped handler.
    """
    def check(fn):
        from functools import wraps

        # functools.wraps preserves the wrapped handler's identity (name,
        # docstring) — without it every decorated method is named 'wrapper'.
        @wraps(fn)
        def wrapper(*args, **kwargs):
            # Token may arrive either as a query argument or as form data.
            token = request.args.get('token')
            if not token:
                token = request.form.get('token')
            user_id = session.get(token)
            loginUser = dao.getById(User, user_id)
            if not loginUser:
                return {'msg': '请先登录!'}
            # Bitmask check: the user must hold every bit of the requested right.
            if loginUser.rights & qx == qx:
                return fn(*args, **kwargs)
            qxObj = dao.queryOne(Qx).filter(Qx.right == qx).first()
            return {'msg': '您没有 {} 权限'.format(qxObj.name)}
        return wrapper
    return check
class CinemasApi(Resource):
    """REST resource exposing cinema listing (GET) and deletion (DELETE)."""

    # Request-parameter definitions.
    parser = reqparse.RequestParser()
    parser.add_argument('token')
    parser.add_argument('opt', required=True)
    parser.add_argument('name', help='电影院名称')
    parser.add_argument('city', help='影院城市不能为空')
    parser.add_argument('district', help='城市区域不能为空')
    parser.add_argument('sort', type=int, default=1)
    parser.add_argument('orderby', default='hallnum')
    parser.add_argument('limit', type=int, default=10)
    parser.add_argument('page', type=int, default=1)

    # Output-field definitions for marshalling.
    cinemas_fields = {
        'id': fields.Integer,
        'name': fields.String,
        'city': fields.String,
        'district': fields.String,
        'address': fields.String,
        'phone': fields.String,
        'score': fields.Float,
        'hallnum': fields.Integer,
        'servicecharge': fields.Float,
        'astrict': fields.Integer,
        'flag': fields.Boolean,
        'isdelete': fields.Boolean
    }
    out_fields = {
        'returnValue': fields.Nested(cinemas_fields)
    }

    def selectCinemas(self, cinemas):
        """Order and paginate a cinemas query; return one page of results."""
        args = self.parser.parse_args()
        sort = args.get('sort')
        # sort == 1 sorts descending (leading '-'), anything else ascending.
        cinemas = cinemas.order_by(('-' if sort == 1 else '') + args.get('orderby'))
        pager = cinemas.paginate(args.get('page'), args.get('limit'))
        return {'returnValue': pager.items}

    @marshal_with(out_fields)
    def get(self):
        # Validate request parameters.
        args = self.parser.parse_args()
        opt = args.get('opt')
        city = args.get('city')
        district = args.get('district')
        # Query cinemas in a specific district of a city.
        if opt == 'cityAndDistrict':
            if city and district:
                cinemas = dao.queryOne(Cinemas).filter(Cinemas.city == city,
                                                       Cinemas.district == district)
                if not cinemas.count():
                    return {'msg': '该地区没有电影院'}
                # BUG FIX: the result page was computed but never returned.
                return self.selectCinemas(cinemas)
            return {'msg': '城市和城区区域不能为空'}
        # Query cinemas in a whole city.
        elif opt == 'city':
            if city:
                cinemas = dao.queryOne(Cinemas).filter(Cinemas.city == city)
                if not cinemas.count():
                    return {'msg': '该城市没有电影院'}
                # BUG FIX: was missing 'return'.
                return self.selectCinemas(cinemas)
            return {'msg': '搜索城市不能为空'}
        # Query all cinemas.
        else:
            cinemas = dao.queryAll(Cinemas)
            # BUG FIX: was missing 'return'.
            return self.selectCinemas(cinemas)

    @check_login(QX.DELETE_QX)
    def delete(self):
        """Delete a cinema by 'cid'; requires the DELETE right."""
        cid = request.args.get('cid')
        cinemas = dao.getById(Cinemas, cid)
        if not cinemas:
            return {'msg': '您删除的影院不存在'}
        if not dao.delete(cinemas):
            return {'msg': '删除失败'}
        return {'msg': '删除成功'}

    def post(self):
        # Not implemented yet.
        pass
|
[
"mu_tongwu@163.com"
] |
mu_tongwu@163.com
|
140384afde407034a54ba2db872c23687b2803b5
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/exeY2wDuEW4rFeYvL_18.py
|
df232bc3446da8ba44e538db19a12468c1434bda
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 854
|
py
|
"""
Create an ordered 2D list (matrix). A matrix is ordered if its (0, 0) element
is 1, its (0, 1) element is 2, and so on. Your function needs to create an a ×
b matrix. `a` is the first argument and `b` is the second.
### Examples
ordered_matrix(5, 5) ➞ [
[1, 2, 3, 4, 5],
[6, 7, 8, 9, 10],
[11, 12, 13, 14, 15],
[16, 17, 18, 19, 20],
[21, 22, 23, 24, 25]
]
ordered_matrix(1, 1) ➞ [[1]]
ordered_matrix(1, 5) ➞ [[1, 2, 3, 4, 5]]
### Notes
* `a` is the height of the matrix (y coordinate), and `b` is the width (x coordinate).
* `a` and `b` will always be positive, and the matrix will always be square shaped (in each row are the same amount of columns).
* `a` and `b` are integers.
"""
def ordered_matrix(a, b):
    """Build an a-by-b matrix whose entries count upward row by row from 1."""
    matrix = []
    value = 1
    for _ in range(a):
        row = []
        for _ in range(b):
            row.append(value)
            value += 1
        matrix.append(row)
    return matrix
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.